author    Kent Overstreet <kent.overstreet@gmail.com>  2017-03-17 21:11:28 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>  2017-03-19 17:23:34 -0800
commit    3d43fc1954cf188414b41b90e569e5057d7f20a2 (patch)
tree      5d11c95c50e9b61b64c648bb5cf25362b84bb510
parent    8b9eb1274b81c9144238c589f0a9e12ea5961898 (diff)
bcachefs: Rename to avoid conflicts with bcache module
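This commit adds a bch2_ prefix to bcachefs's exported symbols (and renames the main header from bcache.h to bcachefs.h, trace events from trace_bcache_* to plain trace_*, and the "bcache:" strings in pr_fmt and the fault-injection macros to "bcachefs:") so the filesystem can coexist with the separate bcache block-cache module without symbol clashes. As a purely illustrative before/after, taken from the acl.h hunk further down in this diff rather than any new API:

/* before the rename: bch_ prefix, easily confused with the bcache module's namespace */
extern struct posix_acl *bch_get_acl(struct inode *, int);
extern int bch_set_acl(struct inode *, struct posix_acl *, int);

/* after the rename: bch2_ prefix, unique to bcachefs */
extern struct posix_acl *bch2_get_acl(struct inode *, int);
extern int bch2_set_acl(struct inode *, struct posix_acl *, int);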
-rw-r--r--  fs/bcachefs/acl.c | 24
-rw-r--r--  fs/bcachefs/acl.h | 8
-rw-r--r--  fs/bcachefs/alloc.c | 312
-rw-r--r--  fs/bcachefs/alloc.h | 28
-rw-r--r--  fs/bcachefs/bcachefs.h (renamed from fs/bcachefs/bcache.h) | 35
-rw-r--r--  fs/bcachefs/bcachefs_format.h | 16
-rw-r--r--  fs/bcachefs/bkey.c | 119
-rw-r--r--  fs/bcachefs/bkey.h | 87
-rw-r--r--  fs/bcachefs/bkey_methods.c | 50
-rw-r--r--  fs/bcachefs/bkey_methods.h | 22
-rw-r--r--  fs/bcachefs/bset.c | 252
-rw-r--r--  fs/bcachefs/bset.h | 130
-rw-r--r--  fs/bcachefs/btree_cache.c | 112
-rw-r--r--  fs/bcachefs/btree_cache.h | 28
-rw-r--r--  fs/bcachefs/btree_gc.c | 211
-rw-r--r--  fs/bcachefs/btree_gc.h | 18
-rw-r--r--  fs/bcachefs/btree_io.c | 208
-rw-r--r--  fs/bcachefs/btree_io.h | 30
-rw-r--r--  fs/bcachefs/btree_iter.c | 195
-rw-r--r--  fs/bcachefs/btree_iter.h | 94
-rw-r--r--  fs/bcachefs/btree_locking.h | 13
-rw-r--r--  fs/bcachefs/btree_types.h | 11
-rw-r--r--  fs/bcachefs/btree_update.c | 593
-rw-r--r--  fs/bcachefs/btree_update.h | 55
-rw-r--r--  fs/bcachefs/buckets.c | 112
-rw-r--r--  fs/bcachefs/buckets.h | 46
-rw-r--r--  fs/bcachefs/chardev.c | 124
-rw-r--r--  fs/bcachefs/chardev.h | 20
-rw-r--r--  fs/bcachefs/checksum.c | 103
-rw-r--r--  fs/bcachefs/checksum.h | 46
-rw-r--r--  fs/bcachefs/clock.c | 24
-rw-r--r--  fs/bcachefs/clock.h | 16
-rw-r--r--  fs/bcachefs/compress.c | 36
-rw-r--r--  fs/bcachefs/compress.h | 12
-rw-r--r--  fs/bcachefs/debug.c | 98
-rw-r--r--  fs/bcachefs/debug.h | 28
-rw-r--r--  fs/bcachefs/dirent.c | 136
-rw-r--r--  fs/bcachefs/dirent.h | 28
-rw-r--r--  fs/bcachefs/error.c | 22
-rw-r--r--  fs/bcachefs/error.h | 68
-rw-r--r--  fs/bcachefs/extents.c | 402
-rw-r--r--  fs/bcachefs/extents.h | 52
-rw-r--r--  fs/bcachefs/fs-gc.c | 120
-rw-r--r--  fs/bcachefs/fs-gc.h | 4
-rw-r--r--  fs/bcachefs/fs-io.c | 440
-rw-r--r--  fs/bcachefs/fs-io.h | 48
-rw-r--r--  fs/bcachefs/fs.c | 528
-rw-r--r--  fs/bcachefs/fs.h | 16
-rw-r--r--  fs/bcachefs/inode.c | 88
-rw-r--r--  fs/bcachefs/inode.h | 22
-rw-r--r--  fs/bcachefs/io.c | 309
-rw-r--r--  fs/bcachefs/io.h | 48
-rw-r--r--  fs/bcachefs/io_types.h | 2
-rw-r--r--  fs/bcachefs/journal.c | 290
-rw-r--r--  fs/bcachefs/journal.h | 92
-rw-r--r--  fs/bcachefs/keylist.c | 10
-rw-r--r--  fs/bcachefs/keylist.h | 22
-rw-r--r--  fs/bcachefs/migrate.c | 90
-rw-r--r--  fs/bcachefs/migrate.h | 6
-rw-r--r--  fs/bcachefs/move.c | 92
-rw-r--r--  fs/bcachefs/move.h | 12
-rw-r--r--  fs/bcachefs/movinggc.c | 66
-rw-r--r--  fs/bcachefs/movinggc.h | 6
-rw-r--r--  fs/bcachefs/opts.c | 64
-rw-r--r--  fs/bcachefs/opts.h | 42
-rw-r--r--  fs/bcachefs/str_hash.h | 98
-rw-r--r--  fs/bcachefs/super-io.c | 130
-rw-r--r--  fs/bcachefs/super-io.h | 74
-rw-r--r--  fs/bcachefs/super.c | 546
-rw-r--r--  fs/bcachefs/super.h | 68
-rw-r--r--  fs/bcachefs/sysfs.c | 154
-rw-r--r--  fs/bcachefs/sysfs.h | 2
-rw-r--r--  fs/bcachefs/tier.c | 60
-rw-r--r--  fs/bcachefs/tier.h | 6
-rw-r--r--  fs/bcachefs/trace.c | 2
-rw-r--r--  fs/bcachefs/util.c | 52
-rw-r--r--  fs/bcachefs/util.h | 72
-rw-r--r--  fs/bcachefs/xattr.c | 86
-rw-r--r--  fs/bcachefs/xattr.h | 12
-rw-r--r--  include/trace/events/bcachefs.h | 171
80 files changed, 3941 insertions, 4033 deletions
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 4363c57e5677..6fcac72cdd10 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -1,4 +1,4 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include <linux/init.h>
#include <linux/sched.h>
@@ -11,7 +11,7 @@
/*
* Convert from filesystem to in-memory representation.
*/
-static struct posix_acl *bch_acl_from_disk(const void *value, size_t size)
+static struct posix_acl *bch2_acl_from_disk(const void *value, size_t size)
{
const char *end = (char *)value + size;
int n, count;
@@ -25,7 +25,7 @@ static struct posix_acl *bch_acl_from_disk(const void *value, size_t size)
cpu_to_le32(BCH_ACL_VERSION))
return ERR_PTR(-EINVAL);
value = (char *)value + sizeof(bch_acl_header);
- count = bch_acl_count(size);
+ count = bch2_acl_count(size);
if (count < 0)
return ERR_PTR(-EINVAL);
if (count == 0)
@@ -82,13 +82,13 @@ fail:
/*
* Convert from in-memory to filesystem representation.
*/
-static void *bch_acl_to_disk(const struct posix_acl *acl, size_t *size)
+static void *bch2_acl_to_disk(const struct posix_acl *acl, size_t *size)
{
bch_acl_header *ext_acl;
char *e;
size_t n;
- *size = bch_acl_size(acl->a_count);
+ *size = bch2_acl_size(acl->a_count);
ext_acl = kmalloc(sizeof(bch_acl_header) + acl->a_count *
sizeof(bch_acl_entry), GFP_KERNEL);
if (!ext_acl)
@@ -131,7 +131,7 @@ fail:
return ERR_PTR(-EINVAL);
}
-struct posix_acl *bch_get_acl(struct inode *inode, int type)
+struct posix_acl *bch2_get_acl(struct inode *inode, int type)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
int name_index;
@@ -149,16 +149,16 @@ struct posix_acl *bch_get_acl(struct inode *inode, int type)
default:
BUG();
}
- ret = bch_xattr_get(c, inode, "", NULL, 0, name_index);
+ ret = bch2_xattr_get(c, inode, "", NULL, 0, name_index);
if (ret > 0) {
value = kmalloc(ret, GFP_KERNEL);
if (!value)
return ERR_PTR(-ENOMEM);
- ret = bch_xattr_get(c, inode, "", value,
+ ret = bch2_xattr_get(c, inode, "", value,
ret, name_index);
}
if (ret > 0)
- acl = bch_acl_from_disk(value, ret);
+ acl = bch2_acl_from_disk(value, ret);
else if (ret == -ENODATA || ret == -ENOSYS)
acl = NULL;
else
@@ -171,7 +171,7 @@ struct posix_acl *bch_get_acl(struct inode *inode, int type)
return acl;
}
-int bch_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+int bch2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
int name_index;
@@ -206,12 +206,12 @@ int bch_set_acl(struct inode *inode, struct posix_acl *acl, int type)
}
if (acl) {
- value = bch_acl_to_disk(acl, &size);
+ value = bch2_acl_to_disk(acl, &size);
if (IS_ERR(value))
return (int)PTR_ERR(value);
}
- ret = bch_xattr_set(c, inode, "", value, size, 0, name_index);
+ ret = bch2_xattr_set(c, inode, "", value, size, 0, name_index);
kfree(value);
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
index 079e568908e4..2e51726fa849 100644
--- a/fs/bcachefs/acl.h
+++ b/fs/bcachefs/acl.h
@@ -23,7 +23,7 @@ typedef struct {
__le32 a_version;
} bch_acl_header;
-static inline size_t bch_acl_size(int count)
+static inline size_t bch2_acl_size(int count)
{
if (count <= 4) {
return sizeof(bch_acl_header) +
@@ -35,7 +35,7 @@ static inline size_t bch_acl_size(int count)
}
}
-static inline int bch_acl_count(size_t size)
+static inline int bch2_acl_count(size_t size)
{
ssize_t s;
@@ -52,5 +52,5 @@ static inline int bch_acl_count(size_t size)
}
}
-extern struct posix_acl *bch_get_acl(struct inode *, int);
-extern int bch_set_acl(struct inode *, struct posix_acl *, int);
+extern struct posix_acl *bch2_get_acl(struct inode *, int);
+extern int bch2_set_acl(struct inode *, struct posix_acl *, int);
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 299795d117f2..3067181cfe26 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -39,13 +39,13 @@
* time around, and we garbage collect or rewrite the priorities sooner than we
* would have otherwise.
*
- * bch_bucket_alloc() allocates a single bucket from a specific device.
+ * bch2_bucket_alloc() allocates a single bucket from a specific device.
*
- * bch_bucket_alloc_set() allocates one or more buckets from different devices
+ * bch2_bucket_alloc_set() allocates one or more buckets from different devices
* in a given filesystem.
*
* invalidate_buckets() drives all the processes described above. It's called
- * from bch_bucket_alloc() and a few other places that need to make sure free
+ * from bch2_bucket_alloc() and a few other places that need to make sure free
* buckets are ready.
*
* invalidate_buckets_(lru|fifo)() find buckets that are available to be
@@ -53,7 +53,7 @@
* in either lru or fifo order.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "btree_update.h"
#include "buckets.h"
@@ -73,12 +73,12 @@
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
-static void __bch_bucket_free(struct bch_dev *, struct bucket *);
-static void bch_recalc_min_prio(struct bch_dev *, int);
+static void __bch2_bucket_free(struct bch_dev *, struct bucket *);
+static void bch2_recalc_min_prio(struct bch_dev *, int);
/* Allocation groups: */
-void bch_dev_group_remove(struct dev_group *grp, struct bch_dev *ca)
+void bch2_dev_group_remove(struct dev_group *grp, struct bch_dev *ca)
{
unsigned i;
@@ -96,7 +96,7 @@ void bch_dev_group_remove(struct dev_group *grp, struct bch_dev *ca)
spin_unlock(&grp->lock);
}
-void bch_dev_group_add(struct dev_group *grp, struct bch_dev *ca)
+void bch2_dev_group_add(struct dev_group *grp, struct bch_dev *ca)
{
unsigned i;
@@ -132,7 +132,7 @@ static void pd_controllers_update(struct work_struct *work)
rcu_read_lock();
for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
- bch_pd_controller_update(&c->tiers[i].pd,
+ bch2_pd_controller_update(&c->tiers[i].pd,
div_u64(faster_tiers_size *
c->tiering_percent, 100),
faster_tiers_dirty,
@@ -140,7 +140,7 @@ static void pd_controllers_update(struct work_struct *work)
spin_lock(&c->tiers[i].devs.lock);
group_for_each_dev(ca, &c->tiers[i].devs, iter) {
- struct bch_dev_usage stats = bch_dev_usage_read(ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
unsigned bucket_bits = ca->bucket_bits + 9;
u64 size = (ca->mi.nbuckets -
@@ -159,7 +159,7 @@ static void pd_controllers_update(struct work_struct *work)
fragmented = max(0LL, fragmented);
- bch_pd_controller_update(&ca->moving_gc_pd,
+ bch2_pd_controller_update(&ca->moving_gc_pd,
free, fragmented, -1);
faster_tiers_size += size;
@@ -192,7 +192,7 @@ static void pd_controllers_update(struct work_struct *work)
if (c->fastest_tier)
copygc_can_free = U64_MAX;
- bch_pd_controller_update(&c->foreground_write_pd,
+ bch2_pd_controller_update(&c->foreground_write_pd,
min(copygc_can_free,
div_u64(fastest_tier_size *
c->foreground_target_percent,
@@ -241,7 +241,7 @@ static int prio_io(struct bch_dev *ca, uint64_t bucket, int op)
ca->bio_prio->bi_iter.bi_sector = bucket * ca->mi.bucket_size;
ca->bio_prio->bi_bdev = ca->disk_sb.bdev;
ca->bio_prio->bi_iter.bi_size = bucket_bytes(ca);
- bch_bio_map(ca->bio_prio, ca->disk_buckets);
+ bch2_bio_map(ca->bio_prio, ca->disk_buckets);
return submit_bio_wait(ca->bio_prio);
}
@@ -256,7 +256,7 @@ static struct nonce prio_nonce(struct prio_set *p)
}};
}
-static int bch_prio_write(struct bch_dev *ca)
+static int bch2_prio_write(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct journal *j = &c->journal;
@@ -267,7 +267,7 @@ static int bch_prio_write(struct bch_dev *ca)
if (c->opts.nochanges)
return 0;
- trace_bcache_prio_write_start(ca);
+ trace_prio_write_start(ca);
atomic64_add(ca->mi.bucket_size * prio_buckets(ca),
&ca->meta_sectors_written);
@@ -293,7 +293,7 @@ static int bch_prio_write(struct bch_dev *ca)
get_random_bytes(&p->nonce, sizeof(p->nonce));
spin_lock(&ca->prio_buckets_lock);
- r = bch_bucket_alloc(ca, RESERVE_PRIO);
+ r = bch2_bucket_alloc(ca, RESERVE_PRIO);
BUG_ON(!r);
/*
@@ -301,27 +301,27 @@ static int bch_prio_write(struct bch_dev *ca)
* it getting gc'd from under us
*/
ca->prio_buckets[i] = r;
- bch_mark_metadata_bucket(ca, ca->buckets + r,
+ bch2_mark_metadata_bucket(ca, ca->buckets + r,
BUCKET_PRIOS, false);
spin_unlock(&ca->prio_buckets_lock);
- SET_PSET_CSUM_TYPE(p, bch_meta_checksum_type(c));
+ SET_PSET_CSUM_TYPE(p, bch2_meta_checksum_type(c));
- bch_encrypt(c, PSET_CSUM_TYPE(p),
+ bch2_encrypt(c, PSET_CSUM_TYPE(p),
prio_nonce(p),
p->encrypted_start,
bucket_bytes(ca) -
offsetof(struct prio_set, encrypted_start));
- p->csum = bch_checksum(c, PSET_CSUM_TYPE(p),
+ p->csum = bch2_checksum(c, PSET_CSUM_TYPE(p),
prio_nonce(p),
(void *) p + sizeof(p->csum),
bucket_bytes(ca) - sizeof(p->csum));
ret = prio_io(ca, r, REQ_OP_WRITE);
- if (bch_dev_fatal_io_err_on(ret, ca,
+ if (bch2_dev_fatal_io_err_on(ret, ca,
"prio write to bucket %zu", r) ||
- bch_meta_write_fault("prio"))
+ bch2_meta_write_fault("prio"))
return ret;
}
@@ -338,15 +338,15 @@ static int bch_prio_write(struct bch_dev *ca)
if (!test_bit(JOURNAL_STARTED, &c->journal.flags))
break;
- ret = bch_journal_res_get(j, &res, u64s, u64s);
+ ret = bch2_journal_res_get(j, &res, u64s, u64s);
if (ret)
return ret;
need_new_journal_entry = j->buf[res.idx].nr_prio_buckets <
ca->dev_idx + 1;
- bch_journal_res_put(j, &res);
+ bch2_journal_res_put(j, &res);
- ret = bch_journal_flush_seq(j, res.seq);
+ ret = bch2_journal_flush_seq(j, res.seq);
if (ret)
return ret;
} while (need_new_journal_entry);
@@ -360,7 +360,7 @@ static int bch_prio_write(struct bch_dev *ca)
for (i = 0; i < prio_buckets(ca); i++) {
if (ca->prio_last_buckets[i])
- __bch_bucket_free(ca,
+ __bch2_bucket_free(ca,
&ca->buckets[ca->prio_last_buckets[i]]);
ca->prio_last_buckets[i] = ca->prio_buckets[i];
@@ -368,11 +368,11 @@ static int bch_prio_write(struct bch_dev *ca)
spin_unlock(&ca->prio_buckets_lock);
- trace_bcache_prio_write_end(ca);
+ trace_prio_write_end(ca);
return 0;
}
-int bch_prio_read(struct bch_dev *ca)
+int bch2_prio_read(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct prio_set *p = ca->disk_buckets;
@@ -404,10 +404,10 @@ int bch_prio_read(struct bch_dev *ca)
bucket_nr++;
ret = prio_io(ca, bucket, REQ_OP_READ);
- if (bch_dev_fatal_io_err_on(ret, ca,
+ if (bch2_dev_fatal_io_err_on(ret, ca,
"prior read from bucket %llu",
bucket) ||
- bch_meta_read_fault("prio"))
+ bch2_meta_read_fault("prio"))
return -EIO;
got = le64_to_cpu(p->magic);
@@ -420,15 +420,15 @@ int bch_prio_read(struct bch_dev *ca)
"prio bucket with unknown csum type %llu bucket %lluu",
PSET_CSUM_TYPE(p), bucket);
- csum = bch_checksum(c, PSET_CSUM_TYPE(p),
+ csum = bch2_checksum(c, PSET_CSUM_TYPE(p),
prio_nonce(p),
(void *) p + sizeof(p->csum),
bucket_bytes(ca) - sizeof(p->csum));
- unfixable_fsck_err_on(bch_crc_cmp(csum, p->csum), c,
+ unfixable_fsck_err_on(bch2_crc_cmp(csum, p->csum), c,
"bad checksum reading prios from bucket %llu",
bucket);
- bch_encrypt(c, PSET_CSUM_TYPE(p),
+ bch2_encrypt(c, PSET_CSUM_TYPE(p),
prio_nonce(p),
p->encrypted_start,
bucket_bytes(ca) -
@@ -445,8 +445,8 @@ int bch_prio_read(struct bch_dev *ca)
}
mutex_lock(&c->bucket_lock);
- bch_recalc_min_prio(ca, READ);
- bch_recalc_min_prio(ca, WRITE);
+ bch2_recalc_min_prio(ca, READ);
+ bch2_recalc_min_prio(ca, WRITE);
mutex_unlock(&c->bucket_lock);
ret = 0;
@@ -476,7 +476,7 @@ static int wait_buckets_available(struct bch_dev *ca)
if (ca->inc_gen_needs_gc >= fifo_free(&ca->free_inc)) {
if (c->gc_thread) {
- trace_bcache_gc_cannot_inc_gens(ca->fs);
+ trace_gc_cannot_inc_gens(ca->fs);
atomic_inc(&c->kick_gc);
wake_up_process(ca->fs->gc_thread);
}
@@ -521,7 +521,7 @@ static void verify_not_on_freelist(struct bch_dev *ca, size_t bucket)
/* Bucket heap / gen */
-void bch_recalc_min_prio(struct bch_dev *ca, int rw)
+void bch2_recalc_min_prio(struct bch_dev *ca, int rw)
{
struct bch_fs *c = ca->fs;
struct prio_clock *clock = &c->prio_clock[rw];
@@ -550,25 +550,25 @@ void bch_recalc_min_prio(struct bch_dev *ca, int rw)
clock->min_prio = clock->hand - max_delta;
}
-static void bch_rescale_prios(struct bch_fs *c, int rw)
+static void bch2_rescale_prios(struct bch_fs *c, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
struct bch_dev *ca;
struct bucket *g;
unsigned i;
- trace_bcache_rescale_prios(c);
+ trace_rescale_prios(c);
for_each_member_device(ca, c, i) {
for_each_bucket(g, ca)
g->prio[rw] = clock->hand -
(clock->hand - g->prio[rw]) / 2;
- bch_recalc_min_prio(ca, rw);
+ bch2_recalc_min_prio(ca, rw);
}
}
-static void bch_inc_clock_hand(struct io_timer *timer)
+static void bch2_inc_clock_hand(struct io_timer *timer)
{
struct prio_clock *clock = container_of(timer,
struct prio_clock, rescale);
@@ -582,7 +582,7 @@ static void bch_inc_clock_hand(struct io_timer *timer)
/* if clock cannot be advanced more, rescale prio */
if (clock->hand == (u16) (clock->min_prio - 1))
- bch_rescale_prios(c, clock->rw);
+ bch2_rescale_prios(c, clock->rw);
mutex_unlock(&c->bucket_lock);
@@ -601,16 +601,16 @@ static void bch_inc_clock_hand(struct io_timer *timer)
*/
timer->expire += capacity >> 10;
- bch_io_timer_add(&c->io_clock[clock->rw], timer);
+ bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}
-static void bch_prio_timer_init(struct bch_fs *c, int rw)
+static void bch2_prio_timer_init(struct bch_fs *c, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
struct io_timer *timer = &clock->rescale;
clock->rw = rw;
- timer->fn = bch_inc_clock_hand;
+ timer->fn = bch2_inc_clock_hand;
timer->expire = c->capacity >> 10;
}
@@ -626,7 +626,7 @@ static inline bool can_inc_bucket_gen(struct bch_dev *ca, struct bucket *g)
return bucket_gc_gen(ca, g) < BUCKET_GC_GEN_MAX;
}
-static bool bch_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
+static bool bch2_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
{
if (!is_available_bucket(READ_ONCE(g->mark)))
return false;
@@ -637,11 +637,11 @@ static bool bch_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
return can_inc_bucket_gen(ca, g);
}
-static void bch_invalidate_one_bucket(struct bch_dev *ca, struct bucket *g)
+static void bch2_invalidate_one_bucket(struct bch_dev *ca, struct bucket *g)
{
spin_lock(&ca->freelist_lock);
- bch_invalidate_bucket(ca, g);
+ bch2_invalidate_bucket(ca, g);
g->read_prio = ca->fs->prio_clock[READ].hand;
g->write_prio = ca->fs->prio_clock[WRITE].hand;
@@ -689,8 +689,8 @@ static void invalidate_buckets_lru(struct bch_dev *ca)
ca->heap.used = 0;
mutex_lock(&ca->fs->bucket_lock);
- bch_recalc_min_prio(ca, READ);
- bch_recalc_min_prio(ca, WRITE);
+ bch2_recalc_min_prio(ca, READ);
+ bch2_recalc_min_prio(ca, WRITE);
/*
* Find buckets with lowest read priority, by building a maxheap sorted
@@ -698,7 +698,7 @@ static void invalidate_buckets_lru(struct bch_dev *ca)
* all buckets have been visited.
*/
for_each_bucket(g, ca) {
- if (!bch_can_invalidate_bucket(ca, g))
+ if (!bch2_can_invalidate_bucket(ca, g))
continue;
bucket_heap_push(ca, g, bucket_sort_key(g));
@@ -714,13 +714,13 @@ static void invalidate_buckets_lru(struct bch_dev *ca)
heap_resort(&ca->heap, bucket_max_cmp);
/*
- * If we run out of buckets to invalidate, bch_allocator_thread() will
+ * If we run out of buckets to invalidate, bch2_allocator_thread() will
* kick stuff and retry us
*/
while (!fifo_full(&ca->free_inc) &&
heap_pop(&ca->heap, e, bucket_max_cmp)) {
- BUG_ON(!bch_can_invalidate_bucket(ca, e.g));
- bch_invalidate_one_bucket(ca, e.g);
+ BUG_ON(!bch2_can_invalidate_bucket(ca, e.g));
+ bch2_invalidate_one_bucket(ca, e.g);
}
mutex_unlock(&ca->fs->bucket_lock);
@@ -739,8 +739,8 @@ static void invalidate_buckets_fifo(struct bch_dev *ca)
g = ca->buckets + ca->fifo_last_bucket++;
- if (bch_can_invalidate_bucket(ca, g))
- bch_invalidate_one_bucket(ca, g);
+ if (bch2_can_invalidate_bucket(ca, g))
+ bch2_invalidate_one_bucket(ca, g);
if (++checked >= ca->mi.nbuckets)
return;
@@ -753,14 +753,14 @@ static void invalidate_buckets_random(struct bch_dev *ca)
size_t checked = 0;
while (!fifo_full(&ca->free_inc)) {
- size_t n = bch_rand_range(ca->mi.nbuckets -
+ size_t n = bch2_rand_range(ca->mi.nbuckets -
ca->mi.first_bucket) +
ca->mi.first_bucket;
g = ca->buckets + n;
- if (bch_can_invalidate_bucket(ca, g))
- bch_invalidate_one_bucket(ca, g);
+ if (bch2_can_invalidate_bucket(ca, g))
+ bch2_invalidate_one_bucket(ca, g);
if (++checked >= ca->mi.nbuckets / 2)
return;
@@ -784,7 +784,7 @@ static void invalidate_buckets(struct bch_dev *ca)
}
}
-static bool __bch_allocator_push(struct bch_dev *ca, long bucket)
+static bool __bch2_allocator_push(struct bch_dev *ca, long bucket)
{
if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
goto success;
@@ -804,12 +804,12 @@ success:
return true;
}
-static bool bch_allocator_push(struct bch_dev *ca, long bucket)
+static bool bch2_allocator_push(struct bch_dev *ca, long bucket)
{
bool ret;
spin_lock(&ca->freelist_lock);
- ret = __bch_allocator_push(ca, bucket);
+ ret = __bch2_allocator_push(ca, bucket);
if (ret)
fifo_pop(&ca->free_inc, bucket);
spin_unlock(&ca->freelist_lock);
@@ -817,7 +817,7 @@ static bool bch_allocator_push(struct bch_dev *ca, long bucket)
return ret;
}
-static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca)
+static void bch2_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca)
{
u16 last_seq_ondisk = c->journal.last_seq_ondisk;
struct bucket *g;
@@ -831,7 +831,7 @@ static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca)
!bucket_needs_journal_commit(m, last_seq_ondisk)) {
spin_lock(&ca->freelist_lock);
- bch_mark_alloc_bucket(ca, g, true);
+ bch2_mark_alloc_bucket(ca, g, true);
g->read_prio = c->prio_clock[READ].hand;
g->write_prio = c->prio_clock[WRITE].hand;
@@ -854,7 +854,7 @@ static void bch_find_empty_buckets(struct bch_fs *c, struct bch_dev *ca)
* of free_inc, try to invalidate some buckets and write out
* prios and gens.
*/
-static int bch_allocator_thread(void *arg)
+static int bch2_allocator_thread(void *arg)
{
struct bch_dev *ca = arg;
struct bch_fs *c = ca->fs;
@@ -862,7 +862,7 @@ static int bch_allocator_thread(void *arg)
set_freezable();
- bch_find_empty_buckets(c, ca);
+ bch2_find_empty_buckets(c, ca);
while (1) {
/*
@@ -888,7 +888,7 @@ static int bch_allocator_thread(void *arg)
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- if (bch_allocator_push(ca, bucket))
+ if (bch2_allocator_push(ca, bucket))
break;
if (kthread_should_stop()) {
@@ -908,7 +908,7 @@ static int bch_allocator_thread(void *arg)
* See if we have buckets we can reuse without invalidating them
* or forcing a journal commit:
*/
- //bch_find_empty_buckets(c, ca);
+ //bch2_find_empty_buckets(c, ca);
if (fifo_used(&ca->free_inc) * 2 > ca->free_inc.size) {
up_read(&c->gc_lock);
@@ -931,7 +931,7 @@ static int bch_allocator_thread(void *arg)
*/
invalidate_buckets(ca);
- trace_bcache_alloc_batch(ca, fifo_used(&ca->free_inc),
+ trace_alloc_batch(ca, fifo_used(&ca->free_inc),
ca->free_inc.size);
}
@@ -941,7 +941,7 @@ static int bch_allocator_thread(void *arg)
* free_inc is full of newly-invalidated buckets, must write out
* prios and gens before they can be re-used
*/
- ret = bch_prio_write(ca);
+ ret = bch2_prio_write(ca);
if (ret) {
/*
* Emergency read only - allocator thread has to
@@ -959,7 +959,7 @@ static int bch_allocator_thread(void *arg)
long bucket;
fifo_pop(&ca->free_inc, bucket);
- bch_mark_free_bucket(ca, ca->buckets + bucket);
+ bch2_mark_free_bucket(ca, ca->buckets + bucket);
}
spin_unlock(&ca->freelist_lock);
goto out;
@@ -967,7 +967,7 @@ static int bch_allocator_thread(void *arg)
}
out:
/*
- * Avoid a race with bch_usage_update() trying to wake us up after
+ * Avoid a race with bch2_usage_update() trying to wake us up after
* we've exited:
*/
synchronize_rcu();
@@ -981,7 +981,7 @@ out:
*
* Returns index of bucket on success, 0 on failure
* */
-size_t bch_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve)
+size_t bch2_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve)
{
struct bucket *g;
long r;
@@ -993,15 +993,15 @@ size_t bch_bucket_alloc(struct bch_dev *ca, enum alloc_reserve reserve)
spin_unlock(&ca->freelist_lock);
- trace_bcache_bucket_alloc_fail(ca, reserve);
+ trace_bucket_alloc_fail(ca, reserve);
return 0;
out:
verify_not_on_freelist(ca, r);
spin_unlock(&ca->freelist_lock);
- trace_bcache_bucket_alloc(ca, reserve);
+ trace_bucket_alloc(ca, reserve);
- bch_wake_allocator(ca);
+ bch2_wake_allocator(ca);
g = ca->buckets + r;
@@ -1011,9 +1011,9 @@ out:
return r;
}
-static void __bch_bucket_free(struct bch_dev *ca, struct bucket *g)
+static void __bch2_bucket_free(struct bch_dev *ca, struct bucket *g)
{
- bch_mark_free_bucket(ca, g);
+ bch2_mark_free_bucket(ca, g);
g->read_prio = ca->fs->prio_clock[READ].hand;
g->write_prio = ca->fs->prio_clock[WRITE].hand;
@@ -1053,7 +1053,7 @@ static void recalc_alloc_group_weights(struct bch_fs *c,
}
}
-static enum bucket_alloc_ret bch_bucket_alloc_group(struct bch_fs *c,
+static enum bucket_alloc_ret bch2_bucket_alloc_group(struct bch_fs *c,
struct open_bucket *ob,
enum alloc_reserve reserve,
unsigned nr_replicas,
@@ -1104,7 +1104,7 @@ static enum bucket_alloc_ret bch_bucket_alloc_group(struct bch_fs *c,
get_random_int() > devs->d[i].weight)
continue;
- bucket = bch_bucket_alloc(ca, reserve);
+ bucket = bch2_bucket_alloc(ca, reserve);
if (!bucket) {
if (fail_idx == -1)
fail_idx = i;
@@ -1141,7 +1141,7 @@ err:
return ret;
}
-static enum bucket_alloc_ret __bch_bucket_alloc_set(struct bch_fs *c,
+static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c,
struct write_point *wp,
struct open_bucket *ob,
unsigned nr_replicas,
@@ -1156,20 +1156,20 @@ static enum bucket_alloc_ret __bch_bucket_alloc_set(struct bch_fs *c,
* XXX: switch off wp->type and do something more intelligent here
*/
if (wp->group)
- return bch_bucket_alloc_group(c, ob, reserve, nr_replicas,
+ return bch2_bucket_alloc_group(c, ob, reserve, nr_replicas,
wp->group, devs_used);
/* foreground writes: prefer fastest tier: */
tier = READ_ONCE(c->fastest_tier);
if (tier)
- bch_bucket_alloc_group(c, ob, reserve, nr_replicas,
+ bch2_bucket_alloc_group(c, ob, reserve, nr_replicas,
&tier->devs, devs_used);
- return bch_bucket_alloc_group(c, ob, reserve, nr_replicas,
+ return bch2_bucket_alloc_group(c, ob, reserve, nr_replicas,
&c->all_devs, devs_used);
}
-static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
+static int bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
struct open_bucket *ob, unsigned nr_replicas,
enum alloc_reserve reserve, long *devs_used,
struct closure *cl)
@@ -1177,7 +1177,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
bool waiting = false;
while (1) {
- switch (__bch_bucket_alloc_set(c, wp, ob, nr_replicas,
+ switch (__bch2_bucket_alloc_set(c, wp, ob, nr_replicas,
reserve, devs_used)) {
case ALLOC_SUCCESS:
if (waiting)
@@ -1192,7 +1192,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
case FREELIST_EMPTY:
if (!cl || waiting)
- trace_bcache_freelist_empty_fail(c,
+ trace_freelist_empty_fail(c,
reserve, cl);
if (!cl)
@@ -1229,7 +1229,7 @@ static int bch_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
* reference _after_ doing the index update that makes its allocation reachable.
*/
-static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
+static void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
const struct bch_extent_ptr *ptr;
@@ -1238,7 +1238,7 @@ static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
open_bucket_for_each_ptr(ob, ptr) {
struct bch_dev *ca = c->devs[ptr->dev];
- bch_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), false);
+ bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), false);
}
ob->nr_ptrs = 0;
@@ -1248,16 +1248,16 @@ static void __bch_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
closure_wake_up(&c->open_buckets_wait);
}
-void bch_open_bucket_put(struct bch_fs *c, struct open_bucket *b)
+void bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *b)
{
if (atomic_dec_and_test(&b->pin)) {
spin_lock(&c->open_buckets_lock);
- __bch_open_bucket_put(c, b);
+ __bch2_open_bucket_put(c, b);
spin_unlock(&c->open_buckets_lock);
}
}
-static struct open_bucket *bch_open_bucket_get(struct bch_fs *c,
+static struct open_bucket *bch2_open_bucket_get(struct bch_fs *c,
unsigned nr_reserved,
struct closure *cl)
{
@@ -1276,9 +1276,9 @@ static struct open_bucket *bch_open_bucket_get(struct bch_fs *c,
ret->has_full_ptrs = false;
c->open_buckets_nr_free--;
- trace_bcache_open_bucket_alloc(c, cl);
+ trace_open_bucket_alloc(c, cl);
} else {
- trace_bcache_open_bucket_alloc_fail(c, cl);
+ trace_open_bucket_alloc_fail(c, cl);
if (cl) {
closure_wait(&c->open_buckets_wait, cl);
@@ -1396,7 +1396,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
for (i = 0; i < ob->nr_ptrs; i++)
__set_bit(ob->ptrs[i].dev, devs_used);
- ret = bch_bucket_alloc_set(c, wp, ob, nr_replicas,
+ ret = bch2_bucket_alloc_set(c, wp, ob, nr_replicas,
reserve, devs_used, cl);
if (ret == -EROFS &&
@@ -1409,12 +1409,12 @@ static int open_bucket_add_buckets(struct bch_fs *c,
/*
* Get us an open_bucket we can allocate from, return with it locked:
*/
-struct open_bucket *bch_alloc_sectors_start(struct bch_fs *c,
- struct write_point *wp,
- unsigned nr_replicas,
- unsigned nr_replicas_required,
- enum alloc_reserve reserve,
- struct closure *cl)
+struct open_bucket *bch2_alloc_sectors_start(struct bch_fs *c,
+ struct write_point *wp,
+ unsigned nr_replicas,
+ unsigned nr_replicas_required,
+ enum alloc_reserve reserve,
+ struct closure *cl)
{
struct open_bucket *ob;
unsigned open_buckets_reserved = wp == &c->btree_write_point
@@ -1435,7 +1435,7 @@ retry:
if (!ob || ob->has_full_ptrs) {
struct open_bucket *new_ob;
- new_ob = bch_open_bucket_get(c, open_buckets_reserved, cl);
+ new_ob = bch2_open_bucket_get(c, open_buckets_reserved, cl);
if (IS_ERR(new_ob))
return new_ob;
@@ -1449,7 +1449,7 @@ retry:
cmpxchg(&wp->b, ob, new_ob) != ob) {
/* We raced: */
mutex_unlock(&new_ob->lock);
- bch_open_bucket_put(c, new_ob);
+ bch2_open_bucket_put(c, new_ob);
if (ob)
mutex_unlock(&ob->lock);
@@ -1459,7 +1459,7 @@ retry:
if (ob) {
open_bucket_copy_unused_ptrs(c, new_ob, ob);
mutex_unlock(&ob->lock);
- bch_open_bucket_put(c, ob);
+ bch2_open_bucket_put(c, ob);
}
ob = new_ob;
@@ -1485,9 +1485,9 @@ retry:
* Append pointers to the space we just allocated to @k, and mark @sectors space
* as allocated out of @ob
*/
-void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e,
- unsigned nr_replicas, struct open_bucket *ob,
- unsigned sectors)
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e,
+ unsigned nr_replicas, struct open_bucket *ob,
+ unsigned sectors)
{
struct bch_extent_ptr tmp;
bool has_data = false;
@@ -1495,7 +1495,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e,
/*
* We're keeping any existing pointer k has, and appending new pointers:
- * __bch_write() will only write to the pointers we add here:
+ * __bch2_write() will only write to the pointers we add here:
*/
BUG_ON(sectors > ob->sectors_free);
@@ -1505,7 +1505,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e,
has_data = true;
for (i = 0; i < min(ob->nr_ptrs, nr_replicas); i++) {
- EBUG_ON(bch_extent_has_device(extent_i_to_s_c(e), ob->ptrs[i].dev));
+ EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptrs[i].dev));
tmp = ob->ptrs[i];
tmp.cached = bkey_extent_is_cached(&e->k);
@@ -1522,7 +1522,7 @@ void bch_alloc_sectors_append_ptrs(struct bch_fs *c, struct bkey_i_extent *e,
* Append pointers to the space we just allocated to @k, and mark @sectors space
* as allocated out of @ob
*/
-void bch_alloc_sectors_done(struct bch_fs *c, struct write_point *wp,
+void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp,
struct open_bucket *ob)
{
bool has_data = false;
@@ -1560,35 +1560,35 @@ void bch_alloc_sectors_done(struct bch_fs *c, struct write_point *wp,
* @k - key to return the allocated space information.
* @cl - closure to wait for a bucket
*/
-struct open_bucket *bch_alloc_sectors(struct bch_fs *c,
- struct write_point *wp,
- struct bkey_i_extent *e,
- unsigned nr_replicas,
- unsigned nr_replicas_required,
- enum alloc_reserve reserve,
- struct closure *cl)
+struct open_bucket *bch2_alloc_sectors(struct bch_fs *c,
+ struct write_point *wp,
+ struct bkey_i_extent *e,
+ unsigned nr_replicas,
+ unsigned nr_replicas_required,
+ enum alloc_reserve reserve,
+ struct closure *cl)
{
struct open_bucket *ob;
- ob = bch_alloc_sectors_start(c, wp, nr_replicas,
+ ob = bch2_alloc_sectors_start(c, wp, nr_replicas,
nr_replicas_required,
reserve, cl);
if (IS_ERR_OR_NULL(ob))
return ob;
if (e->k.size > ob->sectors_free)
- bch_key_resize(&e->k, ob->sectors_free);
+ bch2_key_resize(&e->k, ob->sectors_free);
- bch_alloc_sectors_append_ptrs(c, e, nr_replicas, ob, e->k.size);
+ bch2_alloc_sectors_append_ptrs(c, e, nr_replicas, ob, e->k.size);
- bch_alloc_sectors_done(c, wp, ob);
+ bch2_alloc_sectors_done(c, wp, ob);
return ob;
}
/* Startup/shutdown (ro/rw): */
-void bch_recalc_capacity(struct bch_fs *c)
+void bch2_recalc_capacity(struct bch_fs *c)
{
struct bch_tier *fastest_tier = NULL, *slowest_tier = NULL, *tier;
struct bch_dev *ca;
@@ -1676,14 +1676,14 @@ set_capacity:
c->capacity = capacity;
if (c->capacity) {
- bch_io_timer_add(&c->io_clock[READ],
+ bch2_io_timer_add(&c->io_clock[READ],
&c->prio_clock[READ].rescale);
- bch_io_timer_add(&c->io_clock[WRITE],
+ bch2_io_timer_add(&c->io_clock[WRITE],
&c->prio_clock[WRITE].rescale);
} else {
- bch_io_timer_del(&c->io_clock[READ],
+ bch2_io_timer_del(&c->io_clock[READ],
&c->prio_clock[READ].rescale);
- bch_io_timer_del(&c->io_clock[WRITE],
+ bch2_io_timer_del(&c->io_clock[WRITE],
&c->prio_clock[WRITE].rescale);
}
@@ -1691,7 +1691,7 @@ set_capacity:
closure_wake_up(&c->freelist_wait);
}
-static void bch_stop_write_point(struct bch_dev *ca,
+static void bch2_stop_write_point(struct bch_dev *ca,
struct write_point *wp)
{
struct bch_fs *c = ca->fs;
@@ -1713,10 +1713,10 @@ found:
mutex_unlock(&ob->lock);
/* Drop writepoint's ref: */
- bch_open_bucket_put(c, ob);
+ bch2_open_bucket_put(c, ob);
}
-static bool bch_dev_has_open_write_point(struct bch_dev *ca)
+static bool bch2_dev_has_open_write_point(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct bch_extent_ptr *ptr;
@@ -1739,7 +1739,7 @@ static bool bch_dev_has_open_write_point(struct bch_dev *ca)
}
/* device goes ro: */
-void bch_dev_allocator_stop(struct bch_dev *ca)
+void bch2_dev_allocator_stop(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct dev_group *tier = &c->tiers[ca->mi.tier].devs;
@@ -1751,10 +1751,10 @@ void bch_dev_allocator_stop(struct bch_dev *ca)
/* First, remove device from allocation groups: */
- bch_dev_group_remove(tier, ca);
- bch_dev_group_remove(&c->all_devs, ca);
+ bch2_dev_group_remove(tier, ca);
+ bch2_dev_group_remove(&c->all_devs, ca);
- bch_recalc_capacity(c);
+ bch2_recalc_capacity(c);
/*
* Stopping the allocator thread comes after removing from allocation
@@ -1767,7 +1767,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca)
/*
* We need an rcu barrier between setting ca->alloc_thread = NULL and
- * the thread shutting down to avoid a race with bch_usage_update() -
+ * the thread shutting down to avoid a race with bch2_usage_update() -
* the allocator thread itself does a synchronize_rcu() on exit.
*
* XXX: it would be better to have the rcu barrier be asynchronous
@@ -1781,20 +1781,20 @@ void bch_dev_allocator_stop(struct bch_dev *ca)
/* Next, close write points that point to this device... */
for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
- bch_stop_write_point(ca, &c->write_points[i]);
+ bch2_stop_write_point(ca, &c->write_points[i]);
- bch_stop_write_point(ca, &ca->copygc_write_point);
- bch_stop_write_point(ca, &c->promote_write_point);
- bch_stop_write_point(ca, &ca->tiering_write_point);
- bch_stop_write_point(ca, &c->migration_write_point);
- bch_stop_write_point(ca, &c->btree_write_point);
+ bch2_stop_write_point(ca, &ca->copygc_write_point);
+ bch2_stop_write_point(ca, &c->promote_write_point);
+ bch2_stop_write_point(ca, &ca->tiering_write_point);
+ bch2_stop_write_point(ca, &c->migration_write_point);
+ bch2_stop_write_point(ca, &c->btree_write_point);
mutex_lock(&c->btree_reserve_cache_lock);
while (c->btree_reserve_cache_nr) {
struct btree_alloc *a =
&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
- bch_open_bucket_put(c, a->ob);
+ bch2_open_bucket_put(c, a->ob);
}
mutex_unlock(&c->btree_reserve_cache_lock);
@@ -1808,7 +1808,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca)
while (1) {
closure_wait(&c->open_buckets_wait, &cl);
- if (!bch_dev_has_open_write_point(ca)) {
+ if (!bch2_dev_has_open_write_point(ca)) {
closure_wake_up(&c->open_buckets_wait);
break;
}
@@ -1820,7 +1820,7 @@ void bch_dev_allocator_stop(struct bch_dev *ca)
/*
* Startup the allocator thread for transition to RW mode:
*/
-int bch_dev_allocator_start(struct bch_dev *ca)
+int bch2_dev_allocator_start(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct dev_group *tier = &c->tiers[ca->mi.tier].devs;
@@ -1834,26 +1834,26 @@ int bch_dev_allocator_start(struct bch_dev *ca)
if (ca->alloc_thread)
return 0;
- k = kthread_create(bch_allocator_thread, ca, "bcache_allocator");
+ k = kthread_create(bch2_allocator_thread, ca, "bcache_allocator");
if (IS_ERR(k))
return 0;
get_task_struct(k);
ca->alloc_thread = k;
- bch_dev_group_add(tier, ca);
- bch_dev_group_add(&c->all_devs, ca);
+ bch2_dev_group_add(tier, ca);
+ bch2_dev_group_add(&c->all_devs, ca);
mutex_lock(&c->sb_lock);
- journal_buckets = bch_sb_get_journal(ca->disk_sb.sb);
- has_journal = bch_nr_journal_buckets(journal_buckets) >=
+ journal_buckets = bch2_sb_get_journal(ca->disk_sb.sb);
+ has_journal = bch2_nr_journal_buckets(journal_buckets) >=
BCH_JOURNAL_BUCKETS_MIN;
mutex_unlock(&c->sb_lock);
if (has_journal)
- bch_dev_group_add(&c->journal.devs, ca);
+ bch2_dev_group_add(&c->journal.devs, ca);
- bch_recalc_capacity(c);
+ bch2_recalc_capacity(c);
/*
* Don't wake up allocator thread until after adding device to
@@ -1864,15 +1864,15 @@ int bch_dev_allocator_start(struct bch_dev *ca)
return 0;
}
-void bch_fs_allocator_init(struct bch_fs *c)
+void bch2_fs_allocator_init(struct bch_fs *c)
{
unsigned i;
INIT_LIST_HEAD(&c->open_buckets_open);
INIT_LIST_HEAD(&c->open_buckets_free);
spin_lock_init(&c->open_buckets_lock);
- bch_prio_timer_init(c, READ);
- bch_prio_timer_init(c, WRITE);
+ bch2_prio_timer_init(c, READ);
+ bch2_prio_timer_init(c, WRITE);
/* open bucket 0 is a sentinal NULL: */
mutex_init(&c->open_buckets[0].lock);
@@ -1896,12 +1896,12 @@ void bch_fs_allocator_init(struct bch_fs *c)
INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
spin_lock_init(&c->foreground_write_pd_lock);
- bch_pd_controller_init(&c->foreground_write_pd);
+ bch2_pd_controller_init(&c->foreground_write_pd);
/*
* We do not want the write rate to have an effect on the computed
* rate, for two reasons:
*
- * We do not call bch_ratelimit_delay() at all if the write rate
+ * We do not call bch2_ratelimit_delay() at all if the write rate
* exceeds 1GB/s. In this case, the PD controller will think we are
* not "keeping up" and not change the rate.
*/
@@ -1909,5 +1909,5 @@ void bch_fs_allocator_init(struct bch_fs *c)
init_timer(&c->foreground_write_wakeup);
c->foreground_write_wakeup.data = (unsigned long) c;
- c->foreground_write_wakeup.function = bch_wake_delayed_writes;
+ c->foreground_write_wakeup.function = bch2_wake_delayed_writes;
}
diff --git a/fs/bcachefs/alloc.h b/fs/bcachefs/alloc.h
index f8aa762de2e0..08638b259667 100644
--- a/fs/bcachefs/alloc.h
+++ b/fs/bcachefs/alloc.h
@@ -20,31 +20,31 @@ static inline size_t prio_buckets(const struct bch_dev *ca)
return DIV_ROUND_UP((size_t) (ca)->mi.nbuckets, prios_per_bucket(ca));
}
-void bch_dev_group_remove(struct dev_group *, struct bch_dev *);
-void bch_dev_group_add(struct dev_group *, struct bch_dev *);
+void bch2_dev_group_remove(struct dev_group *, struct bch_dev *);
+void bch2_dev_group_add(struct dev_group *, struct bch_dev *);
-int bch_prio_read(struct bch_dev *);
+int bch2_prio_read(struct bch_dev *);
-size_t bch_bucket_alloc(struct bch_dev *, enum alloc_reserve);
+size_t bch2_bucket_alloc(struct bch_dev *, enum alloc_reserve);
-void bch_open_bucket_put(struct bch_fs *, struct open_bucket *);
+void bch2_open_bucket_put(struct bch_fs *, struct open_bucket *);
-struct open_bucket *bch_alloc_sectors_start(struct bch_fs *,
+struct open_bucket *bch2_alloc_sectors_start(struct bch_fs *,
struct write_point *,
unsigned, unsigned,
enum alloc_reserve,
struct closure *);
-void bch_alloc_sectors_append_ptrs(struct bch_fs *, struct bkey_i_extent *,
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct bkey_i_extent *,
unsigned, struct open_bucket *, unsigned);
-void bch_alloc_sectors_done(struct bch_fs *, struct write_point *,
+void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *,
struct open_bucket *);
-struct open_bucket *bch_alloc_sectors(struct bch_fs *, struct write_point *,
+struct open_bucket *bch2_alloc_sectors(struct bch_fs *, struct write_point *,
struct bkey_i_extent *, unsigned, unsigned,
enum alloc_reserve, struct closure *);
-static inline void bch_wake_allocator(struct bch_dev *ca)
+static inline void bch2_wake_allocator(struct bch_dev *ca)
{
struct task_struct *p;
@@ -77,9 +77,9 @@ static inline struct bch_dev *dev_group_next(struct dev_group *devs,
(_ptr) < (_ob)->ptrs + (_ob)->nr_ptrs; \
(_ptr)++)
-void bch_recalc_capacity(struct bch_fs *);
-void bch_dev_allocator_stop(struct bch_dev *);
-int bch_dev_allocator_start(struct bch_dev *);
-void bch_fs_allocator_init(struct bch_fs *);
+void bch2_recalc_capacity(struct bch_fs *);
+void bch2_dev_allocator_stop(struct bch_dev *);
+int bch2_dev_allocator_start(struct bch_dev *);
+void bch2_fs_allocator_init(struct bch_fs *);
#endif /* _BCACHE_ALLOC_H */
diff --git a/fs/bcachefs/bcache.h b/fs/bcachefs/bcachefs.h
index 61209bfb15d4..6e08947ca0b2 100644
--- a/fs/bcachefs/bcache.h
+++ b/fs/bcachefs/bcachefs.h
@@ -176,7 +176,7 @@
*/
#undef pr_fmt
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
+#define pr_fmt(fmt) "bcachefs: %s() " fmt "\n", __func__
#include <linux/bug.h>
#include <linux/bio.h>
@@ -203,25 +203,25 @@
#include <linux/dynamic_fault.h>
-#define bch_fs_init_fault(name) \
- dynamic_fault("bcache:bch_fs_init:" name)
-#define bch_meta_read_fault(name) \
- dynamic_fault("bcache:meta:read:" name)
-#define bch_meta_write_fault(name) \
- dynamic_fault("bcache:meta:write:" name)
+#define bch2_fs_init_fault(name) \
+ dynamic_fault("bcachefs:bch_fs_init:" name)
+#define bch2_meta_read_fault(name) \
+ dynamic_fault("bcachefs:meta:read:" name)
+#define bch2_meta_write_fault(name) \
+ dynamic_fault("bcachefs:meta:write:" name)
-#ifndef bch_fmt
-#define bch_fmt(_c, fmt) "bcache (%s): " fmt "\n", ((_c)->name)
+#ifndef bch2_fmt
+#define bch2_fmt(_c, fmt) "bcachefs (%s): " fmt "\n", ((_c)->name)
#endif
#define bch_info(c, fmt, ...) \
- printk(KERN_INFO bch_fmt(c, fmt), ##__VA_ARGS__)
+ printk(KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
- printk(KERN_NOTICE bch_fmt(c, fmt), ##__VA_ARGS__)
+ printk(KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
- printk(KERN_WARNING bch_fmt(c, fmt), ##__VA_ARGS__)
+ printk(KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err(c, fmt, ...) \
- printk(KERN_ERR bch_fmt(c, fmt), ##__VA_ARGS__)
+ printk(KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_verbose(c, fmt, ...) \
do { \
@@ -269,8 +269,7 @@ do { \
/* name, frequency_units, duration_units */
#define BCH_TIME_STATS() \
- BCH_TIME_STAT(mca_alloc, sec, us) \
- BCH_TIME_STAT(mca_scan, sec, ms) \
+ BCH_TIME_STAT(btree_node_mem_alloc, sec, us) \
BCH_TIME_STAT(btree_gc, sec, ms) \
BCH_TIME_STAT(btree_coalesce, sec, ms) \
BCH_TIME_STAT(btree_split, sec, us) \
@@ -350,7 +349,7 @@ struct bch_dev {
u8 dev_idx;
/*
* Cached version of this device's member info from superblock
- * Committed by bch_write_super() -> bch_fs_mi_update()
+ * Committed by bch2_write_super() -> bch_fs_mi_update()
*/
struct bch_member_cpu mi;
uuid_le uuid;
@@ -505,7 +504,7 @@ struct bch_fs {
struct bch_opts opts;
- /* Updated by bch_sb_update():*/
+ /* Updated by bch2_sb_update():*/
struct {
uuid_le uuid;
uuid_le user_uuid;
@@ -772,7 +771,7 @@ struct bch_fs {
#undef BCH_TIME_STAT
};
-static inline bool bch_fs_running(struct bch_fs *c)
+static inline bool bch2_fs_running(struct bch_fs *c)
{
return c->state == BCH_FS_RO || c->state == BCH_FS_RW;
}
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 5bed6ed488ab..0a0dc8708b94 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -391,7 +391,7 @@ struct bch_csum {
#define BCH_CSUM_CHACHA20_POLY1305_128 4U
#define BCH_CSUM_NR 5U
-static inline _Bool bch_csum_type_is_encryption(unsigned type)
+static inline _Bool bch2_csum_type_is_encryption(unsigned type)
{
switch (type) {
case BCH_CSUM_CHACHA20_POLY1305_80:
@@ -805,7 +805,7 @@ enum cache_replacement {
};
struct bch_sb_layout {
- uuid_le magic; /* bcache superblock UUID */
+ uuid_le magic; /* bcachefs superblock UUID */
__u8 layout_type;
__u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
__u8 nr_superblocks;
@@ -893,7 +893,7 @@ struct bch_sb_field_replication {
/*
* @offset - sector where this sb was written
* @version - on disk format version
- * @magic - identifies as a bcache superblock (BCACHE_MAGIC)
+ * @magic - identifies as a bcachefs superblock (BCACHE_MAGIC)
* @seq - incremented each time superblock is written
* @uuid - used for generating various magic numbers and identifying
* member devices, never changes
@@ -1035,7 +1035,7 @@ struct backingdev_sb {
__le64 offset; /* sector where this sb was written */
__le64 version; /* of on disk format */
- uuid_le magic; /* bcache superblock UUID */
+ uuid_le magic; /* bcachefs superblock UUID */
uuid_le disk_uuid;
@@ -1116,7 +1116,7 @@ static inline _Bool SB_IS_BDEV(const struct bch_sb *sb)
#define PSET_MAGIC __cpu_to_le64(0x6750e15f87337f91ULL)
#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
-static inline __le64 __bch_sb_magic(struct bch_sb *sb)
+static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
memcpy(&ret, &sb->uuid, sizeof(ret));
@@ -1125,17 +1125,17 @@ static inline __le64 __bch_sb_magic(struct bch_sb *sb)
static inline __u64 __jset_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(__bch_sb_magic(sb) ^ JSET_MAGIC);
+ return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}
static inline __u64 __pset_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(__bch_sb_magic(sb) ^ PSET_MAGIC);
+ return __le64_to_cpu(__bch2_sb_magic(sb) ^ PSET_MAGIC);
}
static inline __u64 __bset_magic(struct bch_sb *sb)
{
- return __le64_to_cpu(__bch_sb_magic(sb) ^ BSET_MAGIC);
+ return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */
diff --git a/fs/bcachefs/bkey.c b/fs/bcachefs/bkey.c
index 05fa0ac9ac9a..b9ceb6ead6aa 100644
--- a/fs/bcachefs/bkey.c
+++ b/fs/bcachefs/bkey.c
@@ -1,18 +1,15 @@
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
-
-#include <linux/kernel.h>
-
+#include "bcachefs.h"
#include "bkey.h"
#include "bset.h"
#include "util.h"
-const struct bkey_format bch_bkey_format_current = BKEY_FORMAT_CURRENT;
+const struct bkey_format bch2_bkey_format_current = BKEY_FORMAT_CURRENT;
-struct bkey __bkey_unpack_key(const struct bkey_format *,
+struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
const struct bkey_packed *);
-void bch_to_binary(char *out, const u64 *p, unsigned nr_bits)
+void bch2_to_binary(char *out, const u64 *p, unsigned nr_bits)
{
unsigned bit = high_bit_offset, done = 0;
@@ -36,7 +33,7 @@ void bch_to_binary(char *out, const u64 *p, unsigned nr_bits)
#ifdef CONFIG_BCACHEFS_DEBUG
-static void bch_bkey_pack_verify(const struct bkey_packed *packed,
+static void bch2_bkey_pack_verify(const struct bkey_packed *packed,
const struct bkey *unpacked,
const struct bkey_format *format)
{
@@ -47,16 +44,16 @@ static void bch_bkey_pack_verify(const struct bkey_packed *packed,
BUG_ON(packed->u64s < bkeyp_key_u64s(format, packed));
- tmp = __bkey_unpack_key(format, packed);
+ tmp = __bch2_bkey_unpack_key(format, packed);
if (memcmp(&tmp, unpacked, sizeof(struct bkey))) {
char buf1[160], buf2[160];
char buf3[160], buf4[160];
- bch_bkey_to_text(buf1, sizeof(buf1), unpacked);
- bch_bkey_to_text(buf2, sizeof(buf2), &tmp);
- bch_to_binary(buf3, (void *) unpacked, 80);
- bch_to_binary(buf4, high_word(format, packed), 80);
+ bch2_bkey_to_text(buf1, sizeof(buf1), unpacked);
+ bch2_bkey_to_text(buf2, sizeof(buf2), &tmp);
+ bch2_to_binary(buf3, (void *) unpacked, 80);
+ bch2_to_binary(buf4, high_word(format, packed), 80);
panic("keys differ: format u64s %u fields %u %u %u %u %u\n%s\n%s\n%s\n%s\n",
format->key_u64s,
@@ -70,12 +67,12 @@ static void bch_bkey_pack_verify(const struct bkey_packed *packed,
}
#else
-static inline void bch_bkey_pack_verify(const struct bkey_packed *packed,
+static inline void bch2_bkey_pack_verify(const struct bkey_packed *packed,
const struct bkey *unpacked,
const struct bkey_format *format) {}
#endif
-int bch_bkey_to_text(char *buf, size_t size, const struct bkey *k)
+int bch2_bkey_to_text(char *buf, size_t size, const struct bkey *k)
{
char *out = buf, *end = buf + size;
@@ -218,7 +215,7 @@ static bool set_inc_field(struct pack_state *state, unsigned field, u64 v)
* Also: doesn't work on extents - it doesn't preserve the invariant that
* if k is packed bkey_start_pos(k) will successfully pack
*/
-static bool bch_bkey_transform_key(const struct bkey_format *out_f,
+static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
struct bkey_packed *out,
const struct bkey_format *in_f,
const struct bkey_packed *in)
@@ -244,12 +241,12 @@ static bool bch_bkey_transform_key(const struct bkey_format *out_f,
return true;
}
-bool bch_bkey_transform(const struct bkey_format *out_f,
+bool bch2_bkey_transform(const struct bkey_format *out_f,
struct bkey_packed *out,
const struct bkey_format *in_f,
const struct bkey_packed *in)
{
- if (!bch_bkey_transform_key(out_f, out, in_f, in))
+ if (!bch2_bkey_transform_key(out_f, out, in_f, in))
return false;
memcpy_u64s((u64 *) out + out_f->key_u64s,
@@ -266,7 +263,7 @@ bool bch_bkey_transform(const struct bkey_format *out_f,
x(BKEY_FIELD_VERSION_HI, version.hi) \
x(BKEY_FIELD_VERSION_LO, version.lo)
-struct bkey __bkey_unpack_key(const struct bkey_format *format,
+struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
const struct bkey_packed *in)
{
struct unpack_state state = unpack_state_init(format, in);
@@ -310,9 +307,9 @@ struct bpos __bkey_unpack_pos(const struct bkey_format *format,
#endif
/**
- * bkey_pack_key -- pack just the key, not the value
+ * bch2_bkey_pack_key -- pack just the key, not the value
*/
-bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
+bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
const struct bkey_format *format)
{
struct pack_state state = pack_state_init(format, out);
@@ -340,14 +337,14 @@ bool bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
out->needs_whiteout = in->needs_whiteout;
out->type = in->type;
- bch_bkey_pack_verify(out, in, format);
+ bch2_bkey_pack_verify(out, in, format);
return true;
}
/**
- * bkey_unpack -- unpack the key and the value
+ * bch2_bkey_unpack -- unpack the key and the value
*/
-void bkey_unpack(const struct btree *b, struct bkey_i *dst,
+void bch2_bkey_unpack(const struct btree *b, struct bkey_i *dst,
const struct bkey_packed *src)
{
dst->k = bkey_unpack_key(b, src);
@@ -358,14 +355,14 @@ void bkey_unpack(const struct btree *b, struct bkey_i *dst,
}
/**
- * bkey_pack -- pack the key and the value
+ * bch2_bkey_pack -- pack the key and the value
*/
-bool bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
+bool bch2_bkey_pack(struct bkey_packed *out, const struct bkey_i *in,
const struct bkey_format *format)
{
struct bkey_packed tmp;
- if (!bkey_pack_key(&tmp, &in->k, format))
+ if (!bch2_bkey_pack_key(&tmp, &in->k, format))
return false;
memmove_u64s((u64 *) out + format->key_u64s,
@@ -456,7 +453,7 @@ static bool bkey_packed_successor(struct bkey_packed *out,
* legal to use a packed pos that isn't equivalent to the original pos,
* _provided_ it compares <= to the original pos.
*/
-enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
+enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
struct bpos in,
const struct btree *b)
{
@@ -525,7 +522,7 @@ enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *out,
return exact ? BKEY_PACK_POS_EXACT : BKEY_PACK_POS_SMALLER;
}
-void bch_bkey_format_init(struct bkey_format_state *s)
+void bch2_bkey_format_init(struct bkey_format_state *s)
{
unsigned i;
@@ -549,7 +546,7 @@ static void __bkey_format_add(struct bkey_format_state *s,
/*
* Changes @format so that @k can be successfully packed with @format
*/
-void bch_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
+void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
{
#define x(id, field) __bkey_format_add(s, id, k->field);
bkey_fields()
@@ -557,7 +554,7 @@ void bch_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
__bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
}
-void bch_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
+void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
{
unsigned field = 0;
@@ -580,7 +577,7 @@ static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
f->field_offset[i] = cpu_to_le64(offset);
}
-struct bkey_format bch_bkey_format_done(struct bkey_format_state *s)
+struct bkey_format bch2_bkey_format_done(struct bkey_format_state *s)
{
unsigned i, bits = KEY_PACKED_BITS_START;
struct bkey_format ret = {
@@ -620,11 +617,11 @@ struct bkey_format bch_bkey_format_done(struct bkey_format_state *s)
}
}
- EBUG_ON(bch_bkey_format_validate(&ret));
+ EBUG_ON(bch2_bkey_format_validate(&ret));
return ret;
}
-const char *bch_bkey_format_validate(struct bkey_format *f)
+const char *bch2_bkey_format_validate(struct bkey_format *f)
{
unsigned i, bits = KEY_PACKED_BITS_START;
@@ -657,9 +654,9 @@ const char *bch_bkey_format_validate(struct bkey_format *f)
* Bits are indexed from 0 - return is [0, nr_key_bits)
*/
__pure
-unsigned bkey_greatest_differing_bit(const struct btree *b,
- const struct bkey_packed *l_k,
- const struct bkey_packed *r_k)
+unsigned bch2_bkey_greatest_differing_bit(const struct btree *b,
+ const struct bkey_packed *l_k,
+ const struct bkey_packed *r_k)
{
const u64 *l = high_word(&b->format, l_k);
const u64 *r = high_word(&b->format, r_k);
@@ -701,8 +698,7 @@ unsigned bkey_greatest_differing_bit(const struct btree *b,
* Bits are indexed from 0 - return is [0, nr_key_bits)
*/
__pure
-unsigned bkey_ffs(const struct btree *b,
- const struct bkey_packed *k)
+unsigned bch2_bkey_ffs(const struct btree *b, const struct bkey_packed *k)
{
const u64 *p = high_word(&b->format, k);
unsigned nr_key_bits = b->nr_key_bits;
@@ -957,7 +953,7 @@ set_field:
return out;
}
-int bch_compile_bkey_format(const struct bkey_format *format, void *_out)
+int bch2_compile_bkey_format(const struct bkey_format *format, void *_out)
{
bool eax_zeroed = false;
u8 *out = _out;
@@ -1034,9 +1030,9 @@ static inline int __bkey_cmp_bits(const u64 *l, const u64 *r,
#endif
__pure
-int __bkey_cmp_packed_format_checked(const struct bkey_packed *l,
- const struct bkey_packed *r,
- const struct btree *b)
+int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *l,
+ const struct bkey_packed *r,
+ const struct btree *b)
{
const struct bkey_format *f = &b->format;
int ret;
@@ -1054,33 +1050,33 @@ int __bkey_cmp_packed_format_checked(const struct bkey_packed *l,
}
__pure __flatten
-int __bkey_cmp_left_packed_format_checked(const struct btree *b,
- const struct bkey_packed *l,
- const struct bpos *r)
+int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bpos *r)
{
return bkey_cmp(bkey_unpack_pos_format_checked(b, l), *r);
}
__pure __flatten
-int __bkey_cmp_packed(const struct bkey_packed *l,
- const struct bkey_packed *r,
- const struct btree *b)
+int __bch2_bkey_cmp_packed(const struct bkey_packed *l,
+ const struct bkey_packed *r,
+ const struct btree *b)
{
int packed = bkey_lr_packed(l, r);
if (likely(packed == BKEY_PACKED_BOTH))
- return __bkey_cmp_packed_format_checked(l, r, b);
+ return __bch2_bkey_cmp_packed_format_checked(l, r, b);
switch (packed) {
case BKEY_PACKED_NONE:
return bkey_cmp(((struct bkey *) l)->p,
((struct bkey *) r)->p);
case BKEY_PACKED_LEFT:
- return __bkey_cmp_left_packed_format_checked(b,
+ return __bch2_bkey_cmp_left_packed_format_checked(b,
(struct bkey_packed *) l,
&((struct bkey *) r)->p);
case BKEY_PACKED_RIGHT:
- return -__bkey_cmp_left_packed_format_checked(b,
+ return -__bch2_bkey_cmp_left_packed_format_checked(b,
(struct bkey_packed *) r,
&((struct bkey *) l)->p);
default:
@@ -1089,17 +1085,18 @@ int __bkey_cmp_packed(const struct bkey_packed *l,
}
__pure __flatten
-int bkey_cmp_left_packed(const struct btree *b,
- const struct bkey_packed *l, const struct bpos *r)
+int __bch2_bkey_cmp_left_packed(const struct btree *b,
+ const struct bkey_packed *l,
+ const struct bpos *r)
{
const struct bkey *l_unpacked;
return unlikely(l_unpacked = packed_to_bkey_c(l))
? bkey_cmp(l_unpacked->p, *r)
- : __bkey_cmp_left_packed_format_checked(b, l, r);
+ : __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}
-void bch_bpos_swab(struct bpos *p)
+void bch2_bpos_swab(struct bpos *p)
{
u8 *l = (u8 *) p;
u8 *h = ((u8 *) &p[1]) - 1;
@@ -1111,9 +1108,9 @@ void bch_bpos_swab(struct bpos *p)
}
}
-void bch_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
+void bch2_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
{
- const struct bkey_format *f = bkey_packed(k) ? _f : &bch_bkey_format_current;
+ const struct bkey_format *f = bkey_packed(k) ? _f : &bch2_bkey_format_current;
u8 *l = k->key_start;
u8 *h = (u8 *) (k->_data + f->key_u64s) - 1;
@@ -1125,7 +1122,7 @@ void bch_bkey_swab_key(const struct bkey_format *_f, struct bkey_packed *k)
}
#ifdef CONFIG_BCACHEFS_DEBUG
-void bkey_pack_test(void)
+void bch2_bkey_pack_test(void)
{
struct bkey t = KEY(4134ULL, 1250629070527416633ULL, 0);
struct bkey_packed p;
@@ -1140,7 +1137,7 @@ void bkey_pack_test(void)
};
struct unpack_state in_s =
- unpack_state_init(&bch_bkey_format_current, (void *) &t);
+ unpack_state_init(&bch2_bkey_format_current, (void *) &t);
struct pack_state out_s = pack_state_init(&test_format, &p);
unsigned i;
@@ -1162,6 +1159,6 @@ void bkey_pack_test(void)
panic("failed at %u\n", i);
}
- BUG_ON(!bkey_pack_key(&p, &t, &test_format));
+ BUG_ON(!bch2_bkey_pack_key(&p, &t, &test_format));
}
#endif
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 4a1c308d4549..1383c96b09e0 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -7,8 +7,8 @@
#include "util.h"
#include "vstructs.h"
-void bch_to_binary(char *, const u64 *, unsigned);
-int bch_bkey_to_text(char *, size_t, const struct bkey *);
+void bch2_to_binary(char *, const u64 *, unsigned);
+int bch2_bkey_to_text(char *, size_t, const struct bkey *);
#define BKEY_PADDED(key) __BKEY_PADDED(key, BKEY_EXTENT_VAL_U64s_MAX)
@@ -112,38 +112,45 @@ struct bkey_format_state {
u64 field_max[BKEY_NR_FIELDS];
};
-void bch_bkey_format_init(struct bkey_format_state *);
-void bch_bkey_format_add_key(struct bkey_format_state *, const struct bkey *);
-void bch_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
-struct bkey_format bch_bkey_format_done(struct bkey_format_state *);
-const char *bch_bkey_format_validate(struct bkey_format *);
+void bch2_bkey_format_init(struct bkey_format_state *);
+void bch2_bkey_format_add_key(struct bkey_format_state *, const struct bkey *);
+void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
+struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
+const char *bch2_bkey_format_validate(struct bkey_format *);
__pure
-unsigned bkey_greatest_differing_bit(const struct btree *,
- const struct bkey_packed *,
- const struct bkey_packed *);
+unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
+ const struct bkey_packed *,
+ const struct bkey_packed *);
__pure
-unsigned bkey_ffs(const struct btree *, const struct bkey_packed *);
+unsigned bch2_bkey_ffs(const struct btree *, const struct bkey_packed *);
__pure
-int __bkey_cmp_packed_format_checked(const struct bkey_packed *,
+int __bch2_bkey_cmp_packed_format_checked(const struct bkey_packed *,
const struct bkey_packed *,
const struct btree *);
__pure
-int __bkey_cmp_left_packed_format_checked(const struct btree *,
+int __bch2_bkey_cmp_left_packed_format_checked(const struct btree *,
const struct bkey_packed *,
const struct bpos *);
__pure
-int __bkey_cmp_packed(const struct bkey_packed *,
- const struct bkey_packed *,
- const struct btree *);
+int __bch2_bkey_cmp_packed(const struct bkey_packed *,
+ const struct bkey_packed *,
+ const struct btree *);
__pure
-int bkey_cmp_left_packed(const struct btree *,
- const struct bkey_packed *,
- const struct bpos *);
+int __bch2_bkey_cmp_left_packed(const struct btree *,
+ const struct bkey_packed *,
+ const struct bpos *);
+
+static inline __pure
+int bkey_cmp_left_packed(const struct btree *b,
+ const struct bkey_packed *l, const struct bpos *r)
+{
+ return __bch2_bkey_cmp_left_packed(b, l, r);
+}
/*
* we prefer to pass bpos by ref, but it's often enough terribly convenient to
@@ -181,7 +188,7 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
&((struct bkey *) (_l))->p); \
break; \
case BKEY_PACKED_BOTH: \
- _cmp = __bkey_cmp_packed((void *) (_l), \
+ _cmp = __bch2_bkey_cmp_packed((void *) (_l), \
(void *) (_r), (_b)); \
break; \
} \
@@ -208,8 +215,8 @@ static inline struct bpos bpos_min(struct bpos l, struct bpos r)
return bkey_cmp(l, r) < 0 ? l : r;
}
-void bch_bpos_swab(struct bpos *);
-void bch_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
+void bch2_bpos_swab(struct bpos *);
+void bch2_bkey_swab_key(const struct bkey_format *, struct bkey_packed *);
static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
{
@@ -328,22 +335,22 @@ static inline void set_bkeyp_val_u64s(const struct bkey_format *format,
#define bkeyp_val(_format, _k) \
((struct bch_val *) ((_k)->_data + bkeyp_key_u64s(_format, _k)))
-extern const struct bkey_format bch_bkey_format_current;
+extern const struct bkey_format bch2_bkey_format_current;
-bool bch_bkey_transform(const struct bkey_format *,
- struct bkey_packed *,
- const struct bkey_format *,
- const struct bkey_packed *);
+bool bch2_bkey_transform(const struct bkey_format *,
+ struct bkey_packed *,
+ const struct bkey_format *,
+ const struct bkey_packed *);
-struct bkey __bkey_unpack_key(const struct bkey_format *,
- const struct bkey_packed *);
+struct bkey __bch2_bkey_unpack_key(const struct bkey_format *,
+ const struct bkey_packed *);
#ifndef HAVE_BCACHE_COMPILED_UNPACK
struct bpos __bkey_unpack_pos(const struct bkey_format *,
const struct bkey_packed *);
#endif
-bool bkey_pack_key(struct bkey_packed *, const struct bkey *,
+bool bch2_bkey_pack_key(struct bkey_packed *, const struct bkey *,
const struct bkey_format *);
enum bkey_pack_pos_ret {
@@ -352,18 +359,18 @@ enum bkey_pack_pos_ret {
BKEY_PACK_POS_FAIL,
};
-enum bkey_pack_pos_ret bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
+enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *, struct bpos,
const struct btree *);
static inline bool bkey_pack_pos(struct bkey_packed *out, struct bpos in,
const struct btree *b)
{
- return bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
+ return bch2_bkey_pack_pos_lossy(out, in, b) == BKEY_PACK_POS_EXACT;
}
-void bkey_unpack(const struct btree *, struct bkey_i *,
+void bch2_bkey_unpack(const struct btree *, struct bkey_i *,
const struct bkey_packed *);
-bool bkey_pack(struct bkey_packed *, const struct bkey_i *,
+bool bch2_bkey_pack(struct bkey_packed *, const struct bkey_i *,
const struct bkey_format *);
static inline u64 bkey_field_max(const struct bkey_format *f,
@@ -377,11 +384,11 @@ static inline u64 bkey_field_max(const struct bkey_format *f,
#ifdef CONFIG_X86_64
#define HAVE_BCACHE_COMPILED_UNPACK 1
-int bch_compile_bkey_format(const struct bkey_format *, void *);
+int bch2_compile_bkey_format(const struct bkey_format *, void *);
#else
-static inline int bch_compile_bkey_format(const struct bkey_format *format,
+static inline int bch2_compile_bkey_format(const struct bkey_format *format,
void *out) { return 0; }
#endif
@@ -558,12 +565,12 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
BKEY_VAL_ACCESSORS(cookie, KEY_TYPE_COOKIE);
-static inline void __bch_extent_assert(u8 type, u8 nr)
+static inline void __bch2_extent_assert(u8 type, u8 nr)
{
EBUG_ON(type != BCH_EXTENT && type != BCH_EXTENT_CACHED);
}
-__BKEY_VAL_ACCESSORS(extent, BCH_EXTENT, __bch_extent_assert);
+__BKEY_VAL_ACCESSORS(extent, BCH_EXTENT, __bch2_extent_assert);
BKEY_VAL_ACCESSORS(reservation, BCH_RESERVATION);
BKEY_VAL_ACCESSORS(inode, BCH_INODE_FS);
@@ -598,9 +605,9 @@ BKEY_VAL_ACCESSORS(xattr, BCH_XATTR);
#define prev_word(p) nth_word(p, -1)
#ifdef CONFIG_BCACHEFS_DEBUG
-void bkey_pack_test(void);
+void bch2_bkey_pack_test(void);
#else
-static inline void bkey_pack_test(void) {}
+static inline void bch2_bkey_pack_test(void) {}
#endif
#endif /* _BCACHE_BKEY_H */
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 2908489c1fd2..51a13fca2e7d 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "dirent.h"
@@ -8,19 +8,19 @@
#include "inode.h"
#include "xattr.h"
-const struct bkey_ops *bch_bkey_ops[] = {
- [BKEY_TYPE_EXTENTS] = &bch_bkey_extent_ops,
- [BKEY_TYPE_INODES] = &bch_bkey_inode_ops,
- [BKEY_TYPE_DIRENTS] = &bch_bkey_dirent_ops,
- [BKEY_TYPE_XATTRS] = &bch_bkey_xattr_ops,
- [BKEY_TYPE_BTREE] = &bch_bkey_btree_ops,
+const struct bkey_ops *bch2_bkey_ops[] = {
+ [BKEY_TYPE_EXTENTS] = &bch2_bkey_extent_ops,
+ [BKEY_TYPE_INODES] = &bch2_bkey_inode_ops,
+ [BKEY_TYPE_DIRENTS] = &bch2_bkey_dirent_ops,
+ [BKEY_TYPE_XATTRS] = &bch2_bkey_xattr_ops,
+ [BKEY_TYPE_BTREE] = &bch2_bkey_btree_ops,
};
/* Returns string indicating reason for being invalid, or NULL if valid: */
-const char *bkey_invalid(struct bch_fs *c, enum bkey_type type,
+const char *bch2_bkey_invalid(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
{
- const struct bkey_ops *ops = bch_bkey_ops[type];
+ const struct bkey_ops *ops = bch2_bkey_ops[type];
if (k.k->u64s < BKEY_U64s)
return "u64s too small";
@@ -52,8 +52,8 @@ const char *bkey_invalid(struct bch_fs *c, enum bkey_type type,
}
}
-const char *btree_bkey_invalid(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
+const char *bch2_btree_bkey_invalid(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k)
{
if (bkey_cmp(bkey_start_pos(k.k), b->data->min_key) < 0)
return "key before start of btree node";
@@ -64,23 +64,23 @@ const char *btree_bkey_invalid(struct bch_fs *c, struct btree *b,
if (k.k->p.snapshot)
return "nonzero snapshot";
- return bkey_invalid(c, btree_node_type(b), k);
+ return bch2_bkey_invalid(c, btree_node_type(b), k);
}
-void bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
+void bch2_bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
{
enum bkey_type type = btree_node_type(b);
- const struct bkey_ops *ops = bch_bkey_ops[type];
+ const struct bkey_ops *ops = bch2_bkey_ops[type];
const char *invalid;
BUG_ON(!k.k->u64s);
- invalid = btree_bkey_invalid(c, b, k);
+ invalid = bch2_btree_bkey_invalid(c, b, k);
if (invalid) {
char buf[160];
- bch_bkey_val_to_text(c, type, buf, sizeof(buf), k);
- bch_fs_bug(c, "invalid bkey %s: %s", buf, invalid);
+ bch2_bkey_val_to_text(c, type, buf, sizeof(buf), k);
+ bch2_fs_bug(c, "invalid bkey %s: %s", buf, invalid);
return;
}
@@ -89,23 +89,23 @@ void bkey_debugcheck(struct bch_fs *c, struct btree *b, struct bkey_s_c k)
ops->key_debugcheck(c, b, k);
}
-void bch_val_to_text(struct bch_fs *c, enum bkey_type type,
+void bch2_val_to_text(struct bch_fs *c, enum bkey_type type,
char *buf, size_t size, struct bkey_s_c k)
{
- const struct bkey_ops *ops = bch_bkey_ops[type];
+ const struct bkey_ops *ops = bch2_bkey_ops[type];
if (k.k->type >= KEY_TYPE_GENERIC_NR &&
ops->val_to_text)
ops->val_to_text(c, buf, size, k);
}
-void bch_bkey_val_to_text(struct bch_fs *c, enum bkey_type type,
+void bch2_bkey_val_to_text(struct bch_fs *c, enum bkey_type type,
char *buf, size_t size, struct bkey_s_c k)
{
- const struct bkey_ops *ops = bch_bkey_ops[type];
+ const struct bkey_ops *ops = bch2_bkey_ops[type];
char *out = buf, *end = buf + size;
- out += bch_bkey_to_text(out, end - out, k.k);
+ out += bch2_bkey_to_text(out, end - out, k.k);
if (k.k->type >= KEY_TYPE_GENERIC_NR &&
ops->val_to_text) {
@@ -114,13 +114,13 @@ void bch_bkey_val_to_text(struct bch_fs *c, enum bkey_type type,
}
}
-void bch_bkey_swab(enum bkey_type type,
+void bch2_bkey_swab(enum bkey_type type,
const struct bkey_format *f,
struct bkey_packed *k)
{
- const struct bkey_ops *ops = bch_bkey_ops[type];
+ const struct bkey_ops *ops = bch2_bkey_ops[type];
- bch_bkey_swab_key(f, k);
+ bch2_bkey_swab_key(f, k);
if (ops->swab)
ops->swab(f, k);
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index 111b1789b2c8..d372fa61e52c 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -62,20 +62,20 @@ struct bkey_ops {
bool is_extents;
};
-const char *bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c);
-const char *btree_bkey_invalid(struct bch_fs *, struct btree *,
- struct bkey_s_c);
+const char *bch2_bkey_invalid(struct bch_fs *, enum bkey_type, struct bkey_s_c);
+const char *bch2_btree_bkey_invalid(struct bch_fs *, struct btree *,
+ struct bkey_s_c);
-void bkey_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c);
-void bch_val_to_text(struct bch_fs *, enum bkey_type,
- char *, size_t, struct bkey_s_c);
-void bch_bkey_val_to_text(struct bch_fs *, enum bkey_type,
- char *, size_t, struct bkey_s_c);
+void bch2_bkey_debugcheck(struct bch_fs *, struct btree *, struct bkey_s_c);
+void bch2_val_to_text(struct bch_fs *, enum bkey_type,
+ char *, size_t, struct bkey_s_c);
+void bch2_bkey_val_to_text(struct bch_fs *, enum bkey_type,
+ char *, size_t, struct bkey_s_c);
-void bch_bkey_swab(enum bkey_type, const struct bkey_format *,
- struct bkey_packed *);
+void bch2_bkey_swab(enum bkey_type, const struct bkey_format *,
+ struct bkey_packed *);
-extern const struct bkey_ops *bch_bkey_ops[];
+extern const struct bkey_ops *bch2_bkey_ops[];
#undef DEF_BTREE_ID
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 69f4aa2da5e4..280dcf3e1479 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -5,12 +5,10 @@
* Copyright 2012 Google, Inc.
*/
-#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__
-
+#include "bcachefs.h"
+#include "bset.h"
#include "eytzinger.h"
#include "util.h"
-#include "bset.h"
-#include "bcache.h"
#include <asm/unaligned.h>
#include <linux/dynamic_fault.h>
@@ -22,7 +20,7 @@
#include "alloc_types.h"
#include <trace/events/bcachefs.h>
-struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k)
+struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
struct bset_tree *t;
@@ -50,7 +48,7 @@ struct bset_tree *bch_bkey_to_bset(struct btree *b, struct bkey_packed *k)
* by the time we actually do the insert will all be deleted.
*/
-void bch_dump_bset(struct btree *b, struct bset *i, unsigned set)
+void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
{
struct bkey_packed *_k, *_n;
struct bkey k, n;
@@ -64,7 +62,7 @@ void bch_dump_bset(struct btree *b, struct bset *i, unsigned set)
_k = _n, k = n) {
_n = bkey_next(_k);
- bch_bkey_to_text(buf, sizeof(buf), &k);
+ bch2_bkey_to_text(buf, sizeof(buf), &k);
printk(KERN_ERR "block %u key %zi/%u: %s\n", set,
_k->_data - i->_data, i->u64s, buf);
@@ -91,17 +89,17 @@ void bch_dump_bset(struct btree *b, struct bset *i, unsigned set)
}
}
-void bch_dump_btree_node(struct btree *b)
+void bch2_dump_btree_node(struct btree *b)
{
struct bset_tree *t;
console_lock();
for_each_bset(b, t)
- bch_dump_bset(b, bset(b, t), t - b->set);
+ bch2_dump_bset(b, bset(b, t), t - b->set);
console_unlock();
}
-void bch_dump_btree_node_iter(struct btree *b,
+void bch2_dump_btree_node_iter(struct btree *b,
struct btree_node_iter *iter)
{
struct btree_node_iter_set *set;
@@ -110,11 +108,11 @@ void bch_dump_btree_node_iter(struct btree *b,
btree_node_iter_for_each(iter, set) {
struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
- struct bset_tree *t = bch_bkey_to_bset(b, k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
struct bkey uk = bkey_unpack_key(b, k);
char buf[100];
- bch_bkey_to_text(buf, sizeof(buf), &uk);
+ bch2_bkey_to_text(buf, sizeof(buf), &uk);
printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
}
@@ -136,7 +134,7 @@ static bool keys_out_of_order(struct btree *b,
!bkey_cmp_packed(b, prev, next));
}
-void __bch_verify_btree_nr_keys(struct btree *b)
+void __bch2_verify_btree_nr_keys(struct btree *b)
{
struct bset_tree *t;
struct bkey_packed *k;
@@ -152,11 +150,11 @@ void __bch_verify_btree_nr_keys(struct btree *b)
BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
-static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
struct btree *b,
struct bkey_packed *k)
{
- const struct bkey_packed *n = bch_btree_node_iter_peek_all(iter, b);
+ const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b);
bkey_unpack_key(b, k);
@@ -166,14 +164,14 @@ static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
struct bkey nu = bkey_unpack_key(b, n);
char buf1[80], buf2[80];
- bch_dump_btree_node(b);
- bch_bkey_to_text(buf1, sizeof(buf1), &ku);
- bch_bkey_to_text(buf2, sizeof(buf2), &nu);
+ bch2_dump_btree_node(b);
+ bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
+ bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2);
}
}
-void bch_btree_node_iter_verify(struct btree_node_iter *iter,
+void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
struct btree *b)
{
struct btree_node_iter_set *set;
@@ -187,7 +185,7 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
btree_node_iter_for_each(iter, set) {
k = __btree_node_offset_to_key(b, set->k);
- t = bch_bkey_to_bset(b, k);
+ t = bch2_bkey_to_bset(b, k);
BUG_ON(__btree_node_offset_to_key(b, set->end) !=
btree_bkey_last(b, t));
@@ -199,30 +197,30 @@ void bch_btree_node_iter_verify(struct btree_node_iter *iter,
first = __btree_node_offset_to_key(b, iter->data[0].k);
for_each_bset(b, t)
- if (bch_btree_node_iter_bset_pos(iter, b, t) ==
+ if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
btree_bkey_last(b, t) &&
- (k = bkey_prev_all(b, t, btree_bkey_last(b, t))))
+ (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
k, first) > 0);
}
-void bch_verify_key_order(struct btree *b,
+void bch2_verify_key_order(struct btree *b,
struct btree_node_iter *iter,
struct bkey_packed *where)
{
- struct bset_tree *t = bch_bkey_to_bset(b, where);
+ struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct bkey_packed *k, *prev;
struct bkey uk, uw = bkey_unpack_key(b, where);
- k = bkey_prev_all(b, t, where);
+ k = bch2_bkey_prev_all(b, t, where);
if (k &&
keys_out_of_order(b, k, where, iter->is_extents)) {
char buf1[100], buf2[100];
- bch_dump_btree_node(b);
+ bch2_dump_btree_node(b);
uk = bkey_unpack_key(b, k);
- bch_bkey_to_text(buf1, sizeof(buf1), &uk);
- bch_bkey_to_text(buf2, sizeof(buf2), &uw);
+ bch2_bkey_to_text(buf1, sizeof(buf1), &uk);
+ bch2_bkey_to_text(buf2, sizeof(buf2), &uw);
panic("out of order with prev:\n%s\n%s\n",
buf1, buf2);
}
@@ -236,13 +234,13 @@ void bch_verify_key_order(struct btree *b,
where < btree_bkey_last(b, t))
continue;
- k = bch_btree_node_iter_bset_pos(iter, b, t);
+ k = bch2_btree_node_iter_bset_pos(iter, b, t);
if (k == btree_bkey_last(b, t))
- k = bkey_prev_all(b, t, k);
+ k = bch2_bkey_prev_all(b, t, k);
while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
- (prev = bkey_prev_all(b, t, k)))
+ (prev = bch2_bkey_prev_all(b, t, k)))
k = prev;
for (;
@@ -266,7 +264,7 @@ void bch_verify_key_order(struct btree *b,
#else
-static void bch_btree_node_iter_next_check(struct btree_node_iter *iter,
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
struct btree *b,
struct bkey_packed *k) {}
@@ -434,13 +432,13 @@ static void bset_aux_tree_verify(struct btree *b)
/* Memory allocation */
-void bch_btree_keys_free(struct btree *b)
+void bch2_btree_keys_free(struct btree *b)
{
vfree(b->aux_data);
b->aux_data = NULL;
}
-int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
+int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
{
b->page_order = page_order;
b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
@@ -451,7 +449,7 @@ int bch_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
return 0;
}
-void bch_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
+void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
{
unsigned i;
@@ -463,7 +461,7 @@ void bch_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
for (i = 0; i < MAX_BSETS; i++)
b->set[i].data_offset = U16_MAX;
- bch_bset_set_no_aux_tree(b, b->set);
+ bch2_bset_set_no_aux_tree(b, b->set);
}
/* Binary tree stuff for auxiliary search trees */
@@ -579,7 +577,7 @@ static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
};
}
-static void bch_bset_verify_rw_aux_tree(struct btree *b,
+static void bch2_bset_verify_rw_aux_tree(struct btree *b,
struct bset_tree *t)
{
struct bkey_packed *k = btree_bkey_first(b, t);
@@ -754,7 +752,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
* Note that this may be negative - we may be running off the low end
* of the key: we handle this later:
*/
- exponent = (int) bkey_greatest_differing_bit(b, l, r) - (bits - 1);
+ exponent = (int) bch2_bkey_greatest_differing_bit(b, l, r) - (bits - 1);
/*
* Then we calculate the actual shift value, from the start of the key
@@ -804,7 +802,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
* the comparison in bset_search_tree. If we're dropping set bits,
* increment it:
*/
- if (exponent > (int) bkey_ffs(b, m)) {
+ if (exponent > (int) bch2_bkey_ffs(b, m)) {
if (j < BFLOAT_32BIT_NR
? f->mantissa32 == U32_MAX
: f->mantissa16 == U16_MAX)
@@ -918,7 +916,7 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
for (i = b->set; i != t; i++)
BUG_ON(bset_has_rw_aux_tree(i));
- bch_bset_set_no_aux_tree(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
/* round up to next cacheline: */
t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
@@ -927,7 +925,7 @@ static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
bset_aux_tree_verify(b);
}
-void bch_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
+void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
bool writeable)
{
if (writeable
@@ -948,7 +946,7 @@ void bch_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
bset_aux_tree_verify(b);
}
-void bch_bset_init_first(struct btree *b, struct bset *i)
+void bch2_bset_init_first(struct btree *b, struct bset *i)
{
struct bset_tree *t;
@@ -962,7 +960,7 @@ void bch_bset_init_first(struct btree *b, struct bset *i)
set_btree_bset(b, t, i);
}
-void bch_bset_init_next(struct btree *b, struct bset *i)
+void bch2_bset_init_next(struct btree *b, struct bset *i)
{
struct bset_tree *t;
@@ -1014,8 +1012,8 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
return p;
}
-struct bkey_packed *bkey_prev_all(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
+struct bkey_packed *bch2_bkey_prev_all(struct btree *b, struct bset_tree *t,
+ struct bkey_packed *k)
{
struct bkey_packed *p;
@@ -1029,8 +1027,8 @@ struct bkey_packed *bkey_prev_all(struct btree *b, struct bset_tree *t,
return p;
}
-struct bkey_packed *bkey_prev(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
+struct bkey_packed *bch2_bkey_prev(struct btree *b, struct bset_tree *t,
+ struct bkey_packed *k)
{
while (1) {
struct bkey_packed *p, *i, *ret = NULL;
@@ -1063,7 +1061,7 @@ static void rw_aux_tree_fix_invalidated_key(struct btree *b,
rw_aux_tree(b, t)[j].offset == offset)
rw_aux_tree_set(b, t, j, k);
- bch_bset_verify_rw_aux_tree(b, t);
+ bch2_bset_verify_rw_aux_tree(b, t);
}
static void ro_aux_tree_fix_invalidated_key(struct btree *b,
@@ -1119,12 +1117,12 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
}
/**
- * bch_bset_fix_invalidated_key() - given an existing key @k that has been
+ * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
* modified, fix any auxiliary search tree by remaking all the nodes in the
* auxiliary search tree that @k corresponds to
*/
-void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
- struct bkey_packed *k)
+void bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
+ struct bkey_packed *k)
{
switch (bset_aux_tree_type(t)) {
case BSET_NO_AUX_TREE:
@@ -1138,11 +1136,11 @@ void bch_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
}
}
-static void bch_bset_fix_lookup_table(struct btree *b,
- struct bset_tree *t,
- struct bkey_packed *_where,
- unsigned clobber_u64s,
- unsigned new_u64s)
+static void bch2_bset_fix_lookup_table(struct btree *b,
+ struct bset_tree *t,
+ struct bkey_packed *_where,
+ unsigned clobber_u64s,
+ unsigned new_u64s)
{
int shift = new_u64s - clobber_u64s;
unsigned l, j, where = __btree_node_key_to_offset(b, _where);
@@ -1221,23 +1219,23 @@ static void bch_bset_fix_lookup_table(struct btree *b,
}
}
- bch_bset_verify_rw_aux_tree(b, t);
+ bch2_bset_verify_rw_aux_tree(b, t);
bset_aux_tree_verify(b);
}
-void bch_bset_insert(struct btree *b,
- struct btree_node_iter *iter,
- struct bkey_packed *where,
- struct bkey_i *insert,
- unsigned clobber_u64s)
+void bch2_bset_insert(struct btree *b,
+ struct btree_node_iter *iter,
+ struct bkey_packed *where,
+ struct bkey_i *insert,
+ unsigned clobber_u64s)
{
struct bkey_format *f = &b->format;
struct bset_tree *t = bset_tree_last(b);
struct bkey_packed packed, *src = bkey_to_packed(insert);
- bch_bset_verify_rw_aux_tree(b, t);
+ bch2_bset_verify_rw_aux_tree(b, t);
- if (bkey_pack_key(&packed, &insert->k, f))
+ if (bch2_bkey_pack_key(&packed, &insert->k, f))
src = &packed;
if (!bkey_whiteout(&insert->k))
@@ -1260,21 +1258,21 @@ void bch_bset_insert(struct btree *b,
memcpy_u64s(bkeyp_val(f, where), &insert->v,
bkeyp_val_u64s(f, src));
- bch_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
+ bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
- bch_verify_key_order(b, iter, where);
- bch_verify_btree_nr_keys(b);
+ bch2_verify_key_order(b, iter, where);
+ bch2_verify_btree_nr_keys(b);
}
-void bch_bset_delete(struct btree *b,
- struct bkey_packed *where,
- unsigned clobber_u64s)
+void bch2_bset_delete(struct btree *b,
+ struct bkey_packed *where,
+ unsigned clobber_u64s)
{
struct bset_tree *t = bset_tree_last(b);
u64 *src_p = where->_data + clobber_u64s;
u64 *dst_p = where->_data;
- bch_bset_verify_rw_aux_tree(b, t);
+ bch2_bset_verify_rw_aux_tree(b, t);
BUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
@@ -1282,7 +1280,7 @@ void bch_bset_delete(struct btree *b,
le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
set_btree_bset_end(b, t);
- bch_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
+ bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}
/* Lookup */
@@ -1386,7 +1384,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
* Returns the first key greater than or equal to @search
*/
__always_inline __flatten
-static struct bkey_packed *bch_bset_search(struct btree *b,
+static struct bkey_packed *bch2_bset_search(struct btree *b,
struct bset_tree *t,
struct bpos search,
struct bkey_packed *packed_search,
@@ -1444,7 +1442,7 @@ static struct bkey_packed *bch_bset_search(struct btree *b,
m = bkey_next(m);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- struct bkey_packed *prev = bkey_prev_all(b, t, m);
+ struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
BUG_ON(prev &&
btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
@@ -1456,10 +1454,10 @@ static struct bkey_packed *bch_bset_search(struct btree *b,
/* Btree node iterator */
-void bch_btree_node_iter_push(struct btree_node_iter *iter,
- struct btree *b,
- const struct bkey_packed *k,
- const struct bkey_packed *end)
+void bch2_btree_node_iter_push(struct btree_node_iter *iter,
+ struct btree *b,
+ const struct bkey_packed *k,
+ const struct bkey_packed *end)
{
if (k != end) {
struct btree_node_iter_set *pos, n =
@@ -1489,12 +1487,12 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
trace_bkey_pack_pos_fail(search);
for_each_bset(b, t)
- __bch_btree_node_iter_push(iter, b,
- bch_bset_search(b, t, search, NULL, NULL,
+ __bch2_btree_node_iter_push(iter, b,
+ bch2_bset_search(b, t, search, NULL, NULL,
strictly_greater),
btree_bkey_last(b, t));
- bch_btree_node_iter_sort(iter, b);
+ bch2_btree_node_iter_sort(iter, b);
}
/**
@@ -1537,9 +1535,9 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
* So we've got to search for start_of_range, then after the lookup iterate
* past any extents that compare equal to the position we searched for.
*/
-void bch_btree_node_iter_init(struct btree_node_iter *iter,
- struct btree *b, struct bpos search,
- bool strictly_greater, bool is_extents)
+void bch2_btree_node_iter_init(struct btree_node_iter *iter,
+ struct btree *b, struct bpos search,
+ bool strictly_greater, bool is_extents)
{
struct bset_tree *t;
struct bkey_packed p, *packed_search = NULL;
@@ -1547,12 +1545,12 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
bset_aux_tree_verify(b);
- __bch_btree_node_iter_init(iter, is_extents);
+ __bch2_btree_node_iter_init(iter, is_extents);
//if (bkey_cmp(search, b->curr_max_key) > 0)
// return;
- switch (bkey_pack_pos_lossy(&p, search, b)) {
+ switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
case BKEY_PACK_POS_EXACT:
packed_search = &p;
break;
@@ -1566,33 +1564,33 @@ void bch_btree_node_iter_init(struct btree_node_iter *iter,
}
for_each_bset(b, t)
- __bch_btree_node_iter_push(iter, b,
- bch_bset_search(b, t, search,
+ __bch2_btree_node_iter_push(iter, b,
+ bch2_bset_search(b, t, search,
packed_search, &p,
strictly_greater),
btree_bkey_last(b, t));
- bch_btree_node_iter_sort(iter, b);
+ bch2_btree_node_iter_sort(iter, b);
}
-void bch_btree_node_iter_init_from_start(struct btree_node_iter *iter,
- struct btree *b,
- bool is_extents)
+void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
+ struct btree *b,
+ bool is_extents)
{
struct bset_tree *t;
- __bch_btree_node_iter_init(iter, is_extents);
+ __bch2_btree_node_iter_init(iter, is_extents);
for_each_bset(b, t)
- __bch_btree_node_iter_push(iter, b,
+ __bch2_btree_node_iter_push(iter, b,
btree_bkey_first(b, t),
btree_bkey_last(b, t));
- bch_btree_node_iter_sort(iter, b);
+ bch2_btree_node_iter_sort(iter, b);
}
-struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *iter,
- struct btree *b,
- struct bset_tree *t)
+struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bset_tree *t)
{
struct btree_node_iter_set *set;
@@ -1630,8 +1628,8 @@ static inline void btree_node_iter_sort_two(struct btree_node_iter *iter,
swap(iter->data[first], iter->data[first + 1]);
}
-void bch_btree_node_iter_sort(struct btree_node_iter *iter,
- struct btree *b)
+void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
+ struct btree *b)
{
EBUG_ON(iter->used > 3);
@@ -1645,7 +1643,6 @@ void bch_btree_node_iter_sort(struct btree_node_iter *iter,
if (iter->used > 1)
btree_node_iter_sort_two(iter, b, 0);
}
-EXPORT_SYMBOL(bch_btree_node_iter_sort);
/**
* bch_btree_node_iter_advance - advance @iter by one key
@@ -1653,12 +1650,12 @@ EXPORT_SYMBOL(bch_btree_node_iter_sort);
* Doesn't do debugchecks - for cases where (insert_fixup_extent()) a bset might
* momentarily have out of order extents.
*/
-void bch_btree_node_iter_advance(struct btree_node_iter *iter,
- struct btree *b)
+void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+ struct btree *b)
{
- struct bkey_packed *k = bch_btree_node_iter_peek_all(iter, b);
+ struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
- iter->data->k += __bch_btree_node_iter_peek_all(iter, b)->u64s;
+ iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
BUG_ON(iter->data->k > iter->data->end);
@@ -1669,14 +1666,14 @@ void bch_btree_node_iter_advance(struct btree_node_iter *iter,
btree_node_iter_sift(iter, b, 0);
- bch_btree_node_iter_next_check(iter, b, k);
+ bch2_btree_node_iter_next_check(iter, b, k);
}
/*
* Expensive:
*/
-struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter,
- struct btree *b)
+struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
+ struct btree *b)
{
struct bkey_packed *k, *prev = NULL;
struct btree_node_iter_set *set;
@@ -1684,11 +1681,11 @@ struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *iter,
struct bset_tree *prev_t;
unsigned end;
- bch_btree_node_iter_verify(iter, b);
+ bch2_btree_node_iter_verify(iter, b);
for_each_bset(b, t) {
- k = bkey_prev_all(b, t,
- bch_btree_node_iter_bset_pos(iter, b, t));
+ k = bch2_bkey_prev_all(b, t,
+ bch2_btree_node_iter_bset_pos(iter, b, t));
if (k &&
(!prev || __btree_node_iter_cmp(iter->is_extents, b,
k, prev) > 0)) {
@@ -1724,31 +1721,30 @@ out:
return prev;
}
-struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *iter,
- struct btree *b)
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
+ struct btree *b)
{
struct bkey_packed *k;
do {
- k = bch_btree_node_iter_prev_all(iter, b);
+ k = bch2_btree_node_iter_prev_all(iter, b);
} while (k && bkey_deleted(k));
return k;
}
-struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
- struct btree *b,
- struct bkey *u)
+struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
+ struct btree *b,
+ struct bkey *u)
{
- struct bkey_packed *k = bch_btree_node_iter_peek(iter, b);
+ struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}
-EXPORT_SYMBOL(bch_btree_node_iter_peek_unpack);
/* Mergesort */
-void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats)
+void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
{
struct bset_tree *t;
@@ -1779,10 +1775,10 @@ void bch_btree_keys_stats(struct btree *b, struct bset_stats *stats)
}
}
-int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
- char *buf, size_t size)
+int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
+ char *buf, size_t size)
{
- struct bset_tree *t = bch_bkey_to_bset(b, k);
+ struct bset_tree *t = bch2_bkey_to_bset(b, k);
struct bkey_packed *l, *r, *p;
struct bkey uk, up;
char buf1[200], buf2[200];
@@ -1812,13 +1808,13 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
? btree_bkey_first(b, t)
: tree_to_prev_bkey(b, t, j >> ffs(j));
r = is_power_of_2(j + 1)
- ? bkey_prev_all(b, t, btree_bkey_last(b, t))
+ ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
: tree_to_bkey(b, t, j >> (ffz(j) + 1));
up = bkey_unpack_key(b, p);
uk = bkey_unpack_key(b, k);
- bch_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
- bch_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
+ bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
+ bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
return scnprintf(buf, size,
" failed prev at depth %u\n"
@@ -1828,8 +1824,8 @@ int bch_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
"\t%s\n"
"\t%s\n",
ilog2(j),
- bkey_greatest_differing_bit(b, l, r),
- bkey_greatest_differing_bit(b, p, k),
+ bch2_bkey_greatest_differing_bit(b, l, r),
+ bch2_bkey_greatest_differing_bit(b, p, k),
uk.p.inode, uk.p.offset,
up.p.inode, up.p.offset,
buf1, buf2);
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index a0d8f26c8bb1..76a83fcb92bd 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -189,13 +189,13 @@ bkey_unpack_key_format_checked(const struct btree *b,
unpack_fn(&dst, src);
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
- struct bkey dst2 = __bkey_unpack_key(&b->format, src);
+ struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);
BUG_ON(memcmp(&dst, &dst2, sizeof(dst)));
}
}
#else
- dst = __bkey_unpack_key(&b->format, src);
+ dst = __bch2_bkey_unpack_key(&b->format, src);
#endif
return dst;
}
@@ -254,12 +254,12 @@ static inline struct bkey_s __bkey_disassemble(struct btree *b,
#define for_each_bset(_b, _t) \
for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
-extern bool bch_expensive_debug_checks;
+extern bool bch2_expensive_debug_checks;
static inline bool btree_keys_expensive_checks(struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- return bch_expensive_debug_checks || *b->expensive_debug_checks;
+ return bch2_expensive_debug_checks || *b->expensive_debug_checks;
#else
return false;
#endif
@@ -275,7 +275,7 @@ static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
}
-static inline void bch_bset_set_no_aux_tree(struct btree *b,
+static inline void bch2_bset_set_no_aux_tree(struct btree *b,
struct bset_tree *t)
{
BUG_ON(t < b->set);
@@ -295,12 +295,12 @@ static inline void btree_node_set_format(struct btree *b,
b->format = f;
b->nr_key_bits = bkey_format_key_bits(&f);
- len = bch_compile_bkey_format(&b->format, b->aux_data);
+ len = bch2_compile_bkey_format(&b->format, b->aux_data);
BUG_ON(len < 0 || len > U8_MAX);
b->unpack_fn_len = len;
- bch_bset_set_no_aux_tree(b, b->set);
+ bch2_bset_set_no_aux_tree(b, b->set);
}
static inline struct bset *bset_next_set(struct btree *b,
@@ -313,19 +313,19 @@ static inline struct bset *bset_next_set(struct btree *b,
return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
}
-void bch_btree_keys_free(struct btree *);
-int bch_btree_keys_alloc(struct btree *, unsigned, gfp_t);
-void bch_btree_keys_init(struct btree *, bool *);
+void bch2_btree_keys_free(struct btree *);
+int bch2_btree_keys_alloc(struct btree *, unsigned, gfp_t);
+void bch2_btree_keys_init(struct btree *, bool *);
-void bch_bset_init_first(struct btree *, struct bset *);
-void bch_bset_init_next(struct btree *, struct bset *);
-void bch_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
-void bch_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
+void bch2_bset_init_first(struct btree *, struct bset *);
+void bch2_bset_init_next(struct btree *, struct bset *);
+void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
+void bch2_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
struct bkey_packed *);
-void bch_bset_insert(struct btree *, struct btree_node_iter *,
+void bch2_bset_insert(struct btree *, struct btree_node_iter *,
struct bkey_packed *, struct bkey_i *, unsigned);
-void bch_bset_delete(struct btree *, struct bkey_packed *, unsigned);
+void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);
/* Bkey utility code */
@@ -341,9 +341,9 @@ static inline int bkey_cmp_p_or_unp(const struct btree *b,
return bkey_cmp(packed_to_bkey_c(l)->p, *r);
if (likely(r_packed))
- return __bkey_cmp_packed_format_checked(l, r_packed, b);
+ return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);
- return __bkey_cmp_left_packed_format_checked(b, l, r);
+ return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}
/* Returns true if @k is after iterator position @pos */
@@ -379,11 +379,11 @@ static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b,
(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}
-struct bset_tree *bch_bkey_to_bset(struct btree *, struct bkey_packed *);
-struct bkey_packed *bkey_prev_all(struct btree *, struct bset_tree *,
+struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);
+struct bkey_packed *bch2_bkey_prev_all(struct btree *, struct bset_tree *,
struct bkey_packed *);
-struct bkey_packed *bkey_prev(struct btree *, struct bset_tree *,
- struct bkey_packed *);
+struct bkey_packed *bch2_bkey_prev(struct btree *, struct bset_tree *,
+ struct bkey_packed *);
enum bch_extent_overlap {
BCH_EXTENT_OVERLAP_ALL = 0,
@@ -393,7 +393,7 @@ enum bch_extent_overlap {
};
/* Returns how k overlaps with m */
-static inline enum bch_extent_overlap bch_extent_overlap(const struct bkey *k,
+static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
const struct bkey *m)
{
int cmp1 = bkey_cmp(k->p, m->p) < 0;
@@ -414,33 +414,33 @@ struct btree_node_iter {
} data[MAX_BSETS];
};
-static inline void __bch_btree_node_iter_init(struct btree_node_iter *iter,
+static inline void __bch2_btree_node_iter_init(struct btree_node_iter *iter,
bool is_extents)
{
iter->used = 0;
iter->is_extents = is_extents;
}
-void bch_btree_node_iter_push(struct btree_node_iter *, struct btree *,
+void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
const struct bkey_packed *,
const struct bkey_packed *);
-void bch_btree_node_iter_init(struct btree_node_iter *, struct btree *,
+void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
struct bpos, bool, bool);
-void bch_btree_node_iter_init_from_start(struct btree_node_iter *,
+void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
struct btree *, bool);
-struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *,
+struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
struct btree *,
struct bset_tree *);
-void bch_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
-void bch_btree_node_iter_advance(struct btree_node_iter *, struct btree *);
+void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
+void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);
#define btree_node_iter_for_each(_iter, _set) \
for (_set = (_iter)->data; \
_set < (_iter)->data + (_iter)->used; \
_set++)
-static inline bool bch_btree_node_iter_end(struct btree_node_iter *iter)
+static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
{
return !iter->used;
}
@@ -452,7 +452,7 @@ static inline int __btree_node_iter_cmp(bool is_extents,
{
/*
* For non extents, when keys compare equal the deleted keys have to
- * come first - so that bch_btree_node_iter_next_check() can detect
+ * come first - so that bch2_btree_node_iter_next_check() can detect
* duplicate nondeleted keys (and possibly other reasons?)
*
* For extents, bkey_deleted() is used as a proxy for k->size == 0, so
@@ -473,7 +473,7 @@ static inline int btree_node_iter_cmp(struct btree_node_iter *iter,
__btree_node_offset_to_key(b, r.k));
}
-static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter,
+static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
struct btree *b,
const struct bkey_packed *k,
const struct bkey_packed *end)
@@ -486,47 +486,47 @@ static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter,
}
static inline struct bkey_packed *
-__bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
+__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
struct btree *b)
{
return __btree_node_offset_to_key(b, iter->data->k);
}
static inline struct bkey_packed *
-bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
+bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
struct btree *b)
{
- return bch_btree_node_iter_end(iter)
+ return bch2_btree_node_iter_end(iter)
? NULL
- : __bch_btree_node_iter_peek_all(iter, b);
+ : __bch2_btree_node_iter_peek_all(iter, b);
}
static inline struct bkey_packed *
-bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
+bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
struct bkey_packed *ret;
- while ((ret = bch_btree_node_iter_peek_all(iter, b)) &&
+ while ((ret = bch2_btree_node_iter_peek_all(iter, b)) &&
bkey_deleted(ret))
- bch_btree_node_iter_advance(iter, b);
+ bch2_btree_node_iter_advance(iter, b);
return ret;
}
static inline struct bkey_packed *
-bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
+bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
{
- struct bkey_packed *ret = bch_btree_node_iter_peek_all(iter, b);
+ struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);
if (ret)
- bch_btree_node_iter_advance(iter, b);
+ bch2_btree_node_iter_advance(iter, b);
return ret;
}
-struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *,
+struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *,
struct btree *);
-struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *,
+struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *,
struct btree *);
/*
@@ -534,18 +534,18 @@ struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *,
* overlapping) keys
*/
#define for_each_btree_node_key(b, k, iter, _is_extents) \
- for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
- ((k) = bch_btree_node_iter_peek(iter, b)); \
- bch_btree_node_iter_advance(iter, b))
+ for (bch2_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
+ ((k) = bch2_btree_node_iter_peek(iter, b)); \
+ bch2_btree_node_iter_advance(iter, b))
-struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *,
+struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
struct btree *,
struct bkey *);
#define for_each_btree_node_key_unpack(b, k, iter, _is_extents, unpacked)\
- for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
- (k = bch_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
- bch_btree_node_iter_advance(iter, b))
+ for (bch2_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
+ (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
+ bch2_btree_node_iter_advance(iter, b))
/* Accounting: */
@@ -579,37 +579,37 @@ struct bset_stats {
size_t failed_overflow;
};
-void bch_btree_keys_stats(struct btree *, struct bset_stats *);
-int bch_bkey_print_bfloat(struct btree *, struct bkey_packed *,
+void bch2_btree_keys_stats(struct btree *, struct bset_stats *);
+int bch2_bkey_print_bfloat(struct btree *, struct bkey_packed *,
char *, size_t);
/* Debug stuff */
-void bch_dump_bset(struct btree *, struct bset *, unsigned);
-void bch_dump_btree_node(struct btree *);
-void bch_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
+void bch2_dump_bset(struct btree *, struct bset *, unsigned);
+void bch2_dump_btree_node(struct btree *);
+void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
#ifdef CONFIG_BCACHEFS_DEBUG
-void __bch_verify_btree_nr_keys(struct btree *);
-void bch_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
-void bch_verify_key_order(struct btree *, struct btree_node_iter *,
+void __bch2_verify_btree_nr_keys(struct btree *);
+void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
+void bch2_verify_key_order(struct btree *, struct btree_node_iter *,
struct bkey_packed *);
#else
-static inline void __bch_verify_btree_nr_keys(struct btree *b) {}
-static inline void bch_btree_node_iter_verify(struct btree_node_iter *iter,
+static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
+static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
struct btree *b) {}
-static inline void bch_verify_key_order(struct btree *b,
+static inline void bch2_verify_key_order(struct btree *b,
struct btree_node_iter *iter,
struct bkey_packed *where) {}
#endif
-static inline void bch_verify_btree_nr_keys(struct btree *b)
+static inline void bch2_verify_btree_nr_keys(struct btree *b)
{
if (btree_keys_expensive_checks(b))
- __bch_verify_btree_nr_keys(b);
+ __bch2_verify_btree_nr_keys(b);
}
#endif
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 578a27f0c51c..c4cc26f9138b 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
@@ -11,14 +11,14 @@
#define DEF_BTREE_ID(kwd, val, name) name,
-const char * const bch_btree_ids[] = {
+const char * const bch2_btree_ids[] = {
DEFINE_BCH_BTREE_IDS()
NULL
};
#undef DEF_BTREE_ID
-void bch_recalc_btree_reserve(struct bch_fs *c)
+void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;
@@ -42,7 +42,7 @@ static void __mca_data_free(struct bch_fs *c, struct btree *b)
free_pages((unsigned long) b->data, btree_page_order(c));
b->data = NULL;
- bch_btree_keys_free(b);
+ bch2_btree_keys_free(b);
}
static void mca_data_free(struct bch_fs *c, struct btree *b)
@@ -68,7 +68,7 @@ static void mca_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
if (!b->data)
goto err;
- if (bch_btree_keys_alloc(b, order, gfp))
+ if (bch2_btree_keys_alloc(b, order, gfp))
goto err;
c->btree_cache_used++;
@@ -96,7 +96,7 @@ static struct btree *mca_bucket_alloc(struct bch_fs *c, gfp_t gfp)
/* Btree in memory cache - hash table */
-void mca_hash_remove(struct bch_fs *c, struct btree *b)
+void bch2_btree_node_hash_remove(struct bch_fs *c, struct btree *b)
{
BUG_ON(btree_node_dirty(b));
@@ -109,7 +109,7 @@ void mca_hash_remove(struct bch_fs *c, struct btree *b)
bkey_i_to_extent(&b->key)->v._data[0] = 0;
}
-int mca_hash_insert(struct bch_fs *c, struct btree *b,
+int bch2_btree_node_hash_insert(struct bch_fs *c, struct btree *b,
unsigned level, enum btree_id id)
{
int ret;
@@ -170,9 +170,9 @@ static int mca_reap_notrace(struct bch_fs *c, struct btree *b, bool flush)
*/
if (btree_node_dirty(b)) {
if (verify_btree_ondisk(c))
- bch_btree_node_write(c, b, NULL, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, b, NULL, SIX_LOCK_intent, -1);
else
- __bch_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
+ __bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
}
/* wait for any in flight btree write */
@@ -191,12 +191,12 @@ static int mca_reap(struct bch_fs *c, struct btree *b, bool flush)
{
int ret = mca_reap_notrace(c, b, flush);
- trace_bcache_mca_reap(c, b, ret);
+ trace_btree_node_reap(c, b, ret);
return ret;
}
-static unsigned long bch_mca_scan(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long bch2_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct bch_fs *c = container_of(shrink, struct bch_fs,
btree_cache_shrink);
@@ -207,8 +207,6 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
unsigned long freed = 0;
unsigned i;
- u64 start_time = local_clock();
-
if (btree_shrinker_disabled(c))
return SHRINK_STOP;
@@ -260,7 +258,7 @@ restart:
if (!btree_node_accessed(b) &&
!mca_reap(c, b, false)) {
- /* can't call mca_hash_remove under btree_cache_lock */
+ /* can't call bch2_btree_node_hash_remove under btree_cache_lock */
freed++;
if (&t->list != &c->btree_cache)
list_move_tail(&c->btree_cache, &t->list);
@@ -268,7 +266,7 @@ restart:
mca_data_free(c, b);
mutex_unlock(&c->btree_cache_lock);
- mca_hash_remove(c, b);
+ bch2_btree_node_hash_remove(c, b);
six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
@@ -286,19 +284,11 @@ restart:
mutex_unlock(&c->btree_cache_lock);
out:
- bch_time_stats_update(&c->mca_scan_time, start_time);
-
- trace_bcache_mca_scan(c,
- touched * btree_pages(c),
- freed * btree_pages(c),
- can_free * btree_pages(c),
- sc->nr_to_scan);
-
return (unsigned long) freed * btree_pages(c);
}
-static unsigned long bch_mca_count(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long bch2_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct bch_fs *c = container_of(shrink, struct bch_fs,
btree_cache_shrink);
@@ -312,7 +302,7 @@ static unsigned long bch_mca_count(struct shrinker *shrink,
return mca_can_free(c) * btree_pages(c);
}
-void bch_fs_btree_exit(struct bch_fs *c)
+void bch2_fs_btree_exit(struct bch_fs *c)
{
struct btree *b;
unsigned i;
@@ -340,7 +330,7 @@ void bch_fs_btree_exit(struct bch_fs *c)
b = list_first_entry(&c->btree_cache, struct btree, list);
if (btree_node_dirty(b))
- bch_btree_complete_write(c, b, btree_current_write(b));
+ bch2_btree_complete_write(c, b, btree_current_write(b));
clear_btree_node_dirty(b);
mca_data_free(c, b);
@@ -359,7 +349,7 @@ void bch_fs_btree_exit(struct bch_fs *c)
rhashtable_destroy(&c->btree_cache_table);
}
-int bch_fs_btree_init(struct bch_fs *c)
+int bch2_fs_btree_init(struct bch_fs *c)
{
unsigned i;
int ret;
@@ -370,7 +360,7 @@ int bch_fs_btree_init(struct bch_fs *c)
c->btree_cache_table_init_done = true;
- bch_recalc_btree_reserve(c);
+ bch2_recalc_btree_reserve(c);
for (i = 0; i < c->btree_cache_reserve; i++)
if (!mca_bucket_alloc(c, GFP_KERNEL))
@@ -394,8 +384,8 @@ int bch_fs_btree_init(struct bch_fs *c)
list_del_init(&c->verify_data->list);
#endif
- c->btree_cache_shrink.count_objects = bch_mca_count;
- c->btree_cache_shrink.scan_objects = bch_mca_scan;
+ c->btree_cache_shrink.count_objects = bch2_mca_count;
+ c->btree_cache_shrink.scan_objects = bch2_mca_scan;
c->btree_cache_shrink.seeks = 4;
c->btree_cache_shrink.batch = btree_pages(c) * 2;
register_shrinker(&c->btree_cache_shrink);
@@ -409,16 +399,16 @@ int bch_fs_btree_init(struct bch_fs *c)
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void mca_cannibalize_unlock(struct bch_fs *c)
+void bch2_btree_node_cannibalize_unlock(struct bch_fs *c)
{
if (c->btree_cache_alloc_lock == current) {
- trace_bcache_mca_cannibalize_unlock(c);
+ trace_btree_node_cannibalize_unlock(c);
c->btree_cache_alloc_lock = NULL;
closure_wake_up(&c->mca_wait);
}
}
-int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+int bch2_btree_node_cannibalize_lock(struct bch_fs *c, struct closure *cl)
{
struct task_struct *old;
@@ -427,7 +417,7 @@ int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl)
goto success;
if (!cl) {
- trace_bcache_mca_cannibalize_lock_fail(c);
+ trace_btree_node_cannibalize_lock_fail(c);
return -ENOMEM;
}
@@ -441,11 +431,11 @@ int mca_cannibalize_lock(struct bch_fs *c, struct closure *cl)
goto success;
}
- trace_bcache_mca_cannibalize_lock_fail(c);
+ trace_btree_node_cannibalize_lock_fail(c);
return -EAGAIN;
success:
- trace_bcache_mca_cannibalize_lock(c);
+ trace_btree_node_cannibalize_lock(c);
return 0;
}
@@ -471,7 +461,7 @@ static struct btree *mca_cannibalize(struct bch_fs *c)
}
}
-struct btree *mca_alloc(struct bch_fs *c)
+struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
{
struct btree *b;
u64 start_time = local_clock();
@@ -521,9 +511,9 @@ out:
b->sib_u64s[1] = 0;
b->whiteout_u64s = 0;
b->uncompacted_whiteout_u64s = 0;
- bch_btree_keys_init(b, &c->expensive_debug_checks);
+ bch2_btree_keys_init(b, &c->expensive_debug_checks);
- bch_time_stats_update(&c->mca_alloc_time, start_time);
+ bch2_time_stats_update(&c->btree_node_mem_alloc_time, start_time);
return b;
err:
@@ -533,9 +523,9 @@ err:
list_del_init(&b->list);
mutex_unlock(&c->btree_cache_lock);
- mca_hash_remove(c, b);
+ bch2_btree_node_hash_remove(c, b);
- trace_bcache_mca_cannibalize(c);
+ trace_btree_node_cannibalize(c);
goto out;
}
@@ -544,20 +534,20 @@ err:
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
- const struct bkey_i *k,
- unsigned level,
- enum six_lock_type lock_type)
+static noinline struct btree *bch2_btree_node_fill(struct btree_iter *iter,
+ const struct bkey_i *k,
+ unsigned level,
+ enum six_lock_type lock_type)
{
struct bch_fs *c = iter->c;
struct btree *b;
- b = mca_alloc(c);
+ b = bch2_btree_node_mem_alloc(c);
if (IS_ERR(b))
return b;
bkey_copy(&b->key, k);
- if (mca_hash_insert(c, b, level, iter->btree_id)) {
+ if (bch2_btree_node_hash_insert(c, b, level, iter->btree_id)) {
/* raced with another fill: */
/* mark as unhashed... */
@@ -584,7 +574,7 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
- bch_btree_node_read(c, b);
+ bch2_btree_node_read(c, b);
six_unlock_write(&b->lock);
if (lock_type == SIX_LOCK_read)
@@ -602,9 +592,9 @@ static noinline struct btree *bch_btree_node_fill(struct btree_iter *iter,
* The btree node will have either a read or a write lock held, depending on
* the @write parameter.
*/
-struct btree *bch_btree_node_get(struct btree_iter *iter,
- const struct bkey_i *k, unsigned level,
- enum six_lock_type lock_type)
+struct btree *bch2_btree_node_get(struct btree_iter *iter,
+ const struct bkey_i *k, unsigned level,
+ enum six_lock_type lock_type)
{
struct btree *b;
struct bset_tree *t;
@@ -617,11 +607,11 @@ retry:
if (unlikely(!b)) {
/*
- * We must have the parent locked to call bch_btree_node_fill(),
+ * We must have the parent locked to call bch2_btree_node_fill(),
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch_btree_node_fill(iter, k, level, lock_type);
+ b = bch2_btree_node_fill(iter, k, level, lock_type);
/* We raced and found the btree node in the cache */
if (!b)
@@ -654,7 +644,7 @@ retry:
* when they're freed - and PTR_HASH() is zeroed out, which we
* check for after we lock the node.
*
- * Then, btree_node_relock() on the parent will fail - because
+ * Then, bch2_btree_node_relock() on the parent will fail - because
* the parent was modified, when the pointer to the node we want
* was removed - and we'll bail out:
*/
@@ -668,7 +658,7 @@ retry:
b->level != level ||
race_fault())) {
six_unlock_type(&b->lock, lock_type);
- if (btree_node_relock(iter, level + 1))
+ if (bch2_btree_node_relock(iter, level + 1))
goto retry;
return ERR_PTR(-EINTR);
@@ -702,8 +692,8 @@ retry:
return b;
}
-int bch_print_btree_node(struct bch_fs *c, struct btree *b,
- char *buf, size_t len)
+int bch2_print_btree_node(struct bch_fs *c, struct btree *b,
+ char *buf, size_t len)
{
const struct bkey_format *f = &b->format;
struct bset_stats stats;
@@ -711,9 +701,9 @@ int bch_print_btree_node(struct bch_fs *c, struct btree *b,
memset(&stats, 0, sizeof(stats));
- bch_val_to_text(c, BKEY_TYPE_BTREE, ptrs, sizeof(ptrs),
+ bch2_val_to_text(c, BKEY_TYPE_BTREE, ptrs, sizeof(ptrs),
bkey_i_to_s_c(&b->key));
- bch_btree_keys_stats(b, &stats);
+ bch2_btree_keys_stats(b, &stats);
return scnprintf(buf, len,
"l %u %llu:%llu - %llu:%llu:\n"
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 0d1c00c475f5..23f637ab64cd 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -1,29 +1,29 @@
#ifndef _BCACHE_BTREE_CACHE_H
#define _BCACHE_BTREE_CACHE_H
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_types.h"
struct btree_iter;
-extern const char * const bch_btree_ids[];
+extern const char * const bch2_btree_ids[];
-void bch_recalc_btree_reserve(struct bch_fs *);
+void bch2_recalc_btree_reserve(struct bch_fs *);
-void mca_hash_remove(struct bch_fs *, struct btree *);
-int mca_hash_insert(struct bch_fs *, struct btree *,
- unsigned, enum btree_id);
+void bch2_btree_node_hash_remove(struct bch_fs *, struct btree *);
+int bch2_btree_node_hash_insert(struct bch_fs *, struct btree *,
+ unsigned, enum btree_id);
-void mca_cannibalize_unlock(struct bch_fs *);
-int mca_cannibalize_lock(struct bch_fs *, struct closure *);
+void bch2_btree_node_cannibalize_unlock(struct bch_fs *);
+int bch2_btree_node_cannibalize_lock(struct bch_fs *, struct closure *);
-struct btree *mca_alloc(struct bch_fs *);
+struct btree *bch2_btree_node_mem_alloc(struct bch_fs *);
-struct btree *bch_btree_node_get(struct btree_iter *, const struct bkey_i *,
- unsigned, enum six_lock_type);
+struct btree *bch2_btree_node_get(struct btree_iter *, const struct bkey_i *,
+ unsigned, enum six_lock_type);
-void bch_fs_btree_exit(struct bch_fs *);
-int bch_fs_btree_init(struct bch_fs *);
+void bch2_fs_btree_exit(struct bch_fs *);
+int bch2_fs_btree_init(struct bch_fs *);
#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos) \
for ((_tbl) = rht_dereference_rcu((_c)->btree_cache_table.tbl, \
@@ -65,7 +65,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
#define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->btree_id].b)
-int bch_print_btree_node(struct bch_fs *, struct btree *,
+int bch2_print_btree_node(struct bch_fs *, struct btree *,
char *, size_t);
#endif /* _BCACHE_BTREE_CACHE_H */
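
Editor's note: the btree cache API above is a pure rename; the calling convention is unchanged. As a rough sketch of how these entry points fit together — based on the sequence visible in bch2_btree_root_read() later in this patch, with error handling elided and a hypothetical caller name — bringing a node into memory looks roughly like:

static struct btree *example_node_load(struct bch_fs *c, const struct bkey_i *k,
				       unsigned level, enum btree_id id)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	/* Reserve the right to evict (cannibalize) another cached node
	 * if the btree node cache is full: */
	do {
		ret = bch2_btree_node_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_node_cannibalize_unlock(c);
	/* real callers check IS_ERR(b) here */

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));

	bch2_btree_node_read(c, b);
	six_unlock_write(&b->lock);
	return b;
}

The point is only the ordering: cannibalize lock, allocate, unlock, hash, then read.
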
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index c86e7ac7abfd..0883b9b47621 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -3,7 +3,7 @@
* Copyright (C) 2014 Datera Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "bkey_methods.h"
#include "btree_locking.h"
@@ -53,7 +53,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
? btree_type_successor(b->btree_id, l->max)
: l->max;
- bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
+ bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, expected_min), c,
"btree node has incorrect min key: %llu:%llu != %llu:%llu",
b->data->min_key.inode,
b->data->min_key.offset,
@@ -65,14 +65,14 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
if (b->level > r->depth) {
l = &r->l[b->level - 1];
- bch_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
+ bch2_fs_inconsistent_on(bkey_cmp(b->data->min_key, l->min), c,
"btree node min doesn't match min of child nodes: %llu:%llu != %llu:%llu",
b->data->min_key.inode,
b->data->min_key.offset,
l->min.inode,
l->min.offset);
- bch_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
+ bch2_fs_inconsistent_on(bkey_cmp(b->data->max_key, l->max), c,
"btree node max doesn't match max of child nodes: %llu:%llu != %llu:%llu",
b->data->max_key.inode,
b->data->max_key.offset,
@@ -86,7 +86,7 @@ static void btree_node_range_checks(struct bch_fs *c, struct btree *b,
}
}
-u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
+u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
{
const struct bch_extent_ptr *ptr;
u8 max_stale = 0;
@@ -111,29 +111,29 @@ u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
/*
* For runtime mark and sweep:
*/
-static u8 bch_btree_mark_key(struct bch_fs *c, enum bkey_type type,
+static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
struct bkey_s_c k)
{
switch (type) {
case BKEY_TYPE_BTREE:
- bch_gc_mark_key(c, k, c->sb.btree_node_size, true);
+ bch2_gc_mark_key(c, k, c->sb.btree_node_size, true);
return 0;
case BKEY_TYPE_EXTENTS:
- bch_gc_mark_key(c, k, k.k->size, false);
- return bch_btree_key_recalc_oldest_gen(c, k);
+ bch2_gc_mark_key(c, k, k.k->size, false);
+ return bch2_btree_key_recalc_oldest_gen(c, k);
default:
BUG();
}
}
-u8 bch_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
- struct bkey_s_c k)
+u8 bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
+ struct bkey_s_c k)
{
atomic64_set(&c->key_version,
max_t(u64, k.k->version.lo,
atomic64_read(&c->key_version)));
- return bch_btree_mark_key(c, type, k);
+ return bch2_btree_mark_key(c, type, k);
}
static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b)
@@ -147,8 +147,8 @@ static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b)
for_each_btree_node_key_unpack(b, k, &iter,
btree_node_is_extents(b),
&unpacked) {
- bkey_debugcheck(c, b, k);
- stale = max(stale, bch_btree_mark_key(c,
+ bch2_bkey_debugcheck(c, b, k);
+ stale = max(stale, bch2_btree_mark_key(c,
btree_node_type(b), k));
}
@@ -178,7 +178,7 @@ static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
__gc_pos_set(c, new_pos);
}
-static int bch_gc_btree(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
{
struct btree_iter iter;
struct btree *b;
@@ -198,32 +198,32 @@ static int bch_gc_btree(struct bch_fs *c, enum btree_id btree_id)
for_each_btree_node(&iter, c, btree_id, POS_MIN, depth, b) {
btree_node_range_checks(c, b, &r);
- bch_verify_btree_nr_keys(b);
+ bch2_verify_btree_nr_keys(b);
should_rewrite = btree_gc_mark_node(c, b);
gc_pos_set(c, gc_pos_btree_node(b));
if (should_rewrite)
- bch_btree_node_rewrite(&iter, b, NULL);
+ bch2_btree_node_rewrite(&iter, b, NULL);
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
return ret;
mutex_lock(&c->btree_root_lock);
b = c->btree_roots[btree_id].b;
- bch_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
+ bch2_btree_mark_key(c, BKEY_TYPE_BTREE, bkey_i_to_s_c(&b->key));
gc_pos_set(c, gc_pos_btree_root(b->btree_id));
mutex_unlock(&c->btree_root_lock);
return 0;
}
-static void bch_mark_allocator_buckets(struct bch_fs *c)
+static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
struct bch_dev *ca;
struct open_bucket *ob;
@@ -234,11 +234,11 @@ static void bch_mark_allocator_buckets(struct bch_fs *c)
spin_lock(&ca->freelist_lock);
fifo_for_each_entry(i, &ca->free_inc, iter)
- bch_mark_alloc_bucket(ca, &ca->buckets[i], true);
+ bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
for (j = 0; j < RESERVE_NR; j++)
fifo_for_each_entry(i, &ca->free[j], iter)
- bch_mark_alloc_bucket(ca, &ca->buckets[i], true);
+ bch2_mark_alloc_bucket(ca, &ca->buckets[i], true);
spin_unlock(&ca->freelist_lock);
}
@@ -251,7 +251,7 @@ static void bch_mark_allocator_buckets(struct bch_fs *c)
mutex_lock(&ob->lock);
open_bucket_for_each_ptr(ob, ptr) {
ca = c->devs[ptr->dev];
- bch_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
+ bch2_mark_alloc_bucket(ca, PTR_BUCKET(ca, ptr), true);
}
mutex_unlock(&ob->lock);
}
@@ -263,12 +263,12 @@ static void mark_metadata_sectors(struct bch_dev *ca, u64 start, u64 end,
u64 b = start >> ca->bucket_bits;
do {
- bch_mark_metadata_bucket(ca, ca->buckets + b, type, true);
+ bch2_mark_metadata_bucket(ca, ca->buckets + b, type, true);
b++;
} while (b < end >> ca->bucket_bits);
}
-static void bch_dev_mark_superblocks(struct bch_dev *ca)
+static void bch2_dev_mark_superblocks(struct bch_dev *ca)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
unsigned i;
@@ -289,20 +289,20 @@ static void bch_dev_mark_superblocks(struct bch_dev *ca)
/*
* Mark non btree metadata - prios, journal
*/
-void bch_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
+void bch2_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
{
unsigned i;
u64 b;
lockdep_assert_held(&c->sb_lock);
- bch_dev_mark_superblocks(ca);
+ bch2_dev_mark_superblocks(ca);
spin_lock(&c->journal.lock);
for (i = 0; i < ca->journal.nr; i++) {
b = ca->journal.buckets[i];
- bch_mark_metadata_bucket(ca, ca->buckets + b,
+ bch2_mark_metadata_bucket(ca, ca->buckets + b,
BUCKET_JOURNAL, true);
}
@@ -313,14 +313,14 @@ void bch_mark_dev_metadata(struct bch_fs *c, struct bch_dev *ca)
for (i = 0; i < prio_buckets(ca) * 2; i++) {
b = ca->prio_buckets[i];
if (b)
- bch_mark_metadata_bucket(ca, ca->buckets + b,
+ bch2_mark_metadata_bucket(ca, ca->buckets + b,
BUCKET_PRIOS, true);
}
spin_unlock(&ca->prio_buckets_lock);
}
-static void bch_mark_metadata(struct bch_fs *c)
+static void bch2_mark_metadata(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
@@ -329,12 +329,12 @@ static void bch_mark_metadata(struct bch_fs *c)
gc_pos_set(c, gc_phase(GC_PHASE_SB_METADATA));
for_each_online_member(ca, c, i)
- bch_mark_dev_metadata(c, ca);
+ bch2_mark_dev_metadata(c, ca);
mutex_unlock(&c->sb_lock);
}
-/* Also see bch_pending_btree_node_free_insert_done() */
-static void bch_mark_pending_btree_node_frees(struct bch_fs *c)
+/* Also see bch2_pending_btree_node_free_insert_done() */
+static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
struct bch_fs_usage stats = { 0 };
struct btree_interior_update *as;
@@ -345,7 +345,7 @@ static void bch_mark_pending_btree_node_frees(struct bch_fs *c)
for_each_pending_btree_node_free(c, as, d)
if (d->index_update_done)
- __bch_gc_mark_key(c, bkey_i_to_s_c(&d->key),
+ __bch2_gc_mark_key(c, bkey_i_to_s_c(&d->key),
c->sb.btree_node_size, true,
&stats);
/*
@@ -359,7 +359,7 @@ static void bch_mark_pending_btree_node_frees(struct bch_fs *c)
/**
* bch_gc - recompute bucket marks and oldest_gen, rewrite btree nodes
*/
-void bch_gc(struct bch_fs *c)
+void bch2_gc(struct bch_fs *c)
{
struct bch_dev *ca;
struct bucket *g;
@@ -390,13 +390,13 @@ void bch_gc(struct bch_fs *c)
if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
return;
- trace_bcache_gc_start(c);
+ trace_gc_start(c);
/*
- * Do this before taking gc_lock - bch_disk_reservation_get() blocks on
+ * Do this before taking gc_lock - bch2_disk_reservation_get() blocks on
* gc_lock if sectors_available goes to 0:
*/
- bch_recalc_sectors_available(c);
+ bch2_recalc_sectors_available(c);
down_write(&c->gc_lock);
@@ -404,13 +404,13 @@ void bch_gc(struct bch_fs *c)
/*
* Indicates to buckets code that gc is now in progress - done under
- * usage_lock to avoid racing with bch_mark_key():
+ * usage_lock to avoid racing with bch2_mark_key():
*/
__gc_pos_set(c, GC_POS_MIN);
/* Save a copy of the existing bucket stats while we recompute them: */
for_each_member_device(ca, c, i) {
- ca->usage_cached = __bch_dev_usage_read(ca);
+ ca->usage_cached = __bch2_dev_usage_read(ca);
for_each_possible_cpu(cpu) {
struct bch_dev_usage *p =
per_cpu_ptr(ca->usage_percpu, cpu);
@@ -418,7 +418,7 @@ void bch_gc(struct bch_fs *c)
}
}
- c->usage_cached = __bch_fs_usage_read(c);
+ c->usage_cached = __bch2_fs_usage_read(c);
for_each_possible_cpu(cpu) {
struct bch_fs_usage *p =
per_cpu_ptr(c->usage_percpu, cpu);
@@ -442,12 +442,12 @@ void bch_gc(struct bch_fs *c)
}
/* Walk allocator's references: */
- bch_mark_allocator_buckets(c);
+ bch2_mark_allocator_buckets(c);
/* Walk btree: */
while (c->gc_pos.phase < (int) BTREE_ID_NR) {
int ret = c->btree_roots[c->gc_pos.phase].b
- ? bch_gc_btree(c, (int) c->gc_pos.phase)
+ ? bch2_gc_btree(c, (int) c->gc_pos.phase)
: 0;
if (ret) {
@@ -460,8 +460,8 @@ void bch_gc(struct bch_fs *c)
gc_pos_set(c, gc_phase(c->gc_pos.phase + 1));
}
- bch_mark_metadata(c);
- bch_mark_pending_btree_node_frees(c);
+ bch2_mark_metadata(c);
+ bch2_mark_pending_btree_node_frees(c);
for_each_member_device(ca, c, i)
atomic_long_set(&ca->saturated_count, 0);
@@ -470,15 +470,15 @@ void bch_gc(struct bch_fs *c)
gc_pos_set(c, gc_phase(GC_PHASE_DONE));
up_write(&c->gc_lock);
- trace_bcache_gc_end(c);
- bch_time_stats_update(&c->btree_gc_time, start_time);
+ trace_gc_end(c);
+ bch2_time_stats_update(&c->btree_gc_time, start_time);
/*
* Wake up allocator in case it was waiting for buckets
* because of not being able to inc gens
*/
for_each_member_device(ca, c, i)
- bch_wake_allocator(ca);
+ bch2_wake_allocator(ca);
}
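
Editor's note: the usage_cached / per-cpu loop near the top of bch2_gc() follows a snapshot-zero-recompute pattern: the pre-GC totals are saved off so readers still see sensible numbers, the per-cpu counters are reset, and the mark functions repopulate them as GC walks the btree. The hunk elides the per-cpu loop body, so the zeroing step below is an assumption; the example_* names are hypothetical and a single counter stands in for the real usage structs:

struct example_usage {
	u64 sectors;
};

static void example_gc_reset_usage(struct example_usage __percpu *usage,
				   struct example_usage *cached)
{
	int cpu;
	u64 total = 0;

	/* snapshot the pre-GC total... */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(usage, cpu)->sectors;
	cached->sectors = total;

	/* ...then zero the per-cpu counters so GC can recompute them
	 * from scratch as it marks keys */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(usage, cpu)->sectors = 0;
}
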
/* Btree coalescing */
@@ -497,8 +497,8 @@ static void recalc_packed_keys(struct btree *b)
btree_keys_account_key_add(&b->nr, 0, k);
}
-static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
- struct btree_iter *iter)
+static void bch2_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
+ struct btree_iter *iter)
{
struct btree *parent = iter->nodes[old_nodes[0]->level + 1];
struct bch_fs *c = iter->c;
@@ -512,7 +512,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
struct bkey_format new_format;
memset(new_nodes, 0, sizeof(new_nodes));
- bch_keylist_init(&keylist, NULL, 0);
+ bch2_keylist_init(&keylist, NULL, 0);
/* Count keys that are not deleted */
for (i = 0; i < GC_MERGE_NODES && old_nodes[i]; i++)
@@ -526,50 +526,51 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
DIV_ROUND_UP(u64s, nr_old_nodes - 1)) > blocks)
return;
- res = bch_btree_reserve_get(c, parent, nr_old_nodes,
+ res = bch2_btree_reserve_get(c, parent, nr_old_nodes,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE,
NULL);
if (IS_ERR(res)) {
- trace_bcache_btree_gc_coalesce_fail(c,
+ trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_RESERVE_GET);
return;
}
- if (bch_keylist_realloc(&keylist, NULL, 0,
+ if (bch2_keylist_realloc(&keylist, NULL, 0,
(BKEY_U64s + BKEY_EXTENT_U64s_MAX) * nr_old_nodes)) {
- trace_bcache_btree_gc_coalesce_fail(c,
+ trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC);
goto out;
}
/* Find a format that all keys in @old_nodes can pack into */
- bch_bkey_format_init(&format_state);
+ bch2_bkey_format_init(&format_state);
for (i = 0; i < nr_old_nodes; i++)
- __bch_btree_calc_format(&format_state, old_nodes[i]);
+ __bch2_btree_calc_format(&format_state, old_nodes[i]);
- new_format = bch_bkey_format_done(&format_state);
+ new_format = bch2_bkey_format_done(&format_state);
/* Check if repacking would make any nodes too big to fit */
for (i = 0; i < nr_old_nodes; i++)
- if (!bch_btree_node_format_fits(c, old_nodes[i], &new_format)) {
- trace_bcache_btree_gc_coalesce_fail(c,
+ if (!bch2_btree_node_format_fits(c, old_nodes[i], &new_format)) {
+ trace_btree_gc_coalesce_fail(c,
BTREE_GC_COALESCE_FAIL_FORMAT_FITS);
goto out;
}
- trace_bcache_btree_gc_coalesce(c, parent, nr_old_nodes);
+ trace_btree_gc_coalesce(c, parent, nr_old_nodes);
- as = bch_btree_interior_update_alloc(c);
+ as = bch2_btree_interior_update_alloc(c);
for (i = 0; i < nr_old_nodes; i++)
- bch_btree_interior_update_will_free_node(c, as, old_nodes[i]);
+ bch2_btree_interior_update_will_free_node(c, as, old_nodes[i]);
/* Repack everything with @new_format and sort down to one bset */
for (i = 0; i < nr_old_nodes; i++)
- new_nodes[i] = __btree_node_alloc_replacement(c, old_nodes[i],
- new_format, res);
+ new_nodes[i] =
+ __bch2_btree_node_alloc_replacement(c, old_nodes[i],
+ new_format, res);
/*
* Conceptually we concatenate the nodes together and slice them
@@ -607,7 +608,7 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
set_btree_bset_end(n1, n1->set);
six_unlock_write(&n2->lock);
- bch_btree_node_free_never_inserted(c, n2);
+ bch2_btree_node_free_never_inserted(c, n2);
six_unlock_intent(&n2->lock);
memmove(new_nodes + i - 1,
@@ -643,10 +644,10 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
recalc_packed_keys(n);
btree_node_reset_sib_u64s(n);
- bch_btree_build_aux_trees(n);
+ bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock);
- bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
}
/*
@@ -668,35 +669,35 @@ static void bch_coalesce_nodes(struct btree *old_nodes[GC_MERGE_NODES],
bkey_init(&delete.k);
delete.k.p = old_nodes[i]->key.k.p;
- bch_keylist_add_in_order(&keylist, &delete);
+ bch2_keylist_add_in_order(&keylist, &delete);
next:
i = i;
}
/*
- * Keys for the new nodes get inserted: bch_btree_insert_keys() only
+ * Keys for the new nodes get inserted: bch2_btree_insert_keys() only
* does the lookup once and thus expects the keys to be in sorted order
* so we have to make sure the new keys are correctly ordered with
* respect to the deleted keys added in the previous loop
*/
for (i = 0; i < nr_new_nodes; i++)
- bch_keylist_add_in_order(&keylist, &new_nodes[i]->key);
+ bch2_keylist_add_in_order(&keylist, &new_nodes[i]->key);
/* Insert the newly coalesced nodes */
- bch_btree_insert_node(parent, iter, &keylist, res, as);
+ bch2_btree_insert_node(parent, iter, &keylist, res, as);
- BUG_ON(!bch_keylist_empty(&keylist));
+ BUG_ON(!bch2_keylist_empty(&keylist));
BUG_ON(iter->nodes[old_nodes[0]->level] != old_nodes[0]);
- BUG_ON(!bch_btree_iter_node_replace(iter, new_nodes[0]));
+ BUG_ON(!bch2_btree_iter_node_replace(iter, new_nodes[0]));
for (i = 0; i < nr_new_nodes; i++)
- btree_open_bucket_put(c, new_nodes[i]);
+ bch2_btree_open_bucket_put(c, new_nodes[i]);
/* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) {
- bch_btree_node_free_inmem(iter, old_nodes[i]);
+ bch2_btree_node_free_inmem(iter, old_nodes[i]);
six_unlock_intent(&old_nodes[i]->lock);
/*
@@ -714,11 +715,11 @@ next:
}
}
out:
- bch_keylist_free(&keylist, NULL);
- bch_btree_reserve_put(c, res);
+ bch2_keylist_free(&keylist, NULL);
+ bch2_btree_reserve_put(c, res);
}
-static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
+static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
{
struct btree_iter iter;
struct btree *b;
@@ -757,7 +758,7 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
}
memset(merge + i, 0, (GC_MERGE_NODES - i) * sizeof(merge[0]));
- bch_coalesce_nodes(merge, &iter);
+ bch2_coalesce_nodes(merge, &iter);
for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
lock_seq[i] = merge[i]->lock.state.seq;
@@ -767,11 +768,11 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
lock_seq[0] = merge[0]->lock.state.seq;
if (test_bit(BCH_FS_GC_STOPPING, &c->flags)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return -ESHUTDOWN;
}
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
/*
* If the parent node wasn't relocked, it might have been split
@@ -783,13 +784,13 @@ static int bch_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
memset(merge + 1, 0,
(GC_MERGE_NODES - 1) * sizeof(merge[0]));
}
- return bch_btree_iter_unlock(&iter);
+ return bch2_btree_iter_unlock(&iter);
}
/**
* bch_coalesce - coalesce adjacent nodes with low occupancy
*/
-void bch_coalesce(struct bch_fs *c)
+void bch2_coalesce(struct bch_fs *c)
{
u64 start_time;
enum btree_id id;
@@ -798,12 +799,12 @@ void bch_coalesce(struct bch_fs *c)
return;
down_read(&c->gc_lock);
- trace_bcache_gc_coalesce_start(c);
+ trace_gc_coalesce_start(c);
start_time = local_clock();
for (id = 0; id < BTREE_ID_NR; id++) {
int ret = c->btree_roots[id].b
- ? bch_coalesce_btree(c, id)
+ ? bch2_coalesce_btree(c, id)
: 0;
if (ret) {
@@ -814,12 +815,12 @@ void bch_coalesce(struct bch_fs *c)
}
}
- bch_time_stats_update(&c->btree_coalesce_time, start_time);
- trace_bcache_gc_coalesce_end(c);
+ bch2_time_stats_update(&c->btree_coalesce_time, start_time);
+ trace_gc_coalesce_end(c);
up_read(&c->gc_lock);
}
-static int bch_gc_thread(void *arg)
+static int bch2_gc_thread(void *arg)
{
struct bch_fs *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
@@ -844,16 +845,16 @@ static int bch_gc_thread(void *arg)
break;
}
- bch_io_clock_schedule_timeout(clock, next);
+ bch2_io_clock_schedule_timeout(clock, next);
try_to_freeze();
}
last = atomic_long_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- bch_gc(c);
+ bch2_gc(c);
if (!btree_gc_coalesce_disabled(c))
- bch_coalesce(c);
+ bch2_coalesce(c);
debug_check_no_locks_held();
}
@@ -861,7 +862,7 @@ static int bch_gc_thread(void *arg)
return 0;
}
-void bch_gc_thread_stop(struct bch_fs *c)
+void bch2_gc_thread_stop(struct bch_fs *c)
{
set_bit(BCH_FS_GC_STOPPING, &c->flags);
@@ -872,13 +873,13 @@ void bch_gc_thread_stop(struct bch_fs *c)
clear_bit(BCH_FS_GC_STOPPING, &c->flags);
}
-int bch_gc_thread_start(struct bch_fs *c)
+int bch2_gc_thread_start(struct bch_fs *c)
{
struct task_struct *p;
BUG_ON(c->gc_thread);
- p = kthread_create(bch_gc_thread, c, "bcache_gc");
+ p = kthread_create(bch2_gc_thread, c, "bcache_gc");
if (IS_ERR(p))
return PTR_ERR(p);
@@ -889,7 +890,7 @@ int bch_gc_thread_start(struct bch_fs *c)
/* Initial GC computes bucket marks during startup */
-static void bch_initial_gc_btree(struct bch_fs *c, enum btree_id id)
+static void bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
{
struct btree_iter iter;
struct btree *b;
@@ -915,29 +916,29 @@ static void bch_initial_gc_btree(struct bch_fs *c, enum btree_id id)
for_each_btree_node_key_unpack(b, k, &node_iter,
btree_node_is_extents(b),
&unpacked)
- bch_btree_mark_key_initial(c, btree_node_type(b), k);
+ bch2_btree_mark_key_initial(c, btree_node_type(b), k);
}
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
- bch_btree_mark_key(c, BKEY_TYPE_BTREE,
+ bch2_btree_mark_key(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&c->btree_roots[id].b->key));
}
-int bch_initial_gc(struct bch_fs *c, struct list_head *journal)
+int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
{
enum btree_id id;
for (id = 0; id < BTREE_ID_NR; id++)
- bch_initial_gc_btree(c, id);
+ bch2_initial_gc_btree(c, id);
if (journal)
- bch_journal_mark(c, journal);
+ bch2_journal_mark(c, journal);
- bch_mark_metadata(c);
+ bch2_mark_metadata(c);
/*
* Skip past versions that might have possibly been used (as nonces),
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index f1794fdf4378..07210d33ac65 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -5,15 +5,15 @@
enum bkey_type;
-void bch_coalesce(struct bch_fs *);
-void bch_gc(struct bch_fs *);
-void bch_gc_thread_stop(struct bch_fs *);
-int bch_gc_thread_start(struct bch_fs *);
-int bch_initial_gc(struct bch_fs *, struct list_head *);
-u8 bch_btree_key_recalc_oldest_gen(struct bch_fs *, struct bkey_s_c);
-u8 bch_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
+void bch2_coalesce(struct bch_fs *);
+void bch2_gc(struct bch_fs *);
+void bch2_gc_thread_stop(struct bch_fs *);
+int bch2_gc_thread_start(struct bch_fs *);
+int bch2_initial_gc(struct bch_fs *, struct list_head *);
+u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *, struct bkey_s_c);
+u8 bch2_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
struct bkey_s_c);
-void bch_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
+void bch2_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
/*
* For concurrent mark and sweep (with other index updates), we define a total
@@ -28,7 +28,7 @@ void bch_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
* between the updater adding/removing the reference and updating the GC marks;
* without that, we would at best double count sometimes.
*
- * That part is important - whenever calling bch_mark_pointers(), a lock _must_
+ * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
* be held that prevents GC from passing the position the updater is at.
*
* (What about the start of gc, when we're clearing all the marks? GC clears the
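
Editor's note: a minimal sketch of the ordering rule this comment describes, with a single u64 standing in for the real (phase, btree position) pair — illustrative only, not the actual gc_pos machinery:

struct example_gc_state {
	u64 pos;	/* current GC position, in the total order above */
};

static bool example_gc_will_visit(struct example_gc_state *gc, u64 update_pos)
{
	/*
	 * If GC has not yet reached update_pos, GC will account the update
	 * when it gets there; otherwise the updater must adjust the marks
	 * itself. A lock preventing GC from crossing update_pos must be
	 * held across this check and the mark update, or the change could
	 * be counted twice (or missed entirely).
	 */
	return gc->pos < update_pos;
}
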
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 13e280cc4e18..728cbcd9f220 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_update.h"
@@ -231,16 +231,16 @@ static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
BUG_ON(new_size < l.k.size);
- bch_key_resize(&l.k, new_size);
+ bch2_key_resize(&l.k, new_size);
if (bkey_cmp(l.k.p, r.k.p) >= 0)
continue;
- bch_cut_front(l.k.p, &r);
+ bch2_cut_front(l.k.p, &r);
}
if (prev) {
- if (!bkey_pack(out, &l, f)) {
+ if (!bch2_bkey_pack(out, &l, f)) {
BUG_ON(l_packed);
bkey_copy(out, &l);
}
@@ -253,7 +253,7 @@ static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
}
if (prev) {
- if (!bkey_pack(out, &l, f)) {
+ if (!bch2_bkey_pack(out, &l, f)) {
BUG_ON(l_packed);
bkey_copy(out, &l);
}
@@ -285,7 +285,7 @@ static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
return 0;
}
-bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
+bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
enum compact_mode mode)
{
const struct bkey_format *f = &b->format;
@@ -377,7 +377,7 @@ bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
i->u64s = cpu_to_le16((u64 *) out - i->_data);
set_btree_bset_end(b, t);
- bch_bset_set_no_aux_tree(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
}
}
@@ -410,15 +410,15 @@ bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
btree_bounce_free(c, order, used_mempool, whiteouts);
if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
- bch_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
bch_btree_keys_u64s_remaining(c, b);
- bch_verify_btree_nr_keys(b);
+ bch2_verify_btree_nr_keys(b);
return true;
}
-static bool bch_drop_whiteouts(struct btree *b)
+static bool bch2_drop_whiteouts(struct btree *b)
{
struct bset_tree *t;
bool ret = false;
@@ -456,11 +456,11 @@ static bool bch_drop_whiteouts(struct btree *b)
}
i->u64s = cpu_to_le16((u64 *) out - i->_data);
- bch_bset_set_no_aux_tree(b, t);
+ bch2_bset_set_no_aux_tree(b, t);
ret = true;
}
- bch_verify_btree_nr_keys(b);
+ bch2_verify_btree_nr_keys(b);
return ret;
}
@@ -593,7 +593,7 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
if (sorting_entire_node)
- bch_time_stats_update(&c->btree_sort_time, start_time);
+ bch2_time_stats_update(&c->btree_sort_time, start_time);
/* Make sure we preserve bset journal_seq: */
for (t = b->set + start_idx + 1;
@@ -639,11 +639,11 @@ static void btree_node_sort(struct bch_fs *c, struct btree *b,
b->nr.bset_u64s[i] = 0;
set_btree_bset_end(b, &b->set[start_idx]);
- bch_bset_set_no_aux_tree(b, &b->set[start_idx]);
+ bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
btree_bounce_free(c, order, used_mempool, out);
- bch_verify_btree_nr_keys(b);
+ bch2_verify_btree_nr_keys(b);
}
/* Sort + repack in a new format: */
@@ -659,15 +659,15 @@ static struct btree_nr_keys sort_repack(struct bset *dst,
memset(&nr, 0, sizeof(nr));
- while ((in = bch_btree_node_iter_next_all(src_iter, src))) {
+ while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
if (filter_whiteouts && bkey_whiteout(in))
continue;
- if (bch_bkey_transform(out_f, out, bkey_packed(in)
- ? in_f : &bch_bkey_format_current, in))
+ if (bch2_bkey_transform(out_f, out, bkey_packed(in)
+ ? in_f : &bch2_bkey_format_current, in))
out->format = KEY_FORMAT_LOCAL_BTREE;
else
- bkey_unpack(src, (void *) out, in);
+ bch2_bkey_unpack(src, (void *) out, in);
btree_keys_account_key_add(&nr, 0, out);
out = bkey_next(out);
@@ -693,7 +693,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
memset(&nr, 0, sizeof(nr));
- while ((k = bch_btree_node_iter_next_all(iter, src))) {
+ while ((k = bch2_btree_node_iter_next_all(iter, src))) {
if (filter_whiteouts && bkey_whiteout(k))
continue;
@@ -701,7 +701,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
* The filter might modify pointers, so we have to unpack the
* key and values to &tmp.k:
*/
- bkey_unpack(src, &tmp.k, k);
+ bch2_bkey_unpack(src, &tmp.k, k);
if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
continue;
@@ -718,7 +718,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
* copy the current key - but first pack prev (in place):
*/
if (prev) {
- bkey_pack(prev, (void *) prev, out_f);
+ bch2_bkey_pack(prev, (void *) prev, out_f);
btree_keys_account_key_add(&nr, 0, prev);
prev = bkey_next(prev);
@@ -730,7 +730,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
}
if (prev) {
- bkey_pack(prev, (void *) prev, out_f);
+ bch2_bkey_pack(prev, (void *) prev, out_f);
btree_keys_account_key_add(&nr, 0, prev);
out = bkey_next(prev);
} else {
@@ -741,7 +741,7 @@ static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
return nr;
}
-void bch_btree_sort_into(struct bch_fs *c,
+void bch2_btree_sort_into(struct bch_fs *c,
struct btree *dst,
struct btree *src)
{
@@ -751,9 +751,9 @@ void bch_btree_sort_into(struct bch_fs *c,
BUG_ON(dst->nsets != 1);
- bch_bset_set_no_aux_tree(dst, dst->set);
+ bch2_bset_set_no_aux_tree(dst, dst->set);
- bch_btree_node_iter_init_from_start(&src_iter, src,
+ bch2_btree_node_iter_init_from_start(&src_iter, src,
btree_node_is_extents(src));
if (btree_node_ops(src)->key_normalize ||
@@ -770,7 +770,7 @@ void bch_btree_sort_into(struct bch_fs *c,
&dst->format,
true);
- bch_time_stats_update(&c->btree_sort_time, start_time);
+ bch2_time_stats_update(&c->btree_sort_time, start_time);
set_btree_bset_end(dst, dst->set);
@@ -779,7 +779,7 @@ void bch_btree_sort_into(struct bch_fs *c,
dst->nr.packed_keys += nr.packed_keys;
dst->nr.unpacked_keys += nr.unpacked_keys;
- bch_verify_btree_nr_keys(dst);
+ bch2_verify_btree_nr_keys(dst);
}
#define SORT_CRIT (4096 / sizeof(u64))
@@ -814,12 +814,12 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b,
return ret;
}
-void bch_btree_build_aux_trees(struct btree *b)
+void bch2_btree_build_aux_trees(struct btree *b)
{
struct bset_tree *t;
for_each_bset(b, t)
- bch_bset_build_aux_tree(b, t,
+ bch2_bset_build_aux_tree(b, t,
bset_unwritten(b, bset(b, t)) &&
t == bset_tree_last(b));
}
@@ -833,7 +833,7 @@ void bch_btree_build_aux_trees(struct btree *b)
*
 * Returns true if we sorted (i.e. invalidated iterators)
*/
-void bch_btree_init_next(struct bch_fs *c, struct btree *b,
+void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
struct btree_node_entry *bne;
@@ -846,12 +846,12 @@ void bch_btree_init_next(struct bch_fs *c, struct btree *b,
bne = want_new_bset(c, b);
if (bne)
- bch_bset_init_next(b, &bne->keys);
+ bch2_bset_init_next(b, &bne->keys);
- bch_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
if (iter && did_sort)
- bch_btree_iter_reinit_node(iter, b);
+ bch2_btree_iter_reinit_node(iter, b);
}
static struct nonce btree_nonce(struct btree *b,
@@ -868,12 +868,12 @@ static struct nonce btree_nonce(struct btree *b,
static void bset_encrypt(struct bch_fs *c, struct bset *i, struct nonce nonce)
{
- bch_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
+ bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
vstruct_end(i) - (void *) i->_data);
}
#define btree_node_error(b, c, ptr, fmt, ...) \
- bch_fs_inconsistent(c, \
+ bch2_fs_inconsistent(c, \
"btree node error at btree %u level %u/%u bucket %zu block %u u64s %u: " fmt,\
(b)->btree_id, (b)->level, btree_node_root(c, b) \
? btree_node_root(c, b)->level : -1, \
@@ -938,15 +938,15 @@ static const char *validate_bset(struct bch_fs *c, struct btree *b,
}
if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
- bch_bkey_swab(btree_node_type(b), &b->format, k);
+ bch2_bkey_swab(btree_node_type(b), &b->format, k);
u = bkey_disassemble(b, k, &tmp);
- invalid = btree_bkey_invalid(c, b, u);
+ invalid = bch2_btree_bkey_invalid(c, b, u);
if (invalid) {
char buf[160];
- bch_bkey_val_to_text(c, btree_node_type(b),
+ bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), u);
btree_node_error(b, c, ptr,
"invalid bkey %s: %s", buf, invalid);
@@ -999,7 +999,7 @@ static bool extent_contains_ptr(struct bkey_s_c_extent e,
return false;
}
-void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
+void bch2_btree_node_read_done(struct bch_fs *c, struct btree *b,
struct bch_dev *ca,
const struct bch_extent_ptr *ptr)
{
@@ -1015,10 +1015,10 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
int ret;
iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
- __bch_btree_node_iter_init(iter, btree_node_is_extents(b));
+ __bch2_btree_node_iter_init(iter, btree_node_is_extents(b));
err = "dynamic fault";
- if (bch_meta_read_fault("btree"))
+ if (bch2_meta_read_fault("btree"))
goto err;
while (b->written < c->sb.btree_node_size) {
@@ -1036,7 +1036,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
goto err;
err = "unknown checksum type";
- if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
+ if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
goto err;
/* XXX: retry checksum errors */
@@ -1045,10 +1045,10 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
err = "bad checksum";
- if (bch_crc_cmp(csum, b->data->csum))
+ if (bch2_crc_cmp(csum, b->data->csum))
goto err;
- bch_encrypt(c, BSET_CSUM_TYPE(i), nonce,
+ bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
&b->data->flags,
(void *) &b->data->keys -
(void *) &b->data->flags);
@@ -1064,8 +1064,8 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
u64 *p = (u64 *) &b->data->ptr;
*p = swab64(*p);
- bch_bpos_swab(&b->data->min_key);
- bch_bpos_swab(&b->data->max_key);
+ bch2_bpos_swab(&b->data->min_key);
+ bch2_bpos_swab(&b->data->max_key);
}
err = "incorrect btree id";
@@ -1085,7 +1085,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
b->data->ptr))
goto err;
- err = bch_bkey_format_validate(&b->data->format);
+ err = bch2_bkey_format_validate(&b->data->format);
if (err)
goto err;
@@ -1100,7 +1100,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
break;
err = "unknown checksum type";
- if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
+ if (!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
goto err;
nonce = btree_nonce(b, i, b->written << 9);
@@ -1122,18 +1122,18 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
b->written += sectors;
err = "insufficient memory";
- ret = bch_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
+ ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
if (ret < 0)
goto err;
if (ret)
continue;
- __bch_btree_node_iter_push(iter, b,
+ __bch2_btree_node_iter_push(iter, b,
i->start,
vstruct_idx(i, whiteout_u64s));
- __bch_btree_node_iter_push(iter, b,
+ __bch2_btree_node_iter_push(iter, b,
vstruct_idx(i, whiteout_u64s),
vstruct_last(i));
}
@@ -1149,8 +1149,8 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
sorted->keys.u64s = 0;
b->nr = btree_node_is_extents(b)
- ? bch_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
- : bch_key_sort_fix_overlapping(&sorted->keys, b, iter);
+ ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
+ : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data;
@@ -1163,7 +1163,7 @@ void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted);
- bch_bset_build_aux_tree(b, b->set, false);
+ bch2_bset_build_aux_tree(b, b->set, false);
set_needs_whiteout(btree_bset_first(b));
@@ -1177,16 +1177,16 @@ err:
goto out;
}
-void bch_btree_node_read(struct bch_fs *c, struct btree *b)
+void bch2_btree_node_read(struct bch_fs *c, struct btree *b)
{
uint64_t start_time = local_clock();
struct bio *bio;
struct extent_pick_ptr pick;
- trace_bcache_btree_read(c, b);
+ trace_btree_read(c, b);
- pick = bch_btree_pick_ptr(c, b);
- if (bch_fs_fatal_err_on(!pick.ca, c,
+ pick = bch2_btree_pick_ptr(c, b);
+ if (bch2_fs_fatal_err_on(!pick.ca, c,
"no cache device for btree node")) {
set_btree_node_read_error(b);
return;
@@ -1197,26 +1197,26 @@ void bch_btree_node_read(struct bch_fs *c, struct btree *b)
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_iter.bi_size = btree_bytes(c);
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
- bch_bio_map(bio, b->data);
+ bch2_bio_map(bio, b->data);
submit_bio_wait(bio);
- if (bch_dev_fatal_io_err_on(bio->bi_error,
+ if (bch2_dev_fatal_io_err_on(bio->bi_error,
pick.ca, "IO error reading bucket %zu",
PTR_BUCKET_NR(pick.ca, &pick.ptr)) ||
- bch_meta_read_fault("btree")) {
+ bch2_meta_read_fault("btree")) {
set_btree_node_read_error(b);
goto out;
}
- bch_btree_node_read_done(c, b, pick.ca, &pick.ptr);
- bch_time_stats_update(&c->btree_read_time, start_time);
+ bch2_btree_node_read_done(c, b, pick.ca, &pick.ptr);
+ bch2_time_stats_update(&c->btree_read_time, start_time);
out:
bio_put(bio);
percpu_ref_put(&pick.ca->io_ref);
}
-int bch_btree_root_read(struct bch_fs *c, enum btree_id id,
+int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
const struct bkey_i *k, unsigned level)
{
struct closure cl;
@@ -1226,19 +1226,19 @@ int bch_btree_root_read(struct bch_fs *c, enum btree_id id,
closure_init_stack(&cl);
do {
- ret = mca_cannibalize_lock(c, &cl);
+ ret = bch2_btree_node_cannibalize_lock(c, &cl);
closure_sync(&cl);
} while (ret);
- b = mca_alloc(c);
- mca_cannibalize_unlock(c);
+ b = bch2_btree_node_mem_alloc(c);
+ bch2_btree_node_cannibalize_unlock(c);
BUG_ON(IS_ERR(b));
bkey_copy(&b->key, k);
- BUG_ON(mca_hash_insert(c, b, level, id));
+ BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
- bch_btree_node_read(c, b);
+ bch2_btree_node_read(c, b);
six_unlock_write(&b->lock);
if (btree_node_read_error(b)) {
@@ -1246,16 +1246,16 @@ int bch_btree_root_read(struct bch_fs *c, enum btree_id id,
return -EIO;
}
- bch_btree_set_root_initial(c, b, NULL);
+ bch2_btree_set_root_initial(c, b, NULL);
six_unlock_intent(&b->lock);
return 0;
}
-void bch_btree_complete_write(struct bch_fs *c, struct btree *b,
+void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
struct btree_write *w)
{
- bch_journal_pin_drop(&c->journal, &w->journal);
+ bch2_journal_pin_drop(&c->journal, &w->journal);
closure_wake_up(&w->wait);
}
@@ -1264,14 +1264,14 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b)
struct btree_write *w = btree_prev_write(b);
/*
- * Before calling bch_btree_complete_write() - if the write errored, we
+ * Before calling bch2_btree_complete_write() - if the write errored, we
* have to halt new journal writes before they see this btree node
* write as completed:
*/
if (btree_node_write_error(b))
- bch_journal_halt(&c->journal);
+ bch2_journal_halt(&c->journal);
- bch_btree_complete_write(c, b, w);
+ bch2_btree_complete_write(c, b, w);
btree_node_io_unlock(b);
}
@@ -1284,8 +1284,8 @@ static void btree_node_write_endio(struct bio *bio)
struct closure *cl = !wbio->split ? wbio->cl : NULL;
struct bch_dev *ca = wbio->ca;
- if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
- bch_meta_write_fault("btree"))
+ if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
+ bch2_meta_write_fault("btree"))
set_btree_node_write_error(b);
if (wbio->bounce)
@@ -1309,7 +1309,7 @@ static void btree_node_write_endio(struct bio *bio)
percpu_ref_put(&ca->io_ref);
}
-void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
struct closure *parent,
enum six_lock_type lock_type_held,
int idx_to_write)
@@ -1370,10 +1370,10 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
if (lock_type_held == SIX_LOCK_intent) {
six_lock_write(&b->lock);
- __bch_compact_whiteouts(c, b, COMPACT_WRITTEN);
+ __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
six_unlock_write(&b->lock);
} else {
- __bch_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
+ __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
}
BUG_ON(b->uncompacted_whiteout_u64s);
@@ -1448,12 +1448,12 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
BUG_ON(i->seq != b->data->keys.seq);
i->version = cpu_to_le16(BCACHE_BSET_VERSION);
- SET_BSET_CSUM_TYPE(i, bch_meta_checksum_type(c));
+ SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
nonce = btree_nonce(b, i, b->written << 9);
if (bn) {
- bch_encrypt(c, BSET_CSUM_TYPE(i), nonce,
+ bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce,
&bn->flags,
(void *) &b->data->keys -
(void *) &b->data->flags);
@@ -1479,7 +1479,7 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size);
- trace_bcache_btree_write(b, bytes_to_write, sectors_to_write);
+ trace_btree_write(b, bytes_to_write, sectors_to_write);
/*
* We handle btree write errors by immediately halting the journal -
@@ -1491,10 +1491,10 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
* reflect that those writes were done and the data flushed from the
* journal:
*
- * Make sure to update b->written so bch_btree_init_next() doesn't
+ * Make sure to update b->written so bch2_btree_init_next() doesn't
* break:
*/
- if (bch_journal_error(&c->journal) ||
+ if (bch2_journal_error(&c->journal) ||
c->opts.nochanges) {
set_btree_node_noevict(b);
b->written += sectors_to_write;
@@ -1520,7 +1520,7 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
if (parent)
closure_get(parent);
- bch_bio_map(bio, data);
+ bch2_bio_map(bio, data);
/*
* If we're appending to a leaf node, we don't technically need FUA -
@@ -1549,13 +1549,13 @@ void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
b->written += sectors_to_write;
- bch_submit_wbio_replicas(wbio, c, &k.key);
+ bch2_submit_wbio_replicas(wbio, c, &k.key);
}
/*
* Work that must be done with write lock held:
*/
-bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
+bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
bool invalidated_iter = false;
struct btree_node_entry *bne;
@@ -1586,13 +1586,13 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
btree_node_sort(c, b, NULL, 0, b->nsets, true);
invalidated_iter = true;
} else {
- invalidated_iter = bch_drop_whiteouts(b);
+ invalidated_iter = bch2_drop_whiteouts(b);
}
for_each_bset(b, t)
set_needs_whiteout(bset(b, t));
- bch_btree_verify(c, b);
+ bch2_btree_verify(c, b);
/*
* If later we don't unconditionally sort down to a single bset, we have
@@ -1602,9 +1602,9 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
bne = want_new_bset(c, b);
if (bne)
- bch_bset_init_next(b, &bne->keys);
+ bch2_bset_init_next(b, &bne->keys);
- bch_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
return invalidated_iter;
}
@@ -1612,7 +1612,7 @@ bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
/*
* Use this one if the node is intent locked:
*/
-void bch_btree_node_write(struct bch_fs *c, struct btree *b,
+void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
struct closure *parent,
enum six_lock_type lock_type_held,
int idx_to_write)
@@ -1622,33 +1622,33 @@ void bch_btree_node_write(struct bch_fs *c, struct btree *b,
if (lock_type_held == SIX_LOCK_intent ||
six_trylock_convert(&b->lock, SIX_LOCK_read,
SIX_LOCK_intent)) {
- __bch_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write);
+ __bch2_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write);
six_lock_write(&b->lock);
- bch_btree_post_write_cleanup(c, b);
+ bch2_btree_post_write_cleanup(c, b);
six_unlock_write(&b->lock);
if (lock_type_held == SIX_LOCK_read)
six_lock_downgrade(&b->lock);
} else {
- __bch_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write);
+ __bch2_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write);
}
}
-static void bch_btree_node_write_dirty(struct bch_fs *c, struct btree *b,
+static void bch2_btree_node_write_dirty(struct bch_fs *c, struct btree *b,
struct closure *parent)
{
six_lock_read(&b->lock);
BUG_ON(b->level);
- bch_btree_node_write(c, b, parent, SIX_LOCK_read, -1);
+ bch2_btree_node_write(c, b, parent, SIX_LOCK_read, -1);
six_unlock_read(&b->lock);
}
/*
* Write all dirty btree nodes to disk, including roots
*/
-void bch_btree_flush(struct bch_fs *c)
+void bch2_btree_flush(struct bch_fs *c)
{
struct closure cl;
struct btree *b;
@@ -1672,11 +1672,11 @@ restart:
rht_for_each_entry_rcu(b, pos, tbl, i, hash)
/*
* XXX - locking for b->level, when called from
- * bch_journal_move()
+ * bch2_journal_move()
*/
if (!b->level && btree_node_dirty(b)) {
rcu_read_unlock();
- bch_btree_node_write_dirty(c, b, &cl);
+ bch2_btree_node_write_dirty(c, b, &cl);
dropped_lock = true;
rcu_read_lock();
goto restart;
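
Editor's note: the restart loop above is the usual pattern for doing blocking work while walking an RCU-protected structure: drop the RCU read lock before blocking, then restart the walk because the structure may have changed in the meantime. A generic, self-contained sketch of the same pattern (not bcachefs code; the example_* names are hypothetical):

#include <linux/rculist.h>

struct example_node {
	struct list_head list;	/* RCU-protected list membership */
	bool dirty;
};

/* hypothetical blocking writeback; clears n->dirty on completion */
static void example_write_out(struct example_node *n);

static void example_flush_all(struct list_head *head)
{
	struct example_node *n;

restart:
	rcu_read_lock();
	list_for_each_entry_rcu(n, head, list)
		if (n->dirty) {
			rcu_read_unlock();	/* can't block under RCU */
			example_write_out(n);
			goto restart;		/* list may have changed */
		}
	rcu_read_unlock();
}
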
@@ -1702,7 +1702,7 @@ restart:
* that the journal has been flushed so that all the bsets we compacted should
* be visible.
*/
-void bch_btree_node_flush_journal_entries(struct bch_fs *c,
+void bch2_btree_node_flush_journal_entries(struct bch_fs *c,
struct btree *b,
struct closure *cl)
{
@@ -1718,7 +1718,7 @@ void bch_btree_node_flush_journal_entries(struct bch_fs *c,
u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);
if (seq) {
- bch_journal_flush_seq_async(&c->journal, seq, cl);
+ bch2_journal_flush_seq_async(&c->journal, seq, cl);
break;
}
}
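
Editor's note: bch2_btree_node_flush_journal_entries() only flushes the newest journal sequence number it finds, because journal flushes are cumulative — flushing seq N also makes everything up to N durable. The hunk above elides the loop header, so the bounds in this sketch are an assumption and the function name is hypothetical; the calls themselves are the ones shown:

static void example_flush_newest_pin(struct bch_fs *c, struct btree *b,
				     struct closure *cl)
{
	int i;

	/* scan bsets newest to oldest; the first nonzero journal_seq is the
	 * highest, and flushing it covers all older pinned entries */
	for (i = b->nsets - 1; i >= 0; i--) {
		u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);

		if (seq) {
			bch2_journal_flush_seq_async(&c->journal, seq, cl);
			break;
		}
	}
}
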
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 0f75f4560be2..290fb5d718d4 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -25,9 +25,9 @@ enum compact_mode {
COMPACT_WRITTEN_NO_WRITE_LOCK,
};
-bool __bch_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode);
+bool __bch2_compact_whiteouts(struct bch_fs *, struct btree *, enum compact_mode);
-static inline bool bch_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
+static inline bool bch2_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b)
{
struct bset_tree *t;
@@ -41,33 +41,33 @@ static inline bool bch_maybe_compact_whiteouts(struct bch_fs *c, struct btree *b
return false;
compact:
- return __bch_compact_whiteouts(c, b, COMPACT_LAZY);
+ return __bch2_compact_whiteouts(c, b, COMPACT_LAZY);
}
-void bch_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
+void bch2_btree_sort_into(struct bch_fs *, struct btree *, struct btree *);
-void bch_btree_build_aux_trees(struct btree *);
-void bch_btree_init_next(struct bch_fs *, struct btree *,
+void bch2_btree_build_aux_trees(struct btree *);
+void bch2_btree_init_next(struct bch_fs *, struct btree *,
struct btree_iter *);
-void bch_btree_node_read_done(struct bch_fs *, struct btree *,
+void bch2_btree_node_read_done(struct bch_fs *, struct btree *,
struct bch_dev *, const struct bch_extent_ptr *);
-void bch_btree_node_read(struct bch_fs *, struct btree *);
-int bch_btree_root_read(struct bch_fs *, enum btree_id,
+void bch2_btree_node_read(struct bch_fs *, struct btree *);
+int bch2_btree_root_read(struct bch_fs *, enum btree_id,
const struct bkey_i *, unsigned);
-void bch_btree_complete_write(struct bch_fs *, struct btree *,
+void bch2_btree_complete_write(struct bch_fs *, struct btree *,
struct btree_write *);
-void __bch_btree_node_write(struct bch_fs *, struct btree *,
+void __bch2_btree_node_write(struct bch_fs *, struct btree *,
struct closure *, enum six_lock_type, int);
-bool bch_btree_post_write_cleanup(struct bch_fs *, struct btree *);
+bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
-void bch_btree_node_write(struct bch_fs *, struct btree *,
+void bch2_btree_node_write(struct bch_fs *, struct btree *,
struct closure *, enum six_lock_type, int);
-void bch_btree_flush(struct bch_fs *);
-void bch_btree_node_flush_journal_entries(struct bch_fs *, struct btree *,
+void bch2_btree_flush(struct bch_fs *);
+void bch2_btree_node_flush_journal_entries(struct bch_fs *, struct btree *,
struct closure *);
#endif /* _BCACHE_BTREE_IO_H */
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 8cc92331a542..fb5c507e9bce 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
@@ -19,10 +19,10 @@ static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
/* Btree node locking: */
/*
- * Updates the saved lock sequence number, so that btree_node_relock() will
+ * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
* succeed:
*/
-void btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
@@ -37,7 +37,7 @@ void btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
six_unlock_write(&b->lock);
}
-void btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
unsigned readers = 0;
@@ -70,24 +70,7 @@ void btree_node_lock_write(struct btree *b, struct btree_iter *iter)
}
}
-/* versions that allow iter to be null: */
-void __btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
-{
- if (likely(iter))
- btree_node_unlock_write(b, iter);
- else
- six_unlock_write(&b->lock);
-}
-
-void __btree_node_lock_write(struct btree *b, struct btree_iter *iter)
-{
- if (likely(iter))
- btree_node_lock_write(b, iter);
- else
- six_lock_write(&b->lock);
-}
-
-bool btree_node_relock(struct btree_iter *iter, unsigned level)
+bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
struct btree_iter *linked;
struct btree *b = iter->nodes[level];
@@ -125,7 +108,7 @@ success:
}
/* Slowpath: */
-bool __bch_btree_node_lock(struct btree *b, struct bpos pos,
+bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
unsigned level,
struct btree_iter *iter,
enum six_lock_type type)
@@ -224,7 +207,7 @@ static void btree_iter_drop_extra_locks(struct btree_iter *iter)
}
}
-bool __bch_btree_iter_set_locks_want(struct btree_iter *iter,
+bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
unsigned new_locks_want)
{
struct btree_iter *linked;
@@ -243,7 +226,7 @@ bool __bch_btree_iter_set_locks_want(struct btree_iter *iter,
btree_iter_drop_extra_locks(iter);
for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
- if (!btree_node_relock(iter, l))
+ if (!bch2_btree_node_relock(iter, l))
goto fail;
return true;
@@ -261,7 +244,7 @@ fail:
return false;
}
-static int __bch_btree_iter_unlock(struct btree_iter *iter)
+static int __bch2_btree_iter_unlock(struct btree_iter *iter)
{
BUG_ON(iter->error == -EINTR);
@@ -271,71 +254,71 @@ static int __bch_btree_iter_unlock(struct btree_iter *iter)
return iter->error;
}
-int bch_btree_iter_unlock(struct btree_iter *iter)
+int bch2_btree_iter_unlock(struct btree_iter *iter)
{
struct btree_iter *linked;
for_each_linked_btree_iter(iter, linked)
- __bch_btree_iter_unlock(linked);
- return __bch_btree_iter_unlock(iter);
+ __bch2_btree_iter_unlock(linked);
+ return __bch2_btree_iter_unlock(iter);
}
/* Btree iterator: */
#ifdef CONFIG_BCACHEFS_DEBUG
-static void __bch_btree_iter_verify(struct btree_iter *iter,
+static void __bch2_btree_iter_verify(struct btree_iter *iter,
struct btree *b)
{
struct btree_node_iter *node_iter = &iter->node_iters[b->level];
struct btree_node_iter tmp = *node_iter;
struct bkey_packed *k;
- bch_btree_node_iter_verify(node_iter, b);
+ bch2_btree_node_iter_verify(node_iter, b);
/*
* For interior nodes, the iterator will have skipped past
* deleted keys:
*/
k = b->level
- ? bch_btree_node_iter_prev(&tmp, b)
- : bch_btree_node_iter_prev_all(&tmp, b);
+ ? bch2_btree_node_iter_prev(&tmp, b)
+ : bch2_btree_node_iter_prev_all(&tmp, b);
if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
iter->is_extents)) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch_bkey_to_text(buf, sizeof(buf), &uk);
+ bch2_bkey_to_text(buf, sizeof(buf), &uk);
panic("prev key should be before after pos:\n%s\n%llu:%llu\n",
buf, iter->pos.inode, iter->pos.offset);
}
- k = bch_btree_node_iter_peek_all(node_iter, b);
+ k = bch2_btree_node_iter_peek_all(node_iter, b);
if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
iter->is_extents)) {
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch_bkey_to_text(buf, sizeof(buf), &uk);
+ bch2_bkey_to_text(buf, sizeof(buf), &uk);
panic("next key should be before iter pos:\n%llu:%llu\n%s\n",
iter->pos.inode, iter->pos.offset, buf);
}
}
-void bch_btree_iter_verify(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
if (iter->nodes[b->level] == b)
- __bch_btree_iter_verify(iter, b);
+ __bch2_btree_iter_verify(iter, b);
for_each_linked_btree_node(iter, b, linked)
- __bch_btree_iter_verify(iter, b);
+ __bch2_btree_iter_verify(iter, b);
}
#endif
-static void __bch_btree_node_iter_fix(struct btree_iter *iter,
+static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bset_tree *t,
@@ -357,7 +340,7 @@ static void __bch_btree_node_iter_fix(struct btree_iter *iter,
if (new_u64s &&
btree_iter_pos_cmp_packed(b, &iter->pos, where,
iter->is_extents))
- bch_btree_node_iter_push(node_iter, b, where, end);
+ bch2_btree_node_iter_push(node_iter, b, where, end);
return;
found:
set->end = (int) set->end + shift;
@@ -370,12 +353,12 @@ found:
btree_iter_pos_cmp_packed(b, &iter->pos, where,
iter->is_extents)) {
set->k = offset;
- bch_btree_node_iter_sort(node_iter, b);
+ bch2_btree_node_iter_sort(node_iter, b);
} else if (set->k < offset + clobber_u64s) {
set->k = offset + new_u64s;
if (set->k == set->end)
*set = node_iter->data[--node_iter->used];
- bch_btree_node_iter_sort(node_iter, b);
+ bch2_btree_node_iter_sort(node_iter, b);
} else {
set->k = (int) set->k + shift;
}
@@ -409,11 +392,11 @@ found:
struct bkey_packed *k;
for_each_bset(b, t) {
- if (bch_bkey_to_bset(b, where) == t)
+ if (bch2_bkey_to_bset(b, where) == t)
continue;
- k = bkey_prev_all(b, t,
- bch_btree_node_iter_bset_pos(node_iter, b, t));
+ k = bch2_bkey_prev_all(b, t,
+ bch2_btree_node_iter_bset_pos(node_iter, b, t));
if (k &&
__btree_node_iter_cmp(node_iter, b,
k, where) > 0) {
@@ -424,11 +407,11 @@ found:
btree_node_iter_for_each(node_iter, set)
if (set->k == offset) {
set->k = __btree_node_key_to_offset(b, k);
- bch_btree_node_iter_sort(node_iter, b);
+ bch2_btree_node_iter_sort(node_iter, b);
goto next_bset;
}
- bch_btree_node_iter_push(node_iter, b, k,
+ bch2_btree_node_iter_push(node_iter, b, k,
btree_bkey_last(b, t));
}
next_bset:
@@ -437,7 +420,7 @@ next_bset:
}
}
-void bch_btree_node_iter_fix(struct btree_iter *iter,
+void bch2_btree_node_iter_fix(struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bset_tree *t,
@@ -448,22 +431,22 @@ void bch_btree_node_iter_fix(struct btree_iter *iter,
struct btree_iter *linked;
if (node_iter != &iter->node_iters[b->level])
- __bch_btree_node_iter_fix(iter, b, node_iter, t,
+ __bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
if (iter->nodes[b->level] == b)
- __bch_btree_node_iter_fix(iter, b,
+ __bch2_btree_node_iter_fix(iter, b,
&iter->node_iters[b->level], t,
where, clobber_u64s, new_u64s);
for_each_linked_btree_node(iter, b, linked)
- __bch_btree_node_iter_fix(linked, b,
+ __bch2_btree_node_iter_fix(linked, b,
&linked->node_iters[b->level], t,
where, clobber_u64s, new_u64s);
/* interior node iterators are... special... */
if (!b->level)
- bch_btree_iter_verify(iter, b);
+ bch2_btree_iter_verify(iter, b);
}
/* peek_all() doesn't skip deleted keys */
@@ -471,7 +454,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
struct btree *b = iter->nodes[iter->level];
struct bkey_packed *k =
- bch_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
+ bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
struct bkey_s_c ret;
EBUG_ON(!btree_node_locked(iter, iter->level));
@@ -482,7 +465,7 @@ static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
ret = bkey_disassemble(b, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, b, ret);
return ret;
}
@@ -491,7 +474,7 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
struct btree *b = iter->nodes[iter->level];
struct bkey_packed *k =
- bch_btree_node_iter_peek(&iter->node_iters[iter->level], b);
+ bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
struct bkey_s_c ret;
EBUG_ON(!btree_node_locked(iter, iter->level));
@@ -502,14 +485,14 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
ret = bkey_disassemble(b, k, &iter->k);
if (debug_check_bkeys(iter->c))
- bkey_debugcheck(iter->c, b, ret);
+ bch2_bkey_debugcheck(iter->c, b, ret);
return ret;
}
static inline void __btree_iter_advance(struct btree_iter *iter)
{
- bch_btree_node_iter_advance(&iter->node_iters[iter->level],
+ bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
iter->nodes[iter->level]);
}
@@ -527,10 +510,10 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
parent_locked = btree_node_locked(iter, b->level + 1);
- if (!btree_node_relock(iter, b->level + 1))
+ if (!bch2_btree_node_relock(iter, b->level + 1))
return;
- k = bch_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
+ k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
iter->nodes[b->level + 1]);
if (!k ||
bkey_deleted(k) ||
@@ -539,7 +522,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
char buf[100];
struct bkey uk = bkey_unpack_key(b, k);
- bch_bkey_to_text(buf, sizeof(buf), &uk);
+ bch2_bkey_to_text(buf, sizeof(buf), &uk);
panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
buf, b->key.k.p.inode, b->key.k.p.offset);
}
@@ -551,13 +534,13 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
static inline void __btree_iter_init(struct btree_iter *iter,
struct btree *b)
{
- bch_btree_node_iter_init(&iter->node_iters[b->level], b,
+ bch2_btree_node_iter_init(&iter->node_iters[b->level], b,
iter->pos, iter->is_extents,
btree_node_is_extents(b));
/* Skip to first non whiteout: */
if (b->level)
- bch_btree_node_iter_peek(&iter->node_iters[b->level], b);
+ bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
}
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
@@ -585,14 +568,14 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
* A btree node is being replaced - update the iterator to point to the new
* node:
*/
-bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
for_each_linked_btree_iter(iter, linked)
if (btree_iter_pos_in_node(linked, b)) {
/*
- * bch_btree_iter_node_drop() has already been called -
+ * bch2_btree_iter_node_drop() has already been called -
* the old node we're replacing has already been
* unlocked and the pointer invalidated
*/
@@ -606,7 +589,7 @@ bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
* progress...
*
* Instead, btree_iter_node_set() sets things up so
- * btree_node_relock() will succeed:
+ * bch2_btree_node_relock() will succeed:
*/
if (btree_want_intent(linked, b->level)) {
@@ -627,7 +610,7 @@ bool bch_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
return true;
}
-void bch_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
unsigned level = b->level;
@@ -639,7 +622,7 @@ void bch_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
}
}
-void bch_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
unsigned level = b->level;
@@ -654,7 +637,7 @@ void bch_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
* A btree node has been modified in such a way as to invalidate iterators - fix
* them:
*/
-void bch_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
@@ -721,7 +704,7 @@ static inline int btree_iter_down(struct btree_iter *iter)
bkey_reassemble(&tmp.k, k);
- b = bch_btree_node_get(iter, &tmp.k, level, lock_type);
+ b = bch2_btree_node_get(iter, &tmp.k, level, lock_type);
if (unlikely(IS_ERR(b)))
return PTR_ERR(b);
@@ -736,14 +719,14 @@ static void btree_iter_up(struct btree_iter *iter)
btree_node_unlock(iter, iter->level++);
}
-int __must_check __bch_btree_iter_traverse(struct btree_iter *);
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
{
struct bch_fs *c = iter->c;
struct btree_iter *linked, *sorted_iters, **i;
retry_all:
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
if (ret != -ENOMEM && ret != -EINTR)
goto io_error;
@@ -754,7 +737,7 @@ retry_all:
closure_init_stack(&cl);
do {
- ret = mca_cannibalize_lock(c, &cl);
+ ret = bch2_btree_node_cannibalize_lock(c, &cl);
closure_sync(&cl);
} while (ret);
}
@@ -790,7 +773,7 @@ retry_all:
iter = sorted_iters;
do {
retry:
- ret = __bch_btree_iter_traverse(iter);
+ ret = __bch2_btree_iter_traverse(iter);
if (unlikely(ret)) {
if (ret == -EINTR)
goto retry;
@@ -802,7 +785,7 @@ retry:
ret = btree_iter_linked(iter) ? -EINTR : 0;
out:
- mca_cannibalize_unlock(c);
+ bch2_btree_node_cannibalize_unlock(c);
return ret;
io_error:
BUG_ON(ret != -EIO);
@@ -819,9 +802,9 @@ io_error:
* Returns 0 on success, -EIO on error (error reading in a btree node).
*
* On error, caller (peek_node()/peek_key()) must return NULL; the error is
- * stashed in the iterator and returned from bch_btree_iter_unlock().
+ * stashed in the iterator and returned from bch2_btree_iter_unlock().
*/
-int __must_check __bch_btree_iter_traverse(struct btree_iter *iter)
+int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
unsigned depth_want = iter->level;
@@ -833,7 +816,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter)
for (i = iter->level + 1;
i < iter->locks_want && iter->nodes[i];
i++)
- if (!btree_node_relock(iter, i)) {
+ if (!bch2_btree_node_relock(iter, i)) {
while (iter->nodes[iter->level] &&
iter->level + 1 < iter->locks_want)
btree_iter_up(iter);
@@ -847,7 +830,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter)
*/
while (iter->nodes[iter->level] &&
!(is_btree_node(iter, iter->level) &&
- btree_node_relock(iter, iter->level) &&
+ bch2_btree_node_relock(iter, iter->level) &&
btree_iter_pos_cmp(iter->pos,
&iter->nodes[iter->level]->key.k,
iter->is_extents)))
@@ -884,7 +867,7 @@ int __must_check __bch_btree_iter_traverse(struct btree_iter *iter)
return 0;
}
-int __must_check bch_btree_iter_traverse(struct btree_iter *iter)
+int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
int ret;
@@ -893,7 +876,7 @@ int __must_check bch_btree_iter_traverse(struct btree_iter *iter)
iter->at_end_of_leaf = false;
- ret = __bch_btree_iter_traverse(iter);
+ ret = __bch2_btree_iter_traverse(iter);
if (unlikely(ret))
ret = btree_iter_traverse_error(iter, ret);
@@ -902,14 +885,14 @@ int __must_check bch_btree_iter_traverse(struct btree_iter *iter)
/* Iterate across nodes (leaf and interior nodes) */
-struct btree *bch_btree_iter_peek_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
struct btree *b;
int ret;
EBUG_ON(iter->is_extents);
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
@@ -923,7 +906,7 @@ struct btree *bch_btree_iter_peek_node(struct btree_iter *iter)
return b;
}
-struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
struct btree *b;
int ret;
@@ -936,7 +919,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
return NULL;
/* parent node usually won't be locked: redo traversal if necessary */
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
@@ -953,7 +936,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
: bkey_successor(iter->pos);
iter->level = depth;
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
return NULL;
@@ -967,7 +950,7 @@ struct btree *bch_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
/* Iterate across keys (in leaf nodes only) */
-void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
+void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
struct btree *b = iter->nodes[0];
struct btree_node_iter *node_iter = &iter->node_iters[0];
@@ -978,10 +961,10 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p
EBUG_ON(!btree_node_locked(iter, 0));
EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
- while ((k = bch_btree_node_iter_peek_all(node_iter, b)) &&
+ while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
!btree_iter_pos_cmp_packed(b, &new_pos, k,
iter->is_extents))
- bch_btree_node_iter_advance(node_iter, b);
+ bch2_btree_node_iter_advance(node_iter, b);
if (!k &&
!btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents))
@@ -990,25 +973,25 @@ void bch_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_p
iter->pos = new_pos;
}
-void bch_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
iter->pos = new_pos;
}
-void bch_btree_iter_advance_pos(struct btree_iter *iter)
+void bch2_btree_iter_advance_pos(struct btree_iter *iter)
{
/*
* We use iter->k instead of iter->pos for extents: iter->pos will be
* equal to the start of the extent we returned, but we need to advance
* to the end of the extent we returned.
*/
- bch_btree_iter_set_pos(iter,
+ bch2_btree_iter_set_pos(iter,
btree_type_successor(iter->btree_id, iter->k.p));
}
/* XXX: expensive */
-void bch_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
+void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
/* incapable of rewinding across nodes: */
BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
@@ -1017,13 +1000,13 @@ void bch_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
__btree_iter_init(iter, iter->nodes[iter->level]);
}
-struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
struct bkey_s_c k;
int ret;
while (1) {
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (unlikely(ret)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
return bkey_s_c_err(ret);
@@ -1037,7 +1020,7 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
*/
if (!iter->is_extents ||
bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
- bch_btree_iter_set_pos(iter, bkey_start_pos(k.k));
+ bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
return k;
}
@@ -1045,7 +1028,7 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
if (!bkey_cmp(iter->pos, POS_MAX)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
return bkey_s_c_null;
}
@@ -1053,14 +1036,14 @@ struct bkey_s_c bch_btree_iter_peek(struct btree_iter *iter)
}
}
-struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
{
struct bkey_s_c k;
struct bkey n;
int ret;
while (1) {
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (unlikely(ret)) {
iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
return bkey_s_c_err(ret);
@@ -1082,7 +1065,7 @@ recheck:
if (!k.k)
k.k = &iter->nodes[0]->key.k;
- bch_key_resize(&n,
+ bch2_key_resize(&n,
min_t(u64, KEY_SIZE_MAX,
(k.k->p.inode == n.p.inode
? bkey_start_offset(k.k)
@@ -1102,13 +1085,13 @@ recheck:
}
}
-void __bch_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
+void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned depth)
{
iter->level = depth;
- /* bch_bkey_ops isn't used much, this would be a cache miss */
- /* iter->is_extents = bch_bkey_ops[btree_id]->is_extents; */
+ /* bch2_bkey_ops isn't used much, this would be a cache miss */
+ /* iter->is_extents = bch2_bkey_ops[btree_id]->is_extents; */
iter->is_extents = btree_id == BTREE_ID_EXTENTS;
iter->nodes_locked = 0;
iter->nodes_intent_locked = 0;
@@ -1125,7 +1108,7 @@ void __bch_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
prefetch(c->btree_roots[btree_id].b);
}
-void bch_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
+void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
{
BUG_ON(btree_iter_linked(new));
@@ -1142,9 +1125,9 @@ void bch_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
}
}
-void bch_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
+void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
- bch_btree_iter_unlock(dst);
+ bch2_btree_iter_unlock(dst);
memcpy(dst, src, offsetof(struct btree_iter, next));
dst->nodes_locked = dst->nodes_intent_locked = 0;
}
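
A minimal usage sketch (not part of the patch) of the key-iteration entry points renamed above; BTREE_ID_EXTENTS is only an example choice of btree, and the counter argument is assumed for illustration:

/*
 * Sketch: walk every key in one btree with the renamed iterator API.
 * Errors hit during traversal are stashed in the iterator and come back
 * from bch2_btree_iter_unlock().
 */
static int count_keys_sketch(struct bch_fs *c, u64 *nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);

	while (!IS_ERR_OR_NULL((k = bch2_btree_iter_peek(&iter)).k)) {
		(*nr)++;
		bch2_btree_iter_advance_pos(&iter);
	}

	return bch2_btree_iter_unlock(&iter);
}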
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 2d85b3a751f1..39731f0bcbc6 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -8,7 +8,7 @@ struct btree_iter {
u8 level;
/*
- * Used in bch_btree_iter_traverse(), to indicate whether we're
+ * Used in bch2_btree_iter_traverse(), to indicate whether we're
* searching for @pos or the first key strictly greater than @pos
*/
u8 is_extents;
@@ -23,7 +23,7 @@ struct btree_iter {
enum btree_id btree_id:8;
/*
- * indicates we need to call bch_btree_iter_traverse() to revalidate
+ * indicates we need to call bch2_btree_iter_traverse() to revalidate
* iterator:
*/
u8 at_end_of_leaf;
@@ -44,7 +44,7 @@ struct btree_iter {
* btree_iter_next_node() knows that it's finished with a depth first
* traversal. Just unlocking a node (with btree_node_unlock()) is fine,
* and if you really don't want that node used again (e.g. btree_split()
- * freed it) decrementing lock_seq will cause btree_node_relock() to
+ * freed it) decrementing lock_seq will cause bch2_btree_node_relock() to
* always fail (but since freeing a btree node takes a write lock on the
* node, which increments the node's lock seq, that's not actually
* necessary in that example).
@@ -55,8 +55,8 @@ struct btree_iter {
struct btree_node_iter node_iters[BTREE_MAX_DEPTH];
/*
- * Current unpacked key - so that bch_btree_iter_next()/
- * bch_btree_iter_next_with_holes() can correctly advance pos.
+ * Current unpacked key - so that bch2_btree_iter_next()/
+ * bch2_btree_iter_next_with_holes() can correctly advance pos.
*/
struct bkey k;
@@ -115,27 +115,27 @@ __next_linked_btree_node(struct btree_iter *iter, struct btree *b,
* @_b is assumed to be locked by @_iter
*
* Filters out iterators that don't have a valid btree_node iterator for @_b -
- * i.e. iterators for which btree_node_relock() would not succeed.
+ * i.e. iterators for which bch2_btree_node_relock() would not succeed.
*/
#define for_each_linked_btree_node(_iter, _b, _linked) \
for ((_linked) = (_iter); \
((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
#ifdef CONFIG_BCACHEFS_DEBUG
-void bch_btree_iter_verify(struct btree_iter *, struct btree *);
+void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
#else
-static inline void bch_btree_iter_verify(struct btree_iter *iter,
+static inline void bch2_btree_iter_verify(struct btree_iter *iter,
struct btree *b) {}
#endif
-void bch_btree_node_iter_fix(struct btree_iter *, struct btree *,
+void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
struct btree_node_iter *, struct bset_tree *,
struct bkey_packed *, unsigned, unsigned);
-int bch_btree_iter_unlock(struct btree_iter *);
-bool __bch_btree_iter_set_locks_want(struct btree_iter *, unsigned);
+int bch2_btree_iter_unlock(struct btree_iter *);
+bool __bch2_btree_iter_set_locks_want(struct btree_iter *, unsigned);
-static inline bool bch_btree_iter_set_locks_want(struct btree_iter *iter,
+static inline bool bch2_btree_iter_set_locks_want(struct btree_iter *iter,
unsigned new_locks_want)
{
new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
@@ -144,48 +144,48 @@ static inline bool bch_btree_iter_set_locks_want(struct btree_iter *iter,
iter->nodes_intent_locked == (1 << new_locks_want) - 1)
return true;
- return __bch_btree_iter_set_locks_want(iter, new_locks_want);
+ return __bch2_btree_iter_set_locks_want(iter, new_locks_want);
}
-bool bch_btree_iter_node_replace(struct btree_iter *, struct btree *);
-void bch_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
-void bch_btree_iter_node_drop(struct btree_iter *, struct btree *);
+bool bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
+void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
-void bch_btree_iter_reinit_node(struct btree_iter *, struct btree *);
+void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
-int __must_check bch_btree_iter_traverse(struct btree_iter *);
+int __must_check bch2_btree_iter_traverse(struct btree_iter *);
-struct btree *bch_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch_btree_iter_next_node(struct btree_iter *, unsigned);
+struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);
-struct bkey_s_c bch_btree_iter_peek(struct btree_iter *);
-struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *);
-void bch_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
-void bch_btree_iter_set_pos(struct btree_iter *, struct bpos);
-void bch_btree_iter_advance_pos(struct btree_iter *);
-void bch_btree_iter_rewind(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *);
+void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
+void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
+void bch2_btree_iter_advance_pos(struct btree_iter *);
+void bch2_btree_iter_rewind(struct btree_iter *, struct bpos);
-void __bch_btree_iter_init(struct btree_iter *, struct bch_fs *,
+void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
enum btree_id, struct bpos, unsigned , unsigned);
-static inline void bch_btree_iter_init(struct btree_iter *iter,
+static inline void bch2_btree_iter_init(struct btree_iter *iter,
struct bch_fs *c,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, 0, 0);
+ __bch2_btree_iter_init(iter, c, btree_id, pos, 0, 0);
}
-static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
+static inline void bch2_btree_iter_init_intent(struct btree_iter *iter,
struct bch_fs *c,
enum btree_id btree_id,
struct bpos pos)
{
- __bch_btree_iter_init(iter, c, btree_id, pos, 1, 0);
+ __bch2_btree_iter_init(iter, c, btree_id, pos, 1, 0);
}
-void bch_btree_iter_link(struct btree_iter *, struct btree_iter *);
-void bch_btree_iter_copy(struct btree_iter *, struct btree_iter *);
+void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
+void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);
static inline struct bpos btree_type_successor(enum btree_id id,
struct bpos pos)
@@ -217,22 +217,22 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
#define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, \
_b, _locks_want) \
- for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
+ for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
_start, _locks_want, _depth), \
(_iter)->is_extents = false, \
- _b = bch_btree_iter_peek_node(_iter); \
+ _b = bch2_btree_iter_peek_node(_iter); \
(_b); \
- (_b) = bch_btree_iter_next_node(_iter, _depth))
+ (_b) = bch2_btree_iter_next_node(_iter, _depth))
#define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b) \
__for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0)
#define __for_each_btree_key(_iter, _c, _btree_id, _start, \
_k, _locks_want) \
- for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
+ for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
_start, _locks_want, 0); \
- !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek(_iter)).k); \
- bch_btree_iter_advance_pos(_iter))
+ !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek(_iter)).k); \
+ bch2_btree_iter_advance_pos(_iter))
#define for_each_btree_key(_iter, _c, _btree_id, _start, _k) \
__for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)
@@ -242,10 +242,10 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
#define __for_each_btree_key_with_holes(_iter, _c, _btree_id, \
_start, _k, _locks_want) \
- for (__bch_btree_iter_init((_iter), (_c), (_btree_id), \
+ for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), \
_start, _locks_want, 0); \
- !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek_with_holes(_iter)).k);\
- bch_btree_iter_advance_pos(_iter))
+ !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek_with_holes(_iter)).k);\
+ bch2_btree_iter_advance_pos(_iter))
#define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k) \
__for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)
@@ -263,19 +263,19 @@ static inline int btree_iter_err(struct bkey_s_c k)
* Unlocks before scheduling
* Note: does not revalidate iterator
*/
-static inline void bch_btree_iter_cond_resched(struct btree_iter *iter)
+static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
struct btree_iter *linked;
if (need_resched()) {
for_each_linked_btree_iter(iter, linked)
- bch_btree_iter_unlock(linked);
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(linked);
+ bch2_btree_iter_unlock(iter);
schedule();
} else if (race_fault()) {
for_each_linked_btree_iter(iter, linked)
- bch_btree_iter_unlock(linked);
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(linked);
+ bch2_btree_iter_unlock(iter);
}
}
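
A minimal usage sketch (not part of the patch) of the renamed node-walking wrapper defined above; visit_node() is a hypothetical caller-supplied helper and BTREE_ID_EXTENTS is only an example btree:

/*
 * Sketch: walk one btree's nodes via for_each_btree_node(), which drives
 * bch2_btree_iter_peek_node()/bch2_btree_iter_next_node() underneath.
 */
static int walk_nodes_sketch(struct bch_fs *c,
			     void (*visit_node)(struct btree *))
{
	struct btree_iter iter;
	struct btree *b;

	for_each_btree_node(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, b)
		visit_node(b);

	return bch2_btree_iter_unlock(&iter);
}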
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 76f85c0deae3..27709d1d2a53 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -96,7 +96,7 @@ static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
mark_btree_node_unlocked(iter, level);
}
-bool __bch_btree_node_lock(struct btree *, struct bpos, unsigned,
+bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
struct btree_iter *, enum six_lock_type);
static inline bool btree_node_lock(struct btree *b, struct bpos pos,
@@ -105,15 +105,12 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos,
enum six_lock_type type)
{
return likely(six_trylock_type(&b->lock, type)) ||
- __bch_btree_node_lock(b, pos, level, iter, type);
+ __bch2_btree_node_lock(b, pos, level, iter, type);
}
-bool btree_node_relock(struct btree_iter *, unsigned);
+bool bch2_btree_node_relock(struct btree_iter *, unsigned);
-void btree_node_unlock_write(struct btree *, struct btree_iter *);
-void btree_node_lock_write(struct btree *, struct btree_iter *);
-
-void __btree_node_unlock_write(struct btree *, struct btree_iter *);
-void __btree_node_lock_write(struct btree *, struct btree_iter *);
+void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
+void bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
#endif /* _BCACHE_BTREE_LOCKING_H */
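
A minimal sketch (not part of the patch) of the lock pairing declared above, as used by the btree_update.c callers later in this patch; the actual modification of the node is elided:

/*
 * Sketch: upgrade to the write lock on a node the iterator already holds
 * an intent lock on, modify it in memory, then drop back to intent.
 */
static void modify_node_sketch(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_lock_write(b, iter);

	/* ... modify b's in-memory bsets here ... */

	bch2_btree_node_unlock_write(b, iter);
}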
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index dd078806d266..915e42c2f185 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -1,7 +1,6 @@
#ifndef _BCACHE_BTREE_TYPES_H
#define _BCACHE_BTREE_TYPES_H
-#include <linux/bcache.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/semaphore.h>
@@ -94,10 +93,10 @@ struct btree {
u8 unpack_fn_len;
/*
- * XXX: add a delete sequence number, so when btree_node_relock() fails
- * because the lock sequence number has changed - i.e. the contents were
- * modified - we can still relock the node if it's still the one we
- * want, without redoing the traversal
+ * XXX: add a delete sequence number, so when bch2_btree_node_relock()
+ * fails because the lock sequence number has changed - i.e. the
+ * contents were modified - we can still relock the node if it's still
+ * the one we want, without redoing the traversal
*/
/*
@@ -240,7 +239,7 @@ static inline enum bkey_type btree_node_type(struct btree *b)
static inline const struct bkey_ops *btree_node_ops(struct btree *b)
{
- return bch_bkey_ops[btree_node_type(b)];
+ return bch2_bkey_ops[btree_node_type(b)];
}
static inline bool btree_node_has_ptrs(struct btree *b)
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 96348ac6bd36..51dff1b7dc77 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "bkey_methods.h"
#include "btree_cache.h"
@@ -24,13 +24,13 @@ static void btree_interior_update_updated_root(struct bch_fs *,
/* Calculate ideal packed bkey format for new btree nodes: */
-void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
+void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{
struct bkey_packed *k;
struct bset_tree *t;
struct bkey uk;
- bch_bkey_format_add_pos(s, b->data->min_key);
+ bch2_bkey_format_add_pos(s, b->data->min_key);
for_each_bset(b, t)
for (k = btree_bkey_first(b, t);
@@ -38,18 +38,18 @@ void __bch_btree_calc_format(struct bkey_format_state *s, struct btree *b)
k = bkey_next(k))
if (!bkey_whiteout(k)) {
uk = bkey_unpack_key(b, k);
- bch_bkey_format_add_key(s, &uk);
+ bch2_bkey_format_add_key(s, &uk);
}
}
-static struct bkey_format bch_btree_calc_format(struct btree *b)
+static struct bkey_format bch2_btree_calc_format(struct btree *b)
{
struct bkey_format_state s;
- bch_bkey_format_init(&s);
- __bch_btree_calc_format(&s, b);
+ bch2_bkey_format_init(&s);
+ __bch2_btree_calc_format(&s, b);
- return bch_bkey_format_done(&s);
+ return bch2_bkey_format_done(&s);
}
static size_t btree_node_u64s_with_format(struct btree *b,
@@ -75,7 +75,7 @@ static size_t btree_node_u64s_with_format(struct btree *b,
* This assumes all keys can pack with the new format -- it just checks if
* the re-packed keys would fit inside the node itself.
*/
-bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
struct bkey_format *new_f)
{
size_t u64s = btree_node_u64s_with_format(b, new_f);
@@ -92,7 +92,7 @@ bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *b,
* Must be called _before_ btree_interior_update_updated_root() or
* btree_interior_update_updated_btree:
*/
-static void bch_btree_node_free_index(struct bch_fs *c, struct btree *b,
+static void bch2_btree_node_free_index(struct bch_fs *c, struct btree *b,
enum btree_id id, struct bkey_s_c k,
struct bch_fs_usage *stats)
{
@@ -136,13 +136,13 @@ found:
*/
/*
- * bch_mark_key() compares the current gc pos to the pos we're
+ * bch2_mark_key() compares the current gc pos to the pos we're
* moving this reference from, hence one comparison here:
*/
if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
struct bch_fs_usage tmp = { 0 };
- bch_mark_key(c, bkey_i_to_s_c(&d->key),
+ bch2_mark_key(c, bkey_i_to_s_c(&d->key),
-c->sb.btree_node_size, true, b
? gc_pos_btree_node(b)
: gc_pos_btree_root(id),
@@ -159,7 +159,7 @@ found:
static void __btree_node_free(struct bch_fs *c, struct btree *b,
struct btree_iter *iter)
{
- trace_bcache_btree_node_free(c, b);
+ trace_btree_node_free(c, b);
BUG_ON(b == btree_node_root(c, b));
BUG_ON(b->ob);
@@ -168,10 +168,10 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b,
six_lock_write(&b->lock);
if (btree_node_dirty(b))
- bch_btree_complete_write(c, b, btree_current_write(b));
+ bch2_btree_complete_write(c, b, btree_current_write(b));
clear_btree_node_dirty(b);
- mca_hash_remove(c, b);
+ bch2_btree_node_hash_remove(c, b);
mutex_lock(&c->btree_cache_lock);
list_move(&b->list, &c->btree_cache_freeable);
@@ -179,13 +179,14 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b,
/*
* By using six_unlock_write() directly instead of
- * btree_node_unlock_write(), we don't update the iterator's sequence
- * numbers and cause future btree_node_relock() calls to fail:
+ * bch2_btree_node_unlock_write(), we don't update the iterator's
+ * sequence numbers and cause future bch2_btree_node_relock() calls to
+ * fail:
*/
six_unlock_write(&b->lock);
}
-void bch_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
+void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
{
struct open_bucket *ob = b->ob;
@@ -193,26 +194,26 @@ void bch_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
__btree_node_free(c, b, NULL);
- bch_open_bucket_put(c, ob);
+ bch2_open_bucket_put(c, ob);
}
-void bch_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
+void bch2_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
{
- bch_btree_iter_node_drop_linked(iter, b);
+ bch2_btree_iter_node_drop_linked(iter, b);
__btree_node_free(iter->c, b, iter);
- bch_btree_iter_node_drop(iter, b);
+ bch2_btree_iter_node_drop(iter, b);
}
-static void bch_btree_node_free_ondisk(struct bch_fs *c,
+static void bch2_btree_node_free_ondisk(struct bch_fs *c,
struct pending_btree_node_free *pending)
{
struct bch_fs_usage stats = { 0 };
BUG_ON(!pending->index_update_done);
- bch_mark_key(c, bkey_i_to_s_c(&pending->key),
+ bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
-c->sb.btree_node_size, true,
gc_phase(GC_PHASE_PENDING_DELETE),
&stats, 0);
@@ -222,13 +223,13 @@ static void bch_btree_node_free_ondisk(struct bch_fs *c,
*/
}
-void btree_open_bucket_put(struct bch_fs *c, struct btree *b)
+void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *b)
{
- bch_open_bucket_put(c, b->ob);
+ bch2_open_bucket_put(c, b->ob);
b->ob = NULL;
}
-static struct btree *__bch_btree_node_alloc(struct bch_fs *c,
+static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
bool use_reserve,
struct disk_reservation *res,
struct closure *cl)
@@ -255,7 +256,7 @@ retry:
bkey_extent_init(&tmp.k);
tmp.k.k.size = c->sb.btree_node_size,
- ob = bch_alloc_sectors(c, &c->btree_write_point,
+ ob = bch2_alloc_sectors(c, &c->btree_write_point,
bkey_i_to_extent(&tmp.k),
res->nr_replicas,
c->opts.metadata_replicas_required,
@@ -265,11 +266,11 @@ retry:
return ERR_CAST(ob);
if (tmp.k.k.size < c->sb.btree_node_size) {
- bch_open_bucket_put(c, ob);
+ bch2_open_bucket_put(c, ob);
goto retry;
}
mem_alloc:
- b = mca_alloc(c);
+ b = bch2_btree_node_mem_alloc(c);
/* we hold cannibalize_lock: */
BUG_ON(IS_ERR(b));
@@ -282,7 +283,7 @@ mem_alloc:
return b;
}
-static struct btree *bch_btree_node_alloc(struct bch_fs *c,
+static struct btree *bch2_btree_node_alloc(struct bch_fs *c,
unsigned level, enum btree_id id,
struct btree_reserve *reserve)
{
@@ -292,12 +293,12 @@ static struct btree *bch_btree_node_alloc(struct bch_fs *c,
b = reserve->b[--reserve->nr];
- BUG_ON(mca_hash_insert(c, b, level, id));
+ BUG_ON(bch2_btree_node_hash_insert(c, b, level, id));
set_btree_node_accessed(b);
set_btree_node_dirty(b);
- bch_bset_init_first(b, &b->data->keys);
+ bch2_bset_init_first(b, &b->data->keys);
memset(&b->nr, 0, sizeof(b->nr));
b->data->magic = cpu_to_le64(bset_magic(c));
b->data->flags = 0;
@@ -305,22 +306,22 @@ static struct btree *bch_btree_node_alloc(struct bch_fs *c,
SET_BTREE_NODE_LEVEL(b->data, level);
b->data->ptr = bkey_i_to_extent(&b->key)->v.start->ptr;
- bch_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
- bch_check_mark_super(c, &b->key, true);
+ bch2_check_mark_super(c, &b->key, true);
- trace_bcache_btree_node_alloc(c, b);
+ trace_btree_node_alloc(c, b);
return b;
}
-struct btree *__btree_node_alloc_replacement(struct bch_fs *c,
- struct btree *b,
- struct bkey_format format,
- struct btree_reserve *reserve)
+struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *c,
+ struct btree *b,
+ struct bkey_format format,
+ struct btree_reserve *reserve)
{
struct btree *n;
- n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve);
+ n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
n->data->min_key = b->data->min_key;
n->data->max_key = b->data->max_key;
@@ -328,33 +329,31 @@ struct btree *__btree_node_alloc_replacement(struct bch_fs *c,
btree_node_set_format(n, format);
- bch_btree_sort_into(c, n, b);
+ bch2_btree_sort_into(c, n, b);
btree_node_reset_sib_u64s(n);
n->key.k.p = b->key.k.p;
- trace_bcache_btree_node_alloc_replacement(c, b, n);
-
return n;
}
-struct btree *btree_node_alloc_replacement(struct bch_fs *c,
- struct btree *b,
- struct btree_reserve *reserve)
+static struct btree *bch2_btree_node_alloc_replacement(struct bch_fs *c,
+ struct btree *b,
+ struct btree_reserve *reserve)
{
- struct bkey_format new_f = bch_btree_calc_format(b);
+ struct bkey_format new_f = bch2_btree_calc_format(b);
/*
* The keys might expand with the new format - if they wouldn't fit in
* the btree node anymore, use the old format for now:
*/
- if (!bch_btree_node_format_fits(c, b, &new_f))
+ if (!bch2_btree_node_format_fits(c, b, &new_f))
new_f = b->format;
- return __btree_node_alloc_replacement(c, b, new_f, reserve);
+ return __bch2_btree_node_alloc_replacement(c, b, new_f, reserve);
}
-static void bch_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
+static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
struct btree_reserve *btree_reserve)
{
struct btree *old = btree_node_root(c, b);
@@ -371,28 +370,28 @@ static void bch_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
if (btree_reserve) {
/*
* New allocation (we're not being called because we're in
- * bch_btree_root_read()) - do marking while holding
+ * bch2_btree_root_read()) - do marking while holding
* btree_root_lock:
*/
struct bch_fs_usage stats = { 0 };
- bch_mark_key(c, bkey_i_to_s_c(&b->key),
+ bch2_mark_key(c, bkey_i_to_s_c(&b->key),
c->sb.btree_node_size, true,
gc_pos_btree_root(b->btree_id),
&stats, 0);
if (old)
- bch_btree_node_free_index(c, NULL, old->btree_id,
+ bch2_btree_node_free_index(c, NULL, old->btree_id,
bkey_i_to_s_c(&old->key),
&stats);
- bch_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
+ bch2_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
gc_pos_btree_root(b->btree_id));
}
- bch_recalc_btree_reserve(c);
+ bch2_recalc_btree_reserve(c);
}
-static void bch_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
+static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
{
struct btree_root *r = &c->btree_roots[b->btree_id];
@@ -410,13 +409,13 @@ static void bch_btree_set_root_ondisk(struct bch_fs *c, struct btree *b)
* Only for filesystem bringup, when first reading the btree roots or allocating
* btree roots when initializing a new filesystem:
*/
-void bch_btree_set_root_initial(struct bch_fs *c, struct btree *b,
+void bch2_btree_set_root_initial(struct bch_fs *c, struct btree *b,
struct btree_reserve *btree_reserve)
{
BUG_ON(btree_node_root(c, b));
- bch_btree_set_root_inmem(c, b, btree_reserve);
- bch_btree_set_root_ondisk(c, b);
+ bch2_btree_set_root_inmem(c, b, btree_reserve);
+ bch2_btree_set_root_ondisk(c, b);
}
/**
@@ -431,14 +430,14 @@ void bch_btree_set_root_initial(struct bch_fs *c, struct btree *b,
* is nothing new to be done. This just guarantees that there is a
* journal write.
*/
-static void bch_btree_set_root(struct btree_iter *iter, struct btree *b,
+static void bch2_btree_set_root(struct btree_iter *iter, struct btree *b,
struct btree_interior_update *as,
struct btree_reserve *btree_reserve)
{
struct bch_fs *c = iter->c;
struct btree *old;
- trace_bcache_btree_set_root(c, b);
+ trace_btree_set_root(c, b);
BUG_ON(!b->written);
old = btree_node_root(c, b);
@@ -447,9 +446,9 @@ static void bch_btree_set_root(struct btree_iter *iter, struct btree *b,
* Ensure no one is using the old root while we switch to the
* new root:
*/
- btree_node_lock_write(old, iter);
+ bch2_btree_node_lock_write(old, iter);
- bch_btree_set_root_inmem(c, b, btree_reserve);
+ bch2_btree_set_root_inmem(c, b, btree_reserve);
btree_interior_update_updated_root(c, as, iter->btree_id);
@@ -460,31 +459,31 @@ static void bch_btree_set_root(struct btree_iter *iter, struct btree *b,
* an intent lock on the new root, and any updates that would
* depend on the new root would have to update the new root.
*/
- btree_node_unlock_write(old, iter);
+ bch2_btree_node_unlock_write(old, iter);
}
static struct btree *__btree_root_alloc(struct bch_fs *c, unsigned level,
enum btree_id id,
struct btree_reserve *reserve)
{
- struct btree *b = bch_btree_node_alloc(c, level, id, reserve);
+ struct btree *b = bch2_btree_node_alloc(c, level, id, reserve);
b->data->min_key = POS_MIN;
b->data->max_key = POS_MAX;
- b->data->format = bch_btree_calc_format(b);
+ b->data->format = bch2_btree_calc_format(b);
b->key.k.p = POS_MAX;
btree_node_set_format(b, b->data->format);
- bch_btree_build_aux_trees(b);
+ bch2_btree_build_aux_trees(b);
six_unlock_write(&b->lock);
return b;
}
-void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
+void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
{
- bch_disk_reservation_put(c, &reserve->disk_res);
+ bch2_disk_reservation_put(c, &reserve->disk_res);
mutex_lock(&c->btree_reserve_cache_lock);
@@ -502,7 +501,7 @@ void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
b->ob = NULL;
bkey_copy(&a->k, &b->key);
} else {
- bch_open_bucket_put(c, b->ob);
+ bch2_open_bucket_put(c, b->ob);
b->ob = NULL;
}
@@ -516,7 +515,7 @@ void bch_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
mempool_free(reserve, &c->btree_reserve_pool);
}
-static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c,
+static struct btree_reserve *__bch2_btree_reserve_get(struct bch_fs *c,
unsigned nr_nodes,
unsigned flags,
struct closure *cl)
@@ -535,11 +534,11 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c,
* This check isn't necessary for correctness - it's just to potentially
* prevent us from doing a lot of work that'll end up being wasted:
*/
- ret = bch_journal_error(&c->journal);
+ ret = bch2_journal_error(&c->journal);
if (ret)
return ERR_PTR(ret);
- if (bch_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
+ if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
return ERR_PTR(-ENOSPC);
BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
@@ -548,9 +547,9 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c,
* Protects reaping from the btree node cache and using the btree node
* open bucket reserve:
*/
- ret = mca_cannibalize_lock(c, cl);
+ ret = bch2_btree_node_cannibalize_lock(c, cl);
if (ret) {
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
return ERR_PTR(ret);
}
@@ -560,7 +559,7 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c,
reserve->nr = 0;
while (reserve->nr < nr_nodes) {
- b = __bch_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE,
+ b = __bch2_btree_node_alloc(c, flags & BTREE_INSERT_USE_RESERVE,
&disk_res, cl);
if (IS_ERR(b)) {
ret = PTR_ERR(b);
@@ -570,16 +569,16 @@ static struct btree_reserve *__bch_btree_reserve_get(struct bch_fs *c,
reserve->b[reserve->nr++] = b;
}
- mca_cannibalize_unlock(c);
+ bch2_btree_node_cannibalize_unlock(c);
return reserve;
err_free:
- bch_btree_reserve_put(c, reserve);
- mca_cannibalize_unlock(c);
- trace_bcache_btree_reserve_get_fail(c, nr_nodes, cl);
+ bch2_btree_reserve_put(c, reserve);
+ bch2_btree_node_cannibalize_unlock(c);
+ trace_btree_reserve_get_fail(c, nr_nodes, cl);
return ERR_PTR(ret);
}
-struct btree_reserve *bch_btree_reserve_get(struct bch_fs *c,
+struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
struct btree *b,
unsigned extra_nodes,
unsigned flags,
@@ -588,11 +587,11 @@ struct btree_reserve *bch_btree_reserve_get(struct bch_fs *c,
unsigned depth = btree_node_root(c, b)->level - b->level;
unsigned nr_nodes = btree_reserve_required_nodes(depth) + extra_nodes;
- return __bch_btree_reserve_get(c, nr_nodes, flags, cl);
+ return __bch2_btree_reserve_get(c, nr_nodes, flags, cl);
}
-int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id,
+int bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id,
struct closure *writes)
{
struct closure cl;
@@ -603,7 +602,7 @@ int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id,
while (1) {
/* XXX haven't calculated capacity yet :/ */
- reserve = __bch_btree_reserve_get(c, 1, 0, &cl);
+ reserve = __bch2_btree_reserve_get(c, 1, 0, &cl);
if (!IS_ERR(reserve))
break;
@@ -615,18 +614,18 @@ int bch_btree_root_alloc(struct bch_fs *c, enum btree_id id,
b = __btree_root_alloc(c, 0, id, reserve);
- bch_btree_node_write(c, b, writes, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, b, writes, SIX_LOCK_intent, -1);
- bch_btree_set_root_initial(c, b, reserve);
- btree_open_bucket_put(c, b);
+ bch2_btree_set_root_initial(c, b, reserve);
+ bch2_btree_open_bucket_put(c, b);
six_unlock_intent(&b->lock);
- bch_btree_reserve_put(c, reserve);
+ bch2_btree_reserve_put(c, reserve);
return 0;
}
-static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
+static void bch2_insert_fixup_btree_ptr(struct btree_iter *iter,
struct btree *b,
struct bkey_i *insert,
struct btree_node_iter *node_iter,
@@ -638,33 +637,33 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
struct bkey tmp;
if (bkey_extent_is_data(&insert->k))
- bch_mark_key(c, bkey_i_to_s_c(insert),
+ bch2_mark_key(c, bkey_i_to_s_c(insert),
c->sb.btree_node_size, true,
gc_pos_btree_node(b), &stats, 0);
- while ((k = bch_btree_node_iter_peek_all(node_iter, b)) &&
+ while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
!btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
- bch_btree_node_iter_advance(node_iter, b);
+ bch2_btree_node_iter_advance(node_iter, b);
/*
* If we're overwriting, look up pending delete and mark so that gc
* marks it on the pending delete list:
*/
if (k && !bkey_cmp_packed(b, k, &insert->k))
- bch_btree_node_free_index(c, b, iter->btree_id,
+ bch2_btree_node_free_index(c, b, iter->btree_id,
bkey_disassemble(b, k, &tmp),
&stats);
- bch_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
+ bch2_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
- bch_btree_bset_insert_key(iter, b, node_iter, insert);
+ bch2_btree_bset_insert_key(iter, b, node_iter, insert);
set_btree_node_dirty(b);
}
/* Inserting into a given leaf node (last stage of insert): */
/* Handle overwrites and do insert, for non extents: */
-bool bch_btree_bset_insert_key(struct btree_iter *iter,
+bool bch2_btree_bset_insert_key(struct btree_iter *iter,
struct btree *b,
struct btree_node_iter *node_iter,
struct bkey_i *insert)
@@ -681,11 +680,11 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
bkey_cmp(insert->k.p, b->data->max_key) > 0);
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(iter->c, b));
- k = bch_btree_node_iter_peek_all(node_iter, b);
+ k = bch2_btree_node_iter_peek_all(node_iter, b);
if (k && !bkey_cmp_packed(b, k, &insert->k)) {
BUG_ON(bkey_whiteout(k));
- t = bch_bkey_to_bset(b, k);
+ t = bch2_bkey_to_bset(b, k);
if (bset_unwritten(b, bset(b, t)) &&
bkey_val_u64s(&insert->k) == bkeyp_val_u64s(f, k)) {
@@ -710,8 +709,8 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
* been written to disk) - just delete it:
*/
if (bkey_whiteout(&insert->k) && !k->needs_whiteout) {
- bch_bset_delete(b, k, clobber_u64s);
- bch_btree_node_iter_fix(iter, b, node_iter, t,
+ bch2_bset_delete(b, k, clobber_u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t,
k, clobber_u64s, 0);
return true;
}
@@ -720,7 +719,7 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
}
k->type = KEY_TYPE_DELETED;
- bch_btree_node_iter_fix(iter, b, node_iter, t, k,
+ bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
k->u64s, k->u64s);
if (bkey_whiteout(&insert->k)) {
@@ -740,12 +739,12 @@ bool bch_btree_bset_insert_key(struct btree_iter *iter,
}
t = bset_tree_last(b);
- k = bch_btree_node_iter_bset_pos(node_iter, b, t);
+ k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
clobber_u64s = 0;
overwrite:
- bch_bset_insert(b, node_iter, k, insert, clobber_u64s);
+ bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
if (k->u64s != clobber_u64s || bkey_whiteout(&insert->k))
- bch_btree_node_iter_fix(iter, b, node_iter, t, k,
+ bch2_btree_node_iter_fix(iter, b, node_iter, t, k,
clobber_u64s, k->u64s);
return true;
}
@@ -772,7 +771,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
* shouldn't:
*/
if (!b->level)
- bch_btree_node_write(c, b, NULL, SIX_LOCK_read, i);
+ bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, i);
six_unlock_read(&b->lock);
}
@@ -786,7 +785,7 @@ static void btree_node_flush1(struct journal *j, struct journal_entry_pin *pin)
return __btree_node_flush(j, pin, 1);
}
-void bch_btree_journal_key(struct btree_insert *trans,
+void bch2_btree_journal_key(struct btree_insert *trans,
struct btree_iter *iter,
struct bkey_i *insert)
{
@@ -800,7 +799,7 @@ void bch_btree_journal_key(struct btree_insert *trans,
test_bit(JOURNAL_REPLAY_DONE, &j->flags));
if (!journal_pin_active(&w->journal))
- bch_journal_pin_add(j, &w->journal,
+ bch2_journal_pin_add(j, &w->journal,
btree_node_write_idx(b) == 0
? btree_node_flush0
: btree_node_flush1);
@@ -813,11 +812,11 @@ void bch_btree_journal_key(struct btree_insert *trans,
* have a bug where we're seeing an extent with an invalid crc
* entry in the journal, trying to track it down:
*/
- BUG_ON(bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert)));
+ BUG_ON(bch2_bkey_invalid(c, b->btree_id, bkey_i_to_s_c(insert)));
/* ick */
insert->k.needs_whiteout = false;
- bch_journal_add_keys(j, &trans->journal_res,
+ bch2_journal_add_keys(j, &trans->journal_res,
b->btree_id, insert);
insert->k.needs_whiteout = needs_whiteout;
@@ -831,18 +830,18 @@ void bch_btree_journal_key(struct btree_insert *trans,
}
static enum btree_insert_ret
-bch_insert_fixup_key(struct btree_insert *trans,
+bch2_insert_fixup_key(struct btree_insert *trans,
struct btree_insert_entry *insert)
{
struct btree_iter *iter = insert->iter;
BUG_ON(iter->level);
- if (bch_btree_bset_insert_key(iter,
+ if (bch2_btree_bset_insert_key(iter,
iter->nodes[0],
&iter->node_iters[0],
insert->k))
- bch_btree_journal_key(trans, iter, insert->k);
+ bch2_btree_journal_key(trans, iter, insert->k);
trans->did_work = true;
return BTREE_INSERT_OK;
@@ -863,24 +862,24 @@ static void btree_node_lock_for_insert(struct btree *b, struct btree_iter *iter)
{
struct bch_fs *c = iter->c;
- btree_node_lock_write(b, iter);
+ bch2_btree_node_lock_write(b, iter);
if (btree_node_just_written(b) &&
- bch_btree_post_write_cleanup(c, b))
- bch_btree_iter_reinit_node(iter, b);
+ bch2_btree_post_write_cleanup(c, b))
+ bch2_btree_iter_reinit_node(iter, b);
/*
* If the last bset has been written, or if it's gotten too big - start
* a new bset to insert into:
*/
if (want_new_bset(c, b))
- bch_btree_init_next(c, b, iter);
+ bch2_btree_init_next(c, b, iter);
}
/* Asynchronous interior node update machinery */
struct btree_interior_update *
-bch_btree_interior_update_alloc(struct bch_fs *c)
+bch2_btree_interior_update_alloc(struct bch_fs *c)
{
struct btree_interior_update *as;
@@ -890,7 +889,7 @@ bch_btree_interior_update_alloc(struct bch_fs *c)
as->c = c;
as->mode = BTREE_INTERIOR_NO_UPDATE;
- bch_keylist_init(&as->parent_keys, as->inline_keys,
+ bch2_keylist_init(&as->parent_keys, as->inline_keys,
ARRAY_SIZE(as->inline_keys));
mutex_lock(&c->btree_interior_update_lock);
@@ -914,12 +913,12 @@ static void btree_interior_update_nodes_reachable(struct closure *cl)
struct bch_fs *c = as->c;
unsigned i;
- bch_journal_pin_drop(&c->journal, &as->journal);
+ bch2_journal_pin_drop(&c->journal, &as->journal);
mutex_lock(&c->btree_interior_update_lock);
for (i = 0; i < as->nr_pending; i++)
- bch_btree_node_free_ondisk(c, &as->pending[i]);
+ bch2_btree_node_free_ondisk(c, &as->pending[i]);
as->nr_pending = 0;
mutex_unlock(&c->btree_interior_update_lock);
@@ -940,7 +939,7 @@ static void btree_interior_update_nodes_written(struct closure *cl)
struct bch_fs *c = as->c;
struct btree *b;
- if (bch_journal_error(&c->journal)) {
+ if (bch2_journal_error(&c->journal)) {
/* XXX what? */
}
@@ -975,7 +974,7 @@ retry:
list_del(&as->write_blocked_list);
if (list_empty(&b->write_blocked))
- bch_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
+ bch2_btree_node_write(c, b, NULL, SIX_LOCK_read, -1);
six_unlock_read(&b->lock);
break;
@@ -1008,7 +1007,7 @@ retry:
BUG_ON(c->btree_roots[b->btree_id].as != as);
c->btree_roots[b->btree_id].as = NULL;
- bch_btree_set_root_ondisk(c, b);
+ bch2_btree_set_root_ondisk(c, b);
/*
* We don't have to wait on anything here (before
@@ -1043,7 +1042,7 @@ static void btree_interior_update_updated_btree(struct bch_fs *c,
mutex_unlock(&c->btree_interior_update_lock);
- bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
+ bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
continue_at(&as->cl, btree_interior_update_nodes_written,
system_freezable_wq);
@@ -1078,7 +1077,7 @@ static void btree_interior_update_updated_root(struct bch_fs *c,
mutex_unlock(&c->btree_interior_update_lock);
- bch_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
+ bch2_journal_wait_on_seq(&c->journal, as->journal_seq, &as->cl);
continue_at(&as->cl, btree_interior_update_nodes_written,
system_freezable_wq);
@@ -1089,7 +1088,7 @@ static void interior_update_flush(struct journal *j, struct journal_entry_pin *p
struct btree_interior_update *as =
container_of(pin, struct btree_interior_update, journal);
- bch_journal_flush_seq_async(j, as->journal_seq, NULL);
+ bch2_journal_flush_seq_async(j, as->journal_seq, NULL);
}
/*
@@ -1097,7 +1096,7 @@ static void interior_update_flush(struct journal *j, struct journal_entry_pin *p
* nodes and thus outstanding btree_interior_updates - redirect @b's
* btree_interior_updates to point to this btree_interior_update:
*/
-void bch_btree_interior_update_will_free_node(struct bch_fs *c,
+void bch2_btree_interior_update_will_free_node(struct bch_fs *c,
struct btree_interior_update *as,
struct btree *b)
{
@@ -1124,10 +1123,10 @@ void bch_btree_interior_update_will_free_node(struct bch_fs *c,
* oldest pin of any of the nodes we're freeing. We'll release the pin
* when the new nodes are persistent and reachable on disk:
*/
- bch_journal_pin_add_if_older(&c->journal,
+ bch2_journal_pin_add_if_older(&c->journal,
&b->writes[0].journal,
&as->journal, interior_update_flush);
- bch_journal_pin_add_if_older(&c->journal,
+ bch2_journal_pin_add_if_older(&c->journal,
&b->writes[1].journal,
&as->journal, interior_update_flush);
@@ -1171,18 +1170,18 @@ static void btree_node_interior_verify(struct btree *b)
BUG_ON(!b->level);
- bch_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
+ bch2_btree_node_iter_init(&iter, b, b->key.k.p, false, false);
#if 1
- BUG_ON(!(k = bch_btree_node_iter_peek(&iter, b)) ||
+ BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
bkey_cmp_left_packed(b, k, &b->key.k.p));
- BUG_ON((bch_btree_node_iter_advance(&iter, b),
- !bch_btree_node_iter_end(&iter)));
+ BUG_ON((bch2_btree_node_iter_advance(&iter, b),
+ !bch2_btree_node_iter_end(&iter)));
#else
const char *msg;
msg = "not found";
- k = bch_btree_node_iter_peek(&iter, b);
+ k = bch2_btree_node_iter_peek(&iter, b);
if (!k)
goto err;
@@ -1190,14 +1189,14 @@ static void btree_node_interior_verify(struct btree *b)
if (bkey_cmp_left_packed(b, k, &b->key.k.p))
goto err;
- bch_btree_node_iter_advance(&iter, b);
+ bch2_btree_node_iter_advance(&iter, b);
msg = "isn't last key";
- if (!bch_btree_node_iter_end(&iter))
+ if (!bch2_btree_node_iter_end(&iter))
goto err;
return;
err:
- bch_dump_btree_node(b);
+ bch2_dump_btree_node(b);
printk(KERN_ERR "last key %llu:%llu %s\n", b->key.k.p.inode,
b->key.k.p.offset, msg);
BUG();
@@ -1205,7 +1204,7 @@ err:
}
static enum btree_insert_ret
-bch_btree_insert_keys_interior(struct btree *b,
+bch2_btree_insert_keys_interior(struct btree *b,
struct btree_iter *iter,
struct keylist *insert_keys,
struct btree_interior_update *as,
@@ -1214,7 +1213,7 @@ bch_btree_insert_keys_interior(struct btree *b,
struct bch_fs *c = iter->c;
struct btree_iter *linked;
struct btree_node_iter node_iter;
- struct bkey_i *insert = bch_keylist_front(insert_keys);
+ struct bkey_i *insert = bch2_keylist_front(insert_keys);
struct bkey_packed *k;
BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
@@ -1226,7 +1225,7 @@ bch_btree_insert_keys_interior(struct btree *b,
if (bch_keylist_u64s(insert_keys) >
bch_btree_keys_u64s_remaining(c, b)) {
- btree_node_unlock_write(b, iter);
+ bch2_btree_node_unlock_write(b, iter);
return BTREE_INSERT_BTREE_NODE_FULL;
}
@@ -1238,31 +1237,31 @@ bch_btree_insert_keys_interior(struct btree *b,
* the iterator's current position - they know the keys go in
* the node the iterator points to:
*/
- while ((k = bch_btree_node_iter_prev_all(&node_iter, b)) &&
+ while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
(bkey_cmp_packed(b, k, &insert->k) >= 0))
;
- while (!bch_keylist_empty(insert_keys)) {
- insert = bch_keylist_front(insert_keys);
+ while (!bch2_keylist_empty(insert_keys)) {
+ insert = bch2_keylist_front(insert_keys);
- bch_insert_fixup_btree_ptr(iter, b, insert,
+ bch2_insert_fixup_btree_ptr(iter, b, insert,
&node_iter, &res->disk_res);
- bch_keylist_pop_front(insert_keys);
+ bch2_keylist_pop_front(insert_keys);
}
btree_interior_update_updated_btree(c, as, b);
for_each_linked_btree_node(iter, b, linked)
- bch_btree_node_iter_peek(&linked->node_iters[b->level],
+ bch2_btree_node_iter_peek(&linked->node_iters[b->level],
b);
- bch_btree_node_iter_peek(&iter->node_iters[b->level], b);
+ bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
- bch_btree_iter_verify(iter, b);
+ bch2_btree_iter_verify(iter, b);
- if (bch_maybe_compact_whiteouts(c, b))
- bch_btree_iter_reinit_node(iter, b);
+ if (bch2_maybe_compact_whiteouts(c, b))
+ bch2_btree_iter_reinit_node(iter, b);
- btree_node_unlock_write(b, iter);
+ bch2_btree_node_unlock_write(b, iter);
btree_node_interior_verify(b);
return BTREE_INSERT_OK;
@@ -1280,7 +1279,7 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n
struct bset *set1, *set2;
struct bkey_packed *k, *prev = NULL;
- n2 = bch_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
+ n2 = bch2_btree_node_alloc(iter->c, n1->level, iter->btree_id, reserve);
n2->data->max_key = n1->data->max_key;
n2->data->format = n1->format;
n2->key.k.p = n1->key.k.p;
@@ -1343,8 +1342,8 @@ static struct btree *__btree_split_node(struct btree_iter *iter, struct btree *n
btree_node_reset_sib_u64s(n1);
btree_node_reset_sib_u64s(n2);
- bch_verify_btree_nr_keys(n1);
- bch_verify_btree_nr_keys(n2);
+ bch2_verify_btree_nr_keys(n1);
+ bch2_verify_btree_nr_keys(n2);
if (n1->level) {
btree_node_interior_verify(n1);
@@ -1370,24 +1369,24 @@ static void btree_split_insert_keys(struct btree_iter *iter, struct btree *b,
struct btree_reserve *res)
{
struct btree_node_iter node_iter;
- struct bkey_i *k = bch_keylist_front(keys);
+ struct bkey_i *k = bch2_keylist_front(keys);
struct bkey_packed *p;
struct bset *i;
BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
- bch_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
+ bch2_btree_node_iter_init(&node_iter, b, k->k.p, false, false);
- while (!bch_keylist_empty(keys)) {
- k = bch_keylist_front(keys);
+ while (!bch2_keylist_empty(keys)) {
+ k = bch2_keylist_front(keys);
BUG_ON(bch_keylist_u64s(keys) >
bch_btree_keys_u64s_remaining(iter->c, b));
BUG_ON(bkey_cmp(k->k.p, b->data->min_key) < 0);
BUG_ON(bkey_cmp(k->k.p, b->data->max_key) > 0);
- bch_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
- bch_keylist_pop_front(keys);
+ bch2_insert_fixup_btree_ptr(iter, b, k, &node_iter, &res->disk_res);
+ bch2_keylist_pop_front(keys);
}
/*
@@ -1426,31 +1425,31 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
BUG_ON(!parent && (b != btree_node_root(c, b)));
BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
- bch_btree_interior_update_will_free_node(c, as, b);
+ bch2_btree_interior_update_will_free_node(c, as, b);
- n1 = btree_node_alloc_replacement(c, b, reserve);
+ n1 = bch2_btree_node_alloc_replacement(c, b, reserve);
if (b->level)
btree_split_insert_keys(iter, n1, insert_keys, reserve);
if (vstruct_blocks(n1->data, c->block_bits) > BTREE_SPLIT_THRESHOLD(c)) {
- trace_bcache_btree_node_split(c, b, b->nr.live_u64s);
+ trace_btree_node_split(c, b, b->nr.live_u64s);
n2 = __btree_split_node(iter, n1, reserve);
- bch_btree_build_aux_trees(n2);
- bch_btree_build_aux_trees(n1);
+ bch2_btree_build_aux_trees(n2);
+ bch2_btree_build_aux_trees(n1);
six_unlock_write(&n2->lock);
six_unlock_write(&n1->lock);
- bch_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent, -1);
/*
* Note that on recursive parent_keys == insert_keys, so we
* can't start adding new keys to parent_keys before emptying it
* out (which we did with btree_split_insert_keys() above)
*/
- bch_keylist_add(&as->parent_keys, &n1->key);
- bch_keylist_add(&as->parent_keys, &n2->key);
+ bch2_keylist_add(&as->parent_keys, &n1->key);
+ bch2_keylist_add(&as->parent_keys, &n2->key);
if (!parent) {
/* Depth increases, make a new root */
@@ -1462,58 +1461,58 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
btree_split_insert_keys(iter, n3, &as->parent_keys,
reserve);
- bch_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent, -1);
}
} else {
- trace_bcache_btree_node_compact(c, b, b->nr.live_u64s);
+ trace_btree_node_compact(c, b, b->nr.live_u64s);
- bch_btree_build_aux_trees(n1);
+ bch2_btree_build_aux_trees(n1);
six_unlock_write(&n1->lock);
- bch_keylist_add(&as->parent_keys, &n1->key);
+ bch2_keylist_add(&as->parent_keys, &n1->key);
}
- bch_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent, -1);
/* New nodes all written, now make them visible: */
if (parent) {
/* Split a non root node */
- bch_btree_insert_node(parent, iter, &as->parent_keys,
+ bch2_btree_insert_node(parent, iter, &as->parent_keys,
reserve, as);
} else if (n3) {
- bch_btree_set_root(iter, n3, as, reserve);
+ bch2_btree_set_root(iter, n3, as, reserve);
} else {
/* Root filled up but didn't need to be split */
- bch_btree_set_root(iter, n1, as, reserve);
+ bch2_btree_set_root(iter, n1, as, reserve);
}
- btree_open_bucket_put(c, n1);
+ bch2_btree_open_bucket_put(c, n1);
if (n2)
- btree_open_bucket_put(c, n2);
+ bch2_btree_open_bucket_put(c, n2);
if (n3)
- btree_open_bucket_put(c, n3);
+ bch2_btree_open_bucket_put(c, n3);
/*
* Note - at this point other linked iterators could still have @b read
- * locked; we're depending on the bch_btree_iter_node_replace() calls
+ * locked; we're depending on the bch2_btree_iter_node_replace() calls
* below removing all references to @b so we don't return with other
* iterators pointing to a node they have locked that's been freed.
*
- * We have to free the node first because the bch_iter_node_replace()
+ * We have to free the node first because the bch2_iter_node_replace()
* calls will drop _our_ iterator's reference - and intent lock - to @b.
*/
- bch_btree_node_free_inmem(iter, b);
+ bch2_btree_node_free_inmem(iter, b);
/* Successful split, update the iterator to point to the new nodes: */
if (n3)
- bch_btree_iter_node_replace(iter, n3);
+ bch2_btree_iter_node_replace(iter, n3);
if (n2)
- bch_btree_iter_node_replace(iter, n2);
- bch_btree_iter_node_replace(iter, n1);
+ bch2_btree_iter_node_replace(iter, n2);
+ bch2_btree_iter_node_replace(iter, n1);
- bch_time_stats_update(&c->btree_split_time, start_time);
+ bch2_time_stats_update(&c->btree_split_time, start_time);
}
/**
@@ -1528,7 +1527,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
* If a split occurred, this function will return early. This can only happen
* for leaf nodes -- inserts into interior nodes have to be atomic.
*/
-void bch_btree_insert_node(struct btree *b,
+void bch2_btree_insert_node(struct btree *b,
struct btree_iter *iter,
struct keylist *insert_keys,
struct btree_reserve *reserve,
@@ -1537,7 +1536,7 @@ void bch_btree_insert_node(struct btree *b,
BUG_ON(!b->level);
BUG_ON(!reserve || !as);
- switch (bch_btree_insert_keys_interior(b, iter, insert_keys,
+ switch (bch2_btree_insert_keys_interior(b, iter, insert_keys,
as, reserve)) {
case BTREE_INSERT_OK:
break;
@@ -1549,7 +1548,7 @@ void bch_btree_insert_node(struct btree *b,
}
}
-static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags)
+static int bch2_btree_split_leaf(struct btree_iter *iter, unsigned flags)
{
struct bch_fs *c = iter->c;
struct btree *b = iter->nodes[0];
@@ -1562,7 +1561,7 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags)
/* Hack, because gc and splitting nodes doesn't mix yet: */
if (!down_read_trylock(&c->gc_lock)) {
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
down_read(&c->gc_lock);
}
@@ -1570,16 +1569,16 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags)
* XXX: figure out how far we might need to split,
* instead of locking/reserving all the way to the root:
*/
- if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) {
+ if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
ret = -EINTR;
goto out;
}
- reserve = bch_btree_reserve_get(c, b, 0, flags, &cl);
+ reserve = bch2_btree_reserve_get(c, b, 0, flags, &cl);
if (IS_ERR(reserve)) {
ret = PTR_ERR(reserve);
if (ret == -EAGAIN) {
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
up_read(&c->gc_lock);
closure_sync(&cl);
return -EINTR;
@@ -1587,12 +1586,12 @@ static int bch_btree_split_leaf(struct btree_iter *iter, unsigned flags)
goto out;
}
- as = bch_btree_interior_update_alloc(c);
+ as = bch2_btree_interior_update_alloc(c);
btree_split(b, iter, NULL, reserve, as);
- bch_btree_reserve_put(c, reserve);
+ bch2_btree_reserve_put(c, reserve);
- bch_btree_iter_set_locks_want(iter, 1);
+ bch2_btree_iter_set_locks_want(iter, 1);
out:
up_read(&c->gc_lock);
return ret;
@@ -1618,35 +1617,35 @@ static struct btree *btree_node_get_sibling(struct btree_iter *iter,
if (!parent)
return NULL;
- if (!btree_node_relock(iter, level + 1)) {
- bch_btree_iter_set_locks_want(iter, level + 2);
+ if (!bch2_btree_node_relock(iter, level + 1)) {
+ bch2_btree_iter_set_locks_want(iter, level + 2);
return ERR_PTR(-EINTR);
}
node_iter = iter->node_iters[parent->level];
- k = bch_btree_node_iter_peek_all(&node_iter, parent);
+ k = bch2_btree_node_iter_peek_all(&node_iter, parent);
BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
do {
k = sib == btree_prev_sib
- ? bch_btree_node_iter_prev_all(&node_iter, parent)
- : (bch_btree_node_iter_advance(&node_iter, parent),
- bch_btree_node_iter_peek_all(&node_iter, parent));
+ ? bch2_btree_node_iter_prev_all(&node_iter, parent)
+ : (bch2_btree_node_iter_advance(&node_iter, parent),
+ bch2_btree_node_iter_peek_all(&node_iter, parent));
if (!k)
return NULL;
} while (bkey_deleted(k));
- bkey_unpack(parent, &tmp.k, k);
+ bch2_bkey_unpack(parent, &tmp.k, k);
- ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
+ ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
btree_node_unlock(iter, level);
- ret = bch_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
+ ret = bch2_btree_node_get(iter, &tmp.k, level, SIX_LOCK_intent);
}
- if (!IS_ERR(ret) && !btree_node_relock(iter, level)) {
+ if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
six_unlock_intent(&ret->lock);
ret = ERR_PTR(-EINTR);
}
@@ -1670,7 +1669,7 @@ static int __foreground_maybe_merge(struct btree_iter *iter,
closure_init_stack(&cl);
retry:
- if (!btree_node_relock(iter, iter->level))
+ if (!bch2_btree_node_relock(iter, iter->level))
return 0;
b = iter->nodes[iter->level];
@@ -1703,10 +1702,10 @@ retry:
next = m;
}
- bch_bkey_format_init(&new_s);
- __bch_btree_calc_format(&new_s, b);
- __bch_btree_calc_format(&new_s, m);
- new_f = bch_bkey_format_done(&new_s);
+ bch2_bkey_format_init(&new_s);
+ __bch2_btree_calc_format(&new_s, b);
+ __bch2_btree_calc_format(&new_s, m);
+ new_f = bch2_bkey_format_done(&new_s);
sib_u64s = btree_node_u64s_with_format(b, &new_f) +
btree_node_u64s_with_format(m, &new_f);
@@ -1728,7 +1727,7 @@ retry:
/* We're changing btree topology, doesn't mix with gc: */
if (!down_read_trylock(&c->gc_lock)) {
six_unlock_intent(&m->lock);
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
down_read(&c->gc_lock);
up_read(&c->gc_lock);
@@ -1736,12 +1735,12 @@ retry:
goto out;
}
- if (!bch_btree_iter_set_locks_want(iter, U8_MAX)) {
+ if (!bch2_btree_iter_set_locks_want(iter, U8_MAX)) {
ret = -EINTR;
goto out_unlock;
}
- reserve = bch_btree_reserve_get(c, b, 0,
+ reserve = bch2_btree_reserve_get(c, b, 0,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE,
&cl);
@@ -1750,12 +1749,12 @@ retry:
goto out_unlock;
}
- as = bch_btree_interior_update_alloc(c);
+ as = bch2_btree_interior_update_alloc(c);
- bch_btree_interior_update_will_free_node(c, as, b);
- bch_btree_interior_update_will_free_node(c, as, m);
+ bch2_btree_interior_update_will_free_node(c, as, b);
+ bch2_btree_interior_update_will_free_node(c, as, m);
- n = bch_btree_node_alloc(c, b->level, b->btree_id, reserve);
+ n = bch2_btree_node_alloc(c, b->level, b->btree_id, reserve);
n->data->min_key = prev->data->min_key;
n->data->max_key = next->data->max_key;
n->data->format = new_f;
@@ -1763,44 +1762,44 @@ retry:
btree_node_set_format(n, new_f);
- bch_btree_sort_into(c, n, prev);
- bch_btree_sort_into(c, n, next);
+ bch2_btree_sort_into(c, n, prev);
+ bch2_btree_sort_into(c, n, next);
- bch_btree_build_aux_trees(n);
+ bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock);
bkey_init(&delete.k);
delete.k.p = prev->key.k.p;
- bch_keylist_add(&as->parent_keys, &delete);
- bch_keylist_add(&as->parent_keys, &n->key);
+ bch2_keylist_add(&as->parent_keys, &delete);
+ bch2_keylist_add(&as->parent_keys, &n->key);
- bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
- bch_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
+ bch2_btree_insert_node(parent, iter, &as->parent_keys, reserve, as);
- btree_open_bucket_put(c, n);
- bch_btree_node_free_inmem(iter, b);
- bch_btree_node_free_inmem(iter, m);
- bch_btree_iter_node_replace(iter, n);
+ bch2_btree_open_bucket_put(c, n);
+ bch2_btree_node_free_inmem(iter, b);
+ bch2_btree_node_free_inmem(iter, m);
+ bch2_btree_iter_node_replace(iter, n);
- bch_btree_iter_verify(iter, n);
+ bch2_btree_iter_verify(iter, n);
- bch_btree_reserve_put(c, reserve);
+ bch2_btree_reserve_put(c, reserve);
out_unlock:
if (ret != -EINTR && ret != -EAGAIN)
- bch_btree_iter_set_locks_want(iter, 1);
+ bch2_btree_iter_set_locks_want(iter, 1);
six_unlock_intent(&m->lock);
up_read(&c->gc_lock);
out:
if (ret == -EAGAIN || ret == -EINTR) {
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
ret = -EINTR;
}
closure_sync(&cl);
if (ret == -EINTR) {
- ret = bch_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(iter);
if (!ret)
goto retry;
}
@@ -1840,8 +1839,8 @@ btree_insert_key(struct btree_insert *trans,
int live_u64s_added, u64s_added;
ret = !btree_node_is_extents(b)
- ? bch_insert_fixup_key(trans, insert)
- : bch_insert_fixup_extent(trans, insert);
+ ? bch2_insert_fixup_key(trans, insert)
+ : bch2_insert_fixup_extent(trans, insert);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;
@@ -1852,10 +1851,10 @@ btree_insert_key(struct btree_insert *trans,
b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);
if (u64s_added > live_u64s_added &&
- bch_maybe_compact_whiteouts(iter->c, b))
- bch_btree_iter_reinit_node(iter, b);
+ bch2_maybe_compact_whiteouts(iter->c, b))
+ bch2_btree_iter_reinit_node(iter, b);
- trace_bcache_btree_insert_key(c, b, insert->k);
+ trace_btree_insert_key(c, b, insert->k);
return ret;
}
@@ -1888,7 +1887,7 @@ static void multi_unlock_write(struct btree_insert *trans)
trans_for_each_entry(trans, i)
if (!same_leaf_as_prev(trans, i))
- btree_node_unlock_write(i->iter->nodes[0], i->iter);
+ bch2_btree_node_unlock_write(i->iter->nodes[0], i->iter);
}
static int btree_trans_entry_cmp(const void *_l, const void *_r)
@@ -1912,7 +1911,7 @@ static int btree_trans_entry_cmp(const void *_l, const void *_r)
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
-int __bch_btree_insert_at(struct btree_insert *trans)
+int __bch2_btree_insert_at(struct btree_insert *trans)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
@@ -1934,7 +1933,7 @@ int __bch_btree_insert_at(struct btree_insert *trans)
retry_locks:
ret = -EINTR;
trans_for_each_entry(trans, i)
- if (!bch_btree_iter_set_locks_want(i->iter, 1))
+ if (!bch2_btree_iter_set_locks_want(i->iter, 1))
goto err;
retry:
trans->did_work = false;
@@ -1946,7 +1945,7 @@ retry:
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
ret = !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)
- ? bch_journal_res_get(&c->journal,
+ ? bch2_journal_res_get(&c->journal,
&trans->journal_res,
u64s, u64s)
: 0;
@@ -1962,14 +1961,14 @@ retry:
u64s = 0;
/*
- * bch_btree_node_insert_fits() must be called under write lock:
+ * bch2_btree_node_insert_fits() must be called under write lock:
* with only an intent lock, another thread can still call
- * bch_btree_node_write(), converting an unwritten bset to a
+ * bch2_btree_node_write(), converting an unwritten bset to a
* written one
*/
if (!i->done) {
u64s += i->k->k.u64s + i->extra_res;
- if (!bch_btree_node_insert_fits(c,
+ if (!bch2_btree_node_insert_fits(c,
i->iter->nodes[0], u64s)) {
split = i->iter;
goto unlock;
@@ -2015,7 +2014,7 @@ retry:
}
unlock:
multi_unlock_write(trans);
- bch_journal_res_put(&c->journal, &trans->journal_res);
+ bch2_journal_res_put(&c->journal, &trans->journal_res);
if (split)
goto split;
@@ -2049,7 +2048,7 @@ split:
* allocating new btree nodes, and holding a journal reservation
* potentially blocks the allocator:
*/
- ret = bch_btree_split_leaf(split, trans->flags);
+ ret = bch2_btree_split_leaf(split, trans->flags);
if (ret)
goto err;
/*
@@ -2066,7 +2065,7 @@ err:
if (ret == -EINTR) {
trans_for_each_entry(trans, i) {
- int ret2 = bch_btree_iter_traverse(i->iter);
+ int ret2 = bch2_btree_iter_traverse(i->iter);
if (ret2) {
ret = ret2;
goto out;
@@ -2084,29 +2083,29 @@ err:
goto out;
}
-int bch_btree_insert_list_at(struct btree_iter *iter,
+int bch2_btree_insert_list_at(struct btree_iter *iter,
struct keylist *keys,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
u64 *journal_seq, unsigned flags)
{
BUG_ON(flags & BTREE_INSERT_ATOMIC);
- BUG_ON(bch_keylist_empty(keys));
+ BUG_ON(bch2_keylist_empty(keys));
verify_keys_sorted(keys);
- while (!bch_keylist_empty(keys)) {
+ while (!bch2_keylist_empty(keys)) {
/* need to traverse between each insert */
- int ret = bch_btree_iter_traverse(iter);
+ int ret = bch2_btree_iter_traverse(iter);
if (ret)
return ret;
- ret = bch_btree_insert_at(iter->c, disk_res, hook,
+ ret = bch2_btree_insert_at(iter->c, disk_res, hook,
journal_seq, flags,
- BTREE_INSERT_ENTRY(iter, bch_keylist_front(keys)));
+ BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
if (ret)
return ret;
- bch_keylist_pop_front(keys);
+ bch2_keylist_pop_front(keys);
}
return 0;
@@ -2124,7 +2123,7 @@ int bch_btree_insert_list_at(struct btree_iter *iter,
* -EAGAIN: @iter->cl was put on a waitlist waiting for btree node allocation
* -EINTR: btree node was changed while upgrading to write lock
*/
-int bch_btree_insert_check_key(struct btree_iter *iter,
+int bch2_btree_insert_check_key(struct btree_iter *iter,
struct bkey_i *check_key)
{
struct bpos saved_pos = iter->pos;
@@ -2142,11 +2141,11 @@ int bch_btree_insert_check_key(struct btree_iter *iter,
bkey_copy(&tmp.key, check_key);
- ret = bch_btree_insert_at(iter->c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(iter, &tmp.key));
- bch_btree_iter_rewind(iter, saved_pos);
+ bch2_btree_iter_rewind(iter, saved_pos);
return ret;
}
@@ -2158,7 +2157,7 @@ int bch_btree_insert_check_key(struct btree_iter *iter,
* @insert_keys: list of keys to insert
* @hook: insert callback
*/
-int bch_btree_insert(struct bch_fs *c, enum btree_id id,
+int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
struct bkey_i *k,
struct disk_reservation *disk_res,
struct extent_insert_hook *hook,
@@ -2167,24 +2166,24 @@ int bch_btree_insert(struct bch_fs *c, enum btree_id id,
struct btree_iter iter;
int ret, ret2;
- bch_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k));
+ bch2_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k));
- ret = bch_btree_iter_traverse(&iter);
+ ret = bch2_btree_iter_traverse(&iter);
if (unlikely(ret))
goto out;
- ret = bch_btree_insert_at(c, disk_res, hook, journal_seq, flags,
+ ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
BTREE_INSERT_ENTRY(&iter, k));
-out: ret2 = bch_btree_iter_unlock(&iter);
+out: ret2 = bch2_btree_iter_unlock(&iter);
return ret ?: ret2;
}
/**
- * bch_btree_update - like bch_btree_insert(), but asserts that we're
+ * bch2_btree_update - like bch2_btree_insert(), but asserts that we're
* overwriting an existing key
* overwriting an existing key
*/
-int bch_btree_update(struct bch_fs *c, enum btree_id id,
+int bch2_btree_update(struct bch_fs *c, enum btree_id id,
struct bkey_i *k, u64 *journal_seq)
{
struct btree_iter iter;
@@ -2193,21 +2192,21 @@ int bch_btree_update(struct bch_fs *c, enum btree_id id,
EBUG_ON(id == BTREE_ID_EXTENTS);
- bch_btree_iter_init_intent(&iter, c, id, k->k.p);
+ bch2_btree_iter_init_intent(&iter, c, id, k->k.p);
- u = bch_btree_iter_peek_with_holes(&iter);
+ u = bch2_btree_iter_peek_with_holes(&iter);
ret = btree_iter_err(u);
if (ret)
return ret;
if (bkey_deleted(u.k)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return -ENOENT;
}
- ret = bch_btree_insert_at(c, NULL, NULL, journal_seq, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq, 0,
BTREE_INSERT_ENTRY(&iter, k));
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
@@ -2216,7 +2215,7 @@ int bch_btree_update(struct bch_fs *c, enum btree_id id,
*
* Range is a half open interval - [start, end)
*/
-int bch_btree_delete_range(struct bch_fs *c, enum btree_id id,
+int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
struct bpos start,
struct bpos end,
struct bversion version,
@@ -2228,9 +2227,9 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id,
struct bkey_s_c k;
int ret = 0;
- bch_btree_iter_init_intent(&iter, c, id, start);
+ bch2_btree_iter_init_intent(&iter, c, id, start);
- while ((k = bch_btree_iter_peek(&iter)).k &&
+ while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = btree_iter_err(k))) {
unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
/* really shouldn't be using a bare, unpadded bkey_i */
@@ -2248,7 +2247,7 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id,
* because the range we want to delete could start in the middle
* of k.
*
- * (bch_btree_iter_peek() does guarantee that iter.pos >=
+ * (bch2_btree_iter_peek() does guarantee that iter.pos >=
* bkey_start_pos(k.k)).
*/
delete.k.p = iter.pos;
@@ -2265,20 +2264,20 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id,
delete.k.type = KEY_TYPE_DISCARD;
/* create the biggest key we can */
- bch_key_resize(&delete.k, max_sectors);
- bch_cut_back(end, &delete.k);
+ bch2_key_resize(&delete.k, max_sectors);
+ bch2_cut_back(end, &delete.k);
}
- ret = bch_btree_insert_at(c, disk_res, hook, journal_seq,
+ ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &delete));
if (ret)
break;
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
@@ -2288,7 +2287,7 @@ int bch_btree_delete_range(struct bch_fs *c, enum btree_id id,
* Returns 0 on success, -EINTR or -EAGAIN on failure (i.e.
* btree_check_reserve() has to wait)
*/
-int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
+int bch2_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
struct closure *cl)
{
struct bch_fs *c = iter->c;
@@ -2304,42 +2303,42 @@ int bch_btree_node_rewrite(struct btree_iter *iter, struct btree *b,
if (cl)
flags |= BTREE_INSERT_USE_RESERVE;
- if (!bch_btree_iter_set_locks_want(iter, U8_MAX))
+ if (!bch2_btree_iter_set_locks_want(iter, U8_MAX))
return -EINTR;
- reserve = bch_btree_reserve_get(c, b, 0, flags, cl);
+ reserve = bch2_btree_reserve_get(c, b, 0, flags, cl);
if (IS_ERR(reserve)) {
- trace_bcache_btree_gc_rewrite_node_fail(c, b);
+ trace_btree_gc_rewrite_node_fail(c, b);
return PTR_ERR(reserve);
}
- as = bch_btree_interior_update_alloc(c);
+ as = bch2_btree_interior_update_alloc(c);
- bch_btree_interior_update_will_free_node(c, as, b);
+ bch2_btree_interior_update_will_free_node(c, as, b);
- n = btree_node_alloc_replacement(c, b, reserve);
+ n = bch2_btree_node_alloc_replacement(c, b, reserve);
- bch_btree_build_aux_trees(n);
+ bch2_btree_build_aux_trees(n);
six_unlock_write(&n->lock);
- trace_bcache_btree_gc_rewrite_node(c, b);
+ trace_btree_gc_rewrite_node(c, b);
- bch_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
+ bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent, -1);
if (parent) {
- bch_btree_insert_node(parent, iter,
+ bch2_btree_insert_node(parent, iter,
&keylist_single(&n->key),
reserve, as);
} else {
- bch_btree_set_root(iter, n, as, reserve);
+ bch2_btree_set_root(iter, n, as, reserve);
}
- btree_open_bucket_put(c, n);
+ bch2_btree_open_bucket_put(c, n);
- bch_btree_node_free_inmem(iter, b);
+ bch2_btree_node_free_inmem(iter, b);
- BUG_ON(!bch_btree_iter_node_replace(iter, n));
+ BUG_ON(!bch2_btree_iter_node_replace(iter, n));
- bch_btree_reserve_put(c, reserve);
+ bch2_btree_reserve_put(c, reserve);
return 0;
}
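For orientation, the caller-side pattern after the rename mirrors bch2_btree_insert() above: initialize an intent iterator, traverse, hand a single entry to bch2_btree_insert_at(), then unlock. A minimal sketch, assuming the key and btree id are already set up; the function name is illustrative and not part of the patch:

static int example_insert_one(struct bch_fs *c, enum btree_id id, struct bkey_i *k)
{
	struct btree_iter iter;
	int ret, ret2;

	/* intent lock, since we are inserting */
	bch2_btree_iter_init_intent(&iter, c, id, bkey_start_pos(&k->k));

	ret = bch2_btree_iter_traverse(&iter);
	if (!ret)
		ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
					   BTREE_INSERT_ENTRY(&iter, k));

	ret2 = bch2_btree_iter_unlock(&iter);
	return ret ?: ret2;
}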
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 0be718621f96..b18c44c74444 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -24,8 +24,8 @@ struct btree_reserve {
struct btree *b[BTREE_RESERVE_MAX];
};
-void __bch_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch_btree_node_format_fits(struct bch_fs *c, struct btree *,
+void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
+bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
struct bkey_format *);
/* Btree node freeing/allocation: */
@@ -130,40 +130,37 @@ struct btree_interior_update {
list_for_each_entry(as, &c->btree_interior_update_list, list) \
for (p = as->pending; p < as->pending + as->nr_pending; p++)
-void bch_btree_node_free_inmem(struct btree_iter *, struct btree *);
-void bch_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
+void bch2_btree_node_free_inmem(struct btree_iter *, struct btree *);
+void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
+void bch2_btree_open_bucket_put(struct bch_fs *c, struct btree *);
-void btree_open_bucket_put(struct bch_fs *c, struct btree *);
-
-struct btree *__btree_node_alloc_replacement(struct bch_fs *,
+struct btree *__bch2_btree_node_alloc_replacement(struct bch_fs *,
struct btree *,
struct bkey_format,
struct btree_reserve *);
-struct btree *btree_node_alloc_replacement(struct bch_fs *, struct btree *,
- struct btree_reserve *);
struct btree_interior_update *
-bch_btree_interior_update_alloc(struct bch_fs *);
+bch2_btree_interior_update_alloc(struct bch_fs *);
-void bch_btree_interior_update_will_free_node(struct bch_fs *,
+void bch2_btree_interior_update_will_free_node(struct bch_fs *,
struct btree_interior_update *,
struct btree *);
-void bch_btree_set_root_initial(struct bch_fs *, struct btree *,
+void bch2_btree_set_root_initial(struct bch_fs *, struct btree *,
struct btree_reserve *);
-void bch_btree_reserve_put(struct bch_fs *, struct btree_reserve *);
-struct btree_reserve *bch_btree_reserve_get(struct bch_fs *,
+void bch2_btree_reserve_put(struct bch_fs *, struct btree_reserve *);
+struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *,
struct btree *, unsigned,
unsigned, struct closure *);
-int bch_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *);
+int bch2_btree_root_alloc(struct bch_fs *, enum btree_id, struct closure *);
/* Inserting into a given leaf node (last stage of insert): */
-bool bch_btree_bset_insert_key(struct btree_iter *, struct btree *,
+bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
struct btree_node_iter *, struct bkey_i *);
-void bch_btree_journal_key(struct btree_insert *trans, struct btree_iter *,
+void bch2_btree_journal_key(struct btree_insert *trans, struct btree_iter *,
struct bkey_i *);
static inline void *btree_data_end(struct bch_fs *c, struct btree *b)
@@ -256,12 +253,12 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
* write lock must be held on @b (else the dirty bset that we were going to
* insert into could be written out from under us)
*/
-static inline bool bch_btree_node_insert_fits(struct bch_fs *c,
+static inline bool bch2_btree_node_insert_fits(struct bch_fs *c,
struct btree *b, unsigned u64s)
{
if (btree_node_is_extents(b)) {
/* The insert key might split an existing key
- * (bch_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case:
+ * (bch2_insert_fixup_extent() -> BCH_EXTENT_OVERLAP_MIDDLE case:
*/
u64s += BKEY_EXTENT_U64s_MAX;
}
@@ -290,7 +287,7 @@ static inline void reserve_whiteout(struct btree *b, struct bset_tree *t,
}
}
-void bch_btree_insert_node(struct btree *, struct btree_iter *,
+void bch2_btree_insert_node(struct btree *, struct btree_iter *,
struct keylist *, struct btree_reserve *,
struct btree_interior_update *as);
@@ -318,7 +315,7 @@ struct btree_insert {
} *entries;
};
-int __bch_btree_insert_at(struct btree_insert *);
+int __bch2_btree_insert_at(struct btree_insert *);
#define _TENTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, N, ...) N
@@ -352,9 +349,9 @@ int __bch_btree_insert_at(struct btree_insert *);
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
-#define bch_btree_insert_at(_c, _disk_res, _hook, \
+#define bch2_btree_insert_at(_c, _disk_res, _hook, \
_journal_seq, _flags, ...) \
- __bch_btree_insert_at(&(struct btree_insert) { \
+ __bch2_btree_insert_at(&(struct btree_insert) { \
.c = (_c), \
.disk_res = (_disk_res), \
.journal_seq = (_journal_seq), \
@@ -383,7 +380,7 @@ int __bch_btree_insert_at(struct btree_insert *);
*/
#define BTREE_INSERT_JOURNAL_REPLAY (1 << 3)
-int bch_btree_insert_list_at(struct btree_iter *, struct keylist *,
+int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *,
struct disk_reservation *,
struct extent_insert_hook *, u64 *, unsigned);
@@ -406,19 +403,19 @@ static inline bool journal_res_insert_fits(struct btree_insert *trans,
return u64s <= trans->journal_res.u64s;
}
-int bch_btree_insert_check_key(struct btree_iter *, struct bkey_i *);
-int bch_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
+int bch2_btree_insert_check_key(struct btree_iter *, struct bkey_i *);
+int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
struct disk_reservation *,
struct extent_insert_hook *, u64 *, int flags);
-int bch_btree_update(struct bch_fs *, enum btree_id,
+int bch2_btree_update(struct bch_fs *, enum btree_id,
struct bkey_i *, u64 *);
-int bch_btree_delete_range(struct bch_fs *, enum btree_id,
+int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
struct bpos, struct bpos, struct bversion,
struct disk_reservation *,
struct extent_insert_hook *, u64 *);
-int bch_btree_node_rewrite(struct btree_iter *, struct btree *, struct closure *);
+int bch2_btree_node_rewrite(struct btree_iter *, struct btree *, struct closure *);
#endif /* _BCACHE_BTREE_INSERT_H */
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 585a7ce68b12..396251d5353e 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -62,7 +62,7 @@
* - free => metadata: cannot happen
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "btree_gc.h"
#include "buckets.h"
@@ -76,10 +76,10 @@
#define lg_local_lock lg_global_lock
#define lg_local_unlock lg_global_unlock
-static void bch_fs_stats_verify(struct bch_fs *c)
+static void bch2_fs_stats_verify(struct bch_fs *c)
{
struct bch_fs_usage stats =
- __bch_fs_usage_read(c);
+ __bch2_fs_usage_read(c);
if ((s64) stats.sectors_dirty < 0)
panic("sectors_dirty underflow: %lli\n", stats.sectors_dirty);
@@ -99,7 +99,7 @@ static void bch_fs_stats_verify(struct bch_fs *c)
#else
-static void bch_fs_stats_verify(struct bch_fs *c) {}
+static void bch2_fs_stats_verify(struct bch_fs *c) {}
#endif
@@ -107,7 +107,7 @@ static void bch_fs_stats_verify(struct bch_fs *c) {}
* Clear journal_seq_valid for buckets for which it's not needed, to prevent
* wraparound:
*/
-void bch_bucket_seq_cleanup(struct bch_fs *c)
+void bch2_bucket_seq_cleanup(struct bch_fs *c)
{
u16 last_seq_ondisk = c->journal.last_seq_ondisk;
struct bch_dev *ca;
@@ -127,7 +127,7 @@ void bch_bucket_seq_cleanup(struct bch_fs *c)
}
}
-#define bch_usage_add(_acc, _stats) \
+#define bch2_usage_add(_acc, _stats) \
do { \
typeof(_acc) _a = (_acc), _s = (_stats); \
unsigned i; \
@@ -136,18 +136,18 @@ do { \
((u64 *) (_a))[i] += ((u64 *) (_s))[i]; \
} while (0)
-#define bch_usage_read_raw(_stats) \
+#define bch2_usage_read_raw(_stats) \
({ \
typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \
int cpu; \
\
for_each_possible_cpu(cpu) \
- bch_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
+ bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
\
_acc; \
})
-#define bch_usage_read_cached(_c, _cached, _uncached) \
+#define bch2_usage_read_cached(_c, _cached, _uncached) \
({ \
typeof(_cached) _ret; \
unsigned _seq; \
@@ -155,35 +155,35 @@ do { \
do { \
_seq = read_seqcount_begin(&(_c)->gc_pos_lock); \
_ret = (_c)->gc_pos.phase == GC_PHASE_DONE \
- ? bch_usage_read_raw(_uncached) \
+ ? bch2_usage_read_raw(_uncached) \
: (_cached); \
} while (read_seqcount_retry(&(_c)->gc_pos_lock, _seq)); \
\
_ret; \
})
-struct bch_dev_usage __bch_dev_usage_read(struct bch_dev *ca)
+struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca)
{
- return bch_usage_read_raw(ca->usage_percpu);
+ return bch2_usage_read_raw(ca->usage_percpu);
}
-struct bch_dev_usage bch_dev_usage_read(struct bch_dev *ca)
+struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
- return bch_usage_read_cached(ca->fs,
+ return bch2_usage_read_cached(ca->fs,
ca->usage_cached,
ca->usage_percpu);
}
struct bch_fs_usage
-__bch_fs_usage_read(struct bch_fs *c)
+__bch2_fs_usage_read(struct bch_fs *c)
{
- return bch_usage_read_raw(c->usage_percpu);
+ return bch2_usage_read_raw(c->usage_percpu);
}
struct bch_fs_usage
-bch_fs_usage_read(struct bch_fs *c)
+bch2_fs_usage_read(struct bch_fs *c)
{
- return bch_usage_read_cached(c,
+ return bch2_usage_read_cached(c,
c->usage_cached,
c->usage_percpu);
}
@@ -218,7 +218,7 @@ static bool bucket_became_unavailable(struct bch_fs *c,
c && c->gc_pos.phase == GC_PHASE_DONE;
}
-void bch_fs_usage_apply(struct bch_fs *c,
+void bch2_fs_usage_apply(struct bch_fs *c,
struct bch_fs_usage *stats,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
@@ -247,15 +247,15 @@ void bch_fs_usage_apply(struct bch_fs *c,
stats->online_reserved = 0;
if (!gc_will_visit(c, gc_pos))
- bch_usage_add(this_cpu_ptr(c->usage_percpu), stats);
+ bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats);
- bch_fs_stats_verify(c);
+ bch2_fs_stats_verify(c);
lg_local_unlock(&c->usage_lock);
memset(stats, 0, sizeof(*stats));
}
-static void bch_fs_usage_update(struct bch_fs_usage *fs_usage,
+static void bch2_fs_usage_update(struct bch_fs_usage *fs_usage,
struct bucket_mark old, struct bucket_mark new)
{
fs_usage->s[S_COMPRESSED][S_CACHED] +=
@@ -266,13 +266,13 @@ static void bch_fs_usage_update(struct bch_fs_usage *fs_usage,
new.dirty_sectors;
}
-static void bch_dev_usage_update(struct bch_dev *ca,
+static void bch2_dev_usage_update(struct bch_dev *ca,
struct bucket_mark old, struct bucket_mark new)
{
struct bch_fs *c = ca->fs;
struct bch_dev_usage *dev_usage;
- bch_fs_inconsistent_on(old.data_type && new.data_type &&
+ bch2_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
"different types of metadata in same bucket: %u, %u",
old.data_type, new.data_type);
@@ -295,18 +295,18 @@ static void bch_dev_usage_update(struct bch_dev *ca,
preempt_enable();
if (!is_available_bucket(old) && is_available_bucket(new))
- bch_wake_allocator(ca);
+ bch2_wake_allocator(ca);
}
#define bucket_data_cmpxchg(ca, g, new, expr) \
({ \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
- bch_dev_usage_update(ca, _old, new); \
+ bch2_dev_usage_update(ca, _old, new); \
_old; \
})
-void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
+void bch2_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
{
struct bch_fs_usage stats = { 0 };
struct bucket_mark old, new;
@@ -322,14 +322,14 @@ void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
}));
/* XXX: we're not actually updating fs usage's cached sectors... */
- bch_fs_usage_update(&stats, old, new);
+ bch2_fs_usage_update(&stats, old, new);
if (!old.owned_by_allocator && old.cached_sectors)
- trace_bcache_invalidate(ca, g - ca->buckets,
+ trace_invalidate(ca, g - ca->buckets,
old.cached_sectors);
}
-void bch_mark_free_bucket(struct bch_dev *ca, struct bucket *g)
+void bch2_mark_free_bucket(struct bch_dev *ca, struct bucket *g)
{
struct bucket_mark old, new;
@@ -343,7 +343,7 @@ void bch_mark_free_bucket(struct bch_dev *ca, struct bucket *g)
BUG_ON(bucket_became_unavailable(ca->fs, old, new));
}
-void bch_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
+void bch2_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
bool owned_by_allocator)
{
struct bucket_mark new;
@@ -353,7 +353,7 @@ void bch_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
}));
}
-void bch_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
+void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
enum bucket_data_type type,
bool may_make_unavailable)
{
@@ -381,7 +381,7 @@ do { \
dst += (src); \
else { \
dst = (max); \
- trace_bcache_sectors_saturated(ca); \
+ trace_sectors_saturated(ca); \
} \
} while (0)
@@ -418,7 +418,7 @@ static unsigned __compressed_sectors(const union bch_extent_crc *crc, unsigned s
* loop, to avoid racing with the start of gc clearing all the marks - GC does
* that with the gc pos seqlock held.
*/
-static void bch_mark_pointer(struct bch_fs *c,
+static void bch2_mark_pointer(struct bch_fs *c,
struct bkey_s_c_extent e,
const union bch_extent_crc *crc,
const struct bch_extent_ptr *ptr,
@@ -509,7 +509,7 @@ static void bch_mark_pointer(struct bch_fs *c,
&ca->saturated_count) >=
ca->free_inc.size << ca->bucket_bits) {
if (c->gc_thread) {
- trace_bcache_gc_sectors_saturated(c);
+ trace_gc_sectors_saturated(c);
wake_up_process(c->gc_thread);
}
}
@@ -518,7 +518,7 @@ out:
stats->s[S_UNCOMPRESSED][type] += sectors;
}
-static void bch_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e,
+static void bch2_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e,
s64 sectors, bool metadata,
bool may_make_unavailable,
struct bch_fs_usage *stats,
@@ -532,13 +532,13 @@ static void bch_mark_extent(struct bch_fs *c, struct bkey_s_c_extent e,
BUG_ON(!sectors);
extent_for_each_ptr_crc(e, ptr, crc)
- bch_mark_pointer(c, e, crc, ptr, sectors,
+ bch2_mark_pointer(c, e, crc, ptr, sectors,
ptr->cached ? S_CACHED : type,
may_make_unavailable,
stats, gc_will_visit, journal_seq);
}
-static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k,
+static void __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, bool metadata,
bool may_make_unavailable,
struct bch_fs_usage *stats,
@@ -547,7 +547,7 @@ static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k,
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED:
- bch_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata,
+ bch2_mark_extent(c, bkey_s_c_to_extent(k), sectors, metadata,
may_make_unavailable, stats,
gc_will_visit, journal_seq);
break;
@@ -560,26 +560,26 @@ static void __bch_mark_key(struct bch_fs *c, struct bkey_s_c k,
}
}
-void __bch_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
+void __bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, bool metadata,
struct bch_fs_usage *stats)
{
- __bch_mark_key(c, k, sectors, metadata, true, stats, false, 0);
+ __bch2_mark_key(c, k, sectors, metadata, true, stats, false, 0);
}
-void bch_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
+void bch2_gc_mark_key(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, bool metadata)
{
struct bch_fs_usage stats = { 0 };
- __bch_gc_mark_key(c, k, sectors, metadata, &stats);
+ __bch2_gc_mark_key(c, k, sectors, metadata, &stats);
preempt_disable();
- bch_usage_add(this_cpu_ptr(c->usage_percpu), &stats);
+ bch2_usage_add(this_cpu_ptr(c->usage_percpu), &stats);
preempt_enable();
}
-void bch_mark_key(struct bch_fs *c, struct bkey_s_c k,
+void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, bool metadata, struct gc_pos gc_pos,
struct bch_fs_usage *stats, u64 journal_seq)
{
@@ -611,20 +611,20 @@ void bch_mark_key(struct bch_fs *c, struct bkey_s_c k,
* (e.g. the btree node lock, or the relevant allocator lock).
*/
lg_local_lock(&c->usage_lock);
- __bch_mark_key(c, k, sectors, metadata, false, stats,
+ __bch2_mark_key(c, k, sectors, metadata, false, stats,
gc_will_visit(c, gc_pos), journal_seq);
- bch_fs_stats_verify(c);
+ bch2_fs_stats_verify(c);
lg_local_unlock(&c->usage_lock);
}
static u64 __recalc_sectors_available(struct bch_fs *c)
{
- return c->capacity - bch_fs_sectors_used(c);
+ return c->capacity - bch2_fs_sectors_used(c);
}
/* Used by gc when it's starting: */
-void bch_recalc_sectors_available(struct bch_fs *c)
+void bch2_recalc_sectors_available(struct bch_fs *c)
{
int cpu;
@@ -639,7 +639,7 @@ void bch_recalc_sectors_available(struct bch_fs *c)
lg_global_unlock(&c->usage_lock);
}
-void bch_disk_reservation_put(struct bch_fs *c,
+void bch2_disk_reservation_put(struct bch_fs *c,
struct disk_reservation *res)
{
if (res->sectors) {
@@ -647,7 +647,7 @@ void bch_disk_reservation_put(struct bch_fs *c,
this_cpu_sub(c->usage_percpu->online_reserved,
res->sectors);
- bch_fs_stats_verify(c);
+ bch2_fs_stats_verify(c);
lg_local_unlock(&c->usage_lock);
res->sectors = 0;
@@ -656,7 +656,7 @@ void bch_disk_reservation_put(struct bch_fs *c,
#define SECTORS_CACHE 1024
-int bch_disk_reservation_add(struct bch_fs *c,
+int bch2_disk_reservation_add(struct bch_fs *c,
struct disk_reservation *res,
unsigned sectors, int flags)
{
@@ -691,7 +691,7 @@ out:
stats->online_reserved += sectors;
res->sectors += sectors;
- bch_fs_stats_verify(c);
+ bch2_fs_stats_verify(c);
lg_local_unlock(&c->usage_lock);
return 0;
@@ -728,7 +728,7 @@ recalculate:
ret = -ENOSPC;
}
- bch_fs_stats_verify(c);
+ bch2_fs_stats_verify(c);
lg_global_unlock(&c->usage_lock);
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
@@ -736,7 +736,7 @@ recalculate:
return ret;
}
-int bch_disk_reservation_get(struct bch_fs *c,
+int bch2_disk_reservation_get(struct bch_fs *c,
struct disk_reservation *res,
unsigned sectors, int flags)
{
@@ -746,5 +746,5 @@ int bch_disk_reservation_get(struct bch_fs *c,
? c->opts.metadata_replicas
: c->opts.data_replicas;
- return bch_disk_reservation_add(c, res, sectors, flags);
+ return bch2_disk_reservation_add(c, res, sectors, flags);
}
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 81355576f33a..9c77304ff5be 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -145,8 +145,8 @@ static inline unsigned bucket_sectors_used(struct bucket *g)
/* Per device stats: */
-struct bch_dev_usage __bch_dev_usage_read(struct bch_dev *);
-struct bch_dev_usage bch_dev_usage_read(struct bch_dev *);
+struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
+struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *);
static inline u64 __dev_buckets_available(struct bch_dev *ca,
struct bch_dev_usage stats)
@@ -163,7 +163,7 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
*/
static inline u64 dev_buckets_available(struct bch_dev *ca)
{
- return __dev_buckets_available(ca, bch_dev_usage_read(ca));
+ return __dev_buckets_available(ca, bch2_dev_usage_read(ca));
}
static inline u64 __dev_buckets_free(struct bch_dev *ca,
@@ -176,19 +176,19 @@ static inline u64 __dev_buckets_free(struct bch_dev *ca,
static inline u64 dev_buckets_free(struct bch_dev *ca)
{
- return __dev_buckets_free(ca, bch_dev_usage_read(ca));
+ return __dev_buckets_free(ca, bch2_dev_usage_read(ca));
}
/* Cache set stats: */
-struct bch_fs_usage __bch_fs_usage_read(struct bch_fs *);
-struct bch_fs_usage bch_fs_usage_read(struct bch_fs *);
-void bch_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
+struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
+struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
+void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, struct gc_pos);
-static inline u64 __bch_fs_sectors_used(struct bch_fs *c)
+static inline u64 __bch2_fs_sectors_used(struct bch_fs *c)
{
- struct bch_fs_usage stats = __bch_fs_usage_read(c);
+ struct bch_fs_usage stats = __bch2_fs_usage_read(c);
u64 reserved = stats.persistent_reserved +
stats.online_reserved;
@@ -198,9 +198,9 @@ static inline u64 __bch_fs_sectors_used(struct bch_fs *c)
(reserved >> 7);
}
-static inline u64 bch_fs_sectors_used(struct bch_fs *c)
+static inline u64 bch2_fs_sectors_used(struct bch_fs *c)
{
- return min(c->capacity, __bch_fs_sectors_used(c));
+ return min(c->capacity, __bch2_fs_sectors_used(c));
}
/* XXX: kill? */
@@ -233,23 +233,23 @@ static inline bool bucket_needs_journal_commit(struct bucket_mark m,
((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
}
-void bch_bucket_seq_cleanup(struct bch_fs *);
+void bch2_bucket_seq_cleanup(struct bch_fs *);
-void bch_invalidate_bucket(struct bch_dev *, struct bucket *);
-void bch_mark_free_bucket(struct bch_dev *, struct bucket *);
-void bch_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool);
-void bch_mark_metadata_bucket(struct bch_dev *, struct bucket *,
+void bch2_invalidate_bucket(struct bch_dev *, struct bucket *);
+void bch2_mark_free_bucket(struct bch_dev *, struct bucket *);
+void bch2_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool);
+void bch2_mark_metadata_bucket(struct bch_dev *, struct bucket *,
enum bucket_data_type, bool);
-void __bch_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
+void __bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
struct bch_fs_usage *);
-void bch_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool);
-void bch_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
+void bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool);
+void bch2_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
struct gc_pos, struct bch_fs_usage *, u64);
-void bch_recalc_sectors_available(struct bch_fs *);
+void bch2_recalc_sectors_available(struct bch_fs *);
-void bch_disk_reservation_put(struct bch_fs *,
+void bch2_disk_reservation_put(struct bch_fs *,
struct disk_reservation *);
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
@@ -257,10 +257,10 @@ void bch_disk_reservation_put(struct bch_fs *,
#define BCH_DISK_RESERVATION_GC_LOCK_HELD (1 << 2)
#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD (1 << 3)
-int bch_disk_reservation_add(struct bch_fs *,
+int bch2_disk_reservation_add(struct bch_fs *,
struct disk_reservation *,
unsigned, int);
-int bch_disk_reservation_get(struct bch_fs *,
+int bch2_disk_reservation_get(struct bch_fs *,
struct disk_reservation *,
unsigned, int);
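The disk reservation interface keeps its get/put pairing across the rename; only the prefix changes. A minimal usage sketch under that assumption, with an illustrative caller and sector count:

static int example_reserve(struct bch_fs *c, unsigned sectors)
{
	struct disk_reservation res = { 0 };
	int ret;

	ret = bch2_disk_reservation_get(c, &res, sectors, 0);
	if (ret)
		return ret; /* e.g. -ENOSPC if the reservation can't be satisfied */

	/* ... do the insert that consumes the reserved sectors ... */

	bch2_disk_reservation_put(c, &res);
	return 0;
}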
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index d59dbcf96c4f..24b92a293781 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -1,4 +1,4 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bcachefs_ioctl.h"
#include "super.h"
#include "super-io.h"
@@ -12,7 +12,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
-static long bch_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
+static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
{
struct bch_ioctl_assemble arg;
const char *err;
@@ -47,7 +47,7 @@ static long bch_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
}
}
- err = bch_fs_open(devs, arg.nr_devs, bch_opts_empty(), NULL);
+ err = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty(), NULL);
if (err) {
pr_err("Could not open filesystem: %s", err);
ret = -EINVAL;
@@ -63,7 +63,7 @@ err:
return ret;
}
-static long bch_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
+static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
{
struct bch_ioctl_incremental arg;
const char *err;
@@ -79,30 +79,30 @@ static long bch_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
if (!path)
return -ENOMEM;
- err = bch_fs_open_incremental(path);
+ err = bch2_fs_open_incremental(path);
kfree(path);
if (err) {
- pr_err("Could not register bcache devices: %s", err);
+ pr_err("Could not register bcachefs devices: %s", err);
return -EINVAL;
}
return 0;
}
-static long bch_global_ioctl(unsigned cmd, void __user *arg)
+static long bch2_global_ioctl(unsigned cmd, void __user *arg)
{
switch (cmd) {
case BCH_IOCTL_ASSEMBLE:
- return bch_ioctl_assemble(arg);
+ return bch2_ioctl_assemble(arg);
case BCH_IOCTL_INCREMENTAL:
- return bch_ioctl_incremental(arg);
+ return bch2_ioctl_incremental(arg);
default:
return -ENOTTY;
}
}
-static long bch_ioctl_query_uuid(struct bch_fs *c,
+static long bch2_ioctl_query_uuid(struct bch_fs *c,
struct bch_ioctl_query_uuid __user *user_arg)
{
return copy_to_user(&user_arg->uuid,
@@ -110,7 +110,7 @@ static long bch_ioctl_query_uuid(struct bch_fs *c,
sizeof(c->sb.user_uuid));
}
-static long bch_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *user_arg)
+static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *user_arg)
{
struct bch_ioctl_start arg;
@@ -120,17 +120,17 @@ static long bch_ioctl_start(struct bch_fs *c, struct bch_ioctl_start __user *use
if (arg.flags || arg.pad)
return -EINVAL;
- return bch_fs_start(c) ? -EIO : 0;
+ return bch2_fs_start(c) ? -EIO : 0;
}
-static long bch_ioctl_stop(struct bch_fs *c)
+static long bch2_ioctl_stop(struct bch_fs *c)
{
- bch_fs_stop(c);
+ bch2_fs_stop(c);
return 0;
}
/* returns with ref on ca->ref */
-static struct bch_dev *bch_device_lookup(struct bch_fs *c,
+static struct bch_dev *bch2_device_lookup(struct bch_fs *c,
const char __user *dev)
{
struct block_device *bdev;
@@ -158,9 +158,9 @@ found:
}
#if 0
-static struct bch_member *bch_uuid_lookup(struct bch_fs *c, uuid_le uuid)
+static struct bch_member *bch2_uuid_lookup(struct bch_fs *c, uuid_le uuid)
{
- struct bch_sb_field_members *mi = bch_sb_get_members(c->disk_sb);
+ struct bch_sb_field_members *mi = bch2_sb_get_members(c->disk_sb);
unsigned i;
lockdep_assert_held(&c->sb_lock);
@@ -173,8 +173,8 @@ static struct bch_member *bch_uuid_lookup(struct bch_fs *c, uuid_le uuid)
}
#endif
-static long bch_ioctl_disk_add(struct bch_fs *c,
- struct bch_ioctl_disk __user *user_arg)
+static long bch2_ioctl_disk_add(struct bch_fs *c,
+ struct bch_ioctl_disk __user *user_arg)
{
struct bch_ioctl_disk arg;
char *path;
@@ -190,14 +190,14 @@ static long bch_ioctl_disk_add(struct bch_fs *c,
if (!path)
return -ENOMEM;
- ret = bch_dev_add(c, path);
+ ret = bch2_dev_add(c, path);
kfree(path);
return ret;
}
-static long bch_ioctl_disk_remove(struct bch_fs *c,
- struct bch_ioctl_disk __user *user_arg)
+static long bch2_ioctl_disk_remove(struct bch_fs *c,
+ struct bch_ioctl_disk __user *user_arg)
{
struct bch_ioctl_disk arg;
struct bch_dev *ca;
@@ -205,15 +205,15 @@ static long bch_ioctl_disk_remove(struct bch_fs *c,
if (copy_from_user(&arg, user_arg, sizeof(arg)))
return -EFAULT;
- ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
+ ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
if (IS_ERR(ca))
return PTR_ERR(ca);
- return bch_dev_remove(c, ca, arg.flags);
+ return bch2_dev_remove(c, ca, arg.flags);
}
-static long bch_ioctl_disk_online(struct bch_fs *c,
- struct bch_ioctl_disk __user *user_arg)
+static long bch2_ioctl_disk_online(struct bch_fs *c,
+ struct bch_ioctl_disk __user *user_arg)
{
struct bch_ioctl_disk arg;
char *path;
@@ -229,13 +229,13 @@ static long bch_ioctl_disk_online(struct bch_fs *c,
if (!path)
return -ENOMEM;
- ret = bch_dev_online(c, path);
+ ret = bch2_dev_online(c, path);
kfree(path);
return ret;
}
-static long bch_ioctl_disk_offline(struct bch_fs *c,
- struct bch_ioctl_disk __user *user_arg)
+static long bch2_ioctl_disk_offline(struct bch_fs *c,
+ struct bch_ioctl_disk __user *user_arg)
{
struct bch_ioctl_disk arg;
struct bch_dev *ca;
@@ -247,17 +247,17 @@ static long bch_ioctl_disk_offline(struct bch_fs *c,
if (arg.pad)
return -EINVAL;
- ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
+ ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
if (IS_ERR(ca))
return PTR_ERR(ca);
- ret = bch_dev_offline(c, ca, arg.flags);
+ ret = bch2_dev_offline(c, ca, arg.flags);
percpu_ref_put(&ca->ref);
return ret;
}
-static long bch_ioctl_disk_set_state(struct bch_fs *c,
- struct bch_ioctl_disk_set_state __user *user_arg)
+static long bch2_ioctl_disk_set_state(struct bch_fs *c,
+ struct bch_ioctl_disk_set_state __user *user_arg)
{
struct bch_ioctl_disk_set_state arg;
struct bch_dev *ca;
@@ -266,18 +266,18 @@ static long bch_ioctl_disk_set_state(struct bch_fs *c,
if (copy_from_user(&arg, user_arg, sizeof(arg)))
return -EFAULT;
- ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
+ ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
if (IS_ERR(ca))
return PTR_ERR(ca);
- ret = bch_dev_set_state(c, ca, arg.new_state, arg.flags);
+ ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
percpu_ref_put(&ca->ref);
return ret;
}
-static long bch_ioctl_disk_evacuate(struct bch_fs *c,
- struct bch_ioctl_disk __user *user_arg)
+static long bch2_ioctl_disk_evacuate(struct bch_fs *c,
+ struct bch_ioctl_disk __user *user_arg)
{
struct bch_ioctl_disk arg;
struct bch_dev *ca;
@@ -286,22 +286,22 @@ static long bch_ioctl_disk_evacuate(struct bch_fs *c,
if (copy_from_user(&arg, user_arg, sizeof(arg)))
return -EFAULT;
- ca = bch_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
+ ca = bch2_device_lookup(c, (const char __user *)(unsigned long) arg.dev);
if (IS_ERR(ca))
return PTR_ERR(ca);
- ret = bch_dev_evacuate(c, ca);
+ ret = bch2_dev_evacuate(c, ca);
percpu_ref_put(&ca->ref);
return ret;
}
-long bch_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
+long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
/* ioctls that don't require admin cap: */
switch (cmd) {
case BCH_IOCTL_QUERY_UUID:
- return bch_ioctl_query_uuid(c, arg);
+ return bch2_ioctl_query_uuid(c, arg);
}
if (!capable(CAP_SYS_ADMIN))
@@ -310,41 +310,41 @@ long bch_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
/* ioctls that do require admin cap: */
switch (cmd) {
case BCH_IOCTL_START:
- return bch_ioctl_start(c, arg);
+ return bch2_ioctl_start(c, arg);
case BCH_IOCTL_STOP:
- return bch_ioctl_stop(c);
+ return bch2_ioctl_stop(c);
case BCH_IOCTL_DISK_ADD:
- return bch_ioctl_disk_add(c, arg);
+ return bch2_ioctl_disk_add(c, arg);
case BCH_IOCTL_DISK_REMOVE:
- return bch_ioctl_disk_remove(c, arg);
+ return bch2_ioctl_disk_remove(c, arg);
case BCH_IOCTL_DISK_ONLINE:
- return bch_ioctl_disk_online(c, arg);
+ return bch2_ioctl_disk_online(c, arg);
case BCH_IOCTL_DISK_OFFLINE:
- return bch_ioctl_disk_offline(c, arg);
+ return bch2_ioctl_disk_offline(c, arg);
case BCH_IOCTL_DISK_SET_STATE:
- return bch_ioctl_disk_set_state(c, arg);
+ return bch2_ioctl_disk_set_state(c, arg);
case BCH_IOCTL_DISK_EVACUATE:
- return bch_ioctl_disk_evacuate(c, arg);
+ return bch2_ioctl_disk_evacuate(c, arg);
default:
return -ENOTTY;
}
}
-static long bch_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
+static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
{
struct bch_fs *c = filp->private_data;
void __user *arg = (void __user *) v;
return c
- ? bch_fs_ioctl(c, cmd, arg)
- : bch_global_ioctl(cmd, arg);
+ ? bch2_fs_ioctl(c, cmd, arg)
+ : bch2_global_ioctl(cmd, arg);
}
static const struct file_operations bch_chardev_fops = {
.owner = THIS_MODULE,
- .unlocked_ioctl = bch_chardev_ioctl,
+ .unlocked_ioctl = bch2_chardev_ioctl,
.open = nonseekable_open,
};
@@ -353,7 +353,7 @@ static struct class *bch_chardev_class;
static struct device *bch_chardev;
static DEFINE_IDR(bch_chardev_minor);
-void bch_fs_chardev_exit(struct bch_fs *c)
+void bch2_fs_chardev_exit(struct bch_fs *c)
{
if (!IS_ERR_OR_NULL(c->chardev))
device_unregister(c->chardev);
@@ -361,7 +361,7 @@ void bch_fs_chardev_exit(struct bch_fs *c)
idr_remove(&bch_chardev_minor, c->minor);
}
-int bch_fs_chardev_init(struct bch_fs *c)
+int bch2_fs_chardev_init(struct bch_fs *c)
{
c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
if (c->minor < 0)
@@ -369,14 +369,14 @@ int bch_fs_chardev_init(struct bch_fs *c)
c->chardev = device_create(bch_chardev_class, NULL,
MKDEV(bch_chardev_major, c->minor), NULL,
- "bcache%u-ctl", c->minor);
+ "bcachefs%u-ctl", c->minor);
if (IS_ERR(c->chardev))
return PTR_ERR(c->chardev);
return 0;
}
-void bch_chardev_exit(void)
+void bch2_chardev_exit(void)
{
if (!IS_ERR_OR_NULL(bch_chardev_class))
device_destroy(bch_chardev_class,
@@ -384,22 +384,22 @@ void bch_chardev_exit(void)
if (!IS_ERR_OR_NULL(bch_chardev_class))
class_destroy(bch_chardev_class);
if (bch_chardev_major > 0)
- unregister_chrdev(bch_chardev_major, "bcache");
+ unregister_chrdev(bch_chardev_major, "bcachefs");
}
-int __init bch_chardev_init(void)
+int __init bch2_chardev_init(void)
{
- bch_chardev_major = register_chrdev(0, "bcache-ctl", &bch_chardev_fops);
+ bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
if (bch_chardev_major < 0)
return bch_chardev_major;
- bch_chardev_class = class_create(THIS_MODULE, "bcache");
+ bch_chardev_class = class_create(THIS_MODULE, "bcachefs");
if (IS_ERR(bch_chardev_class))
return PTR_ERR(bch_chardev_class);
bch_chardev = device_create(bch_chardev_class, NULL,
MKDEV(bch_chardev_major, 255),
- NULL, "bcache-ctl");
+ NULL, "bcachefs-ctl");
if (IS_ERR(bch_chardev))
return PTR_ERR(bch_chardev);
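The per-device ioctl handlers above share one pattern: bch2_device_lookup() returns with a reference held on ca->ref, and the caller drops it once the operation is done. A minimal sketch of that pattern; the operation in the middle is illustrative:

static long example_dev_op(struct bch_fs *c, const char __user *dev)
{
	struct bch_dev *ca;
	long ret;

	ca = bch2_device_lookup(c, dev); /* returns with ref on ca->ref */
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = 0; /* e.g. bch2_dev_offline() or bch2_dev_set_state() would go here */

	percpu_ref_put(&ca->ref);
	return ret;
}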
diff --git a/fs/bcachefs/chardev.h b/fs/bcachefs/chardev.h
index 61a4c2b5a36d..e0e34e24177a 100644
--- a/fs/bcachefs/chardev.h
+++ b/fs/bcachefs/chardev.h
@@ -3,27 +3,27 @@
#ifndef NO_BCACHE_CHARDEV
-long bch_fs_ioctl(struct bch_fs *, unsigned, void __user *);
+long bch2_fs_ioctl(struct bch_fs *, unsigned, void __user *);
-void bch_fs_chardev_exit(struct bch_fs *);
-int bch_fs_chardev_init(struct bch_fs *);
+void bch2_fs_chardev_exit(struct bch_fs *);
+int bch2_fs_chardev_init(struct bch_fs *);
-void bch_chardev_exit(void);
-int __init bch_chardev_init(void);
+void bch2_chardev_exit(void);
+int __init bch2_chardev_init(void);
#else
-static inline long bch_fs_ioctl(struct bch_fs *c,
+static inline long bch2_fs_ioctl(struct bch_fs *c,
unsigned cmd, void __user * arg)
{
return -ENOSYS;
}
-static inline void bch_fs_chardev_exit(struct bch_fs *c) {}
-static inline int bch_fs_chardev_init(struct bch_fs *c) { return 0; }
+static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
+static inline int bch2_fs_chardev_init(struct bch_fs *c) { return 0; }
-static inline void bch_chardev_exit(void) {}
-static inline int __init bch_chardev_init(void) { return 0; }
+static inline void bch2_chardev_exit(void) {}
+static inline int __init bch2_chardev_init(void) { return 0; }
#endif
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 30928ba1e252..4545a4994700 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -1,5 +1,4 @@
-
-#include "bcache.h"
+#include "bcachefs.h"
#include "checksum.h"
#include "super.h"
#include "super-io.h"
@@ -125,7 +124,7 @@ static const u64 crc_table[256] = {
0x9AFCE626CE85B507ULL,
};
-u64 bch_crc64_update(u64 crc, const void *_data, size_t len)
+u64 bch2_crc64_update(u64 crc, const void *_data, size_t len)
{
const unsigned char *data = _data;
@@ -137,7 +136,7 @@ u64 bch_crc64_update(u64 crc, const void *_data, size_t len)
return crc;
}
-static u64 bch_checksum_init(unsigned type)
+static u64 bch2_checksum_init(unsigned type)
{
switch (type) {
case BCH_CSUM_NONE:
@@ -151,7 +150,7 @@ static u64 bch_checksum_init(unsigned type)
}
}
-static u64 bch_checksum_final(unsigned type, u64 crc)
+static u64 bch2_checksum_final(unsigned type, u64 crc)
{
switch (type) {
case BCH_CSUM_NONE:
@@ -165,7 +164,7 @@ static u64 bch_checksum_final(unsigned type, u64 crc)
}
}
-static u64 bch_checksum_update(unsigned type, u64 crc, const void *data, size_t len)
+static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t len)
{
switch (type) {
case BCH_CSUM_NONE:
@@ -173,7 +172,7 @@ static u64 bch_checksum_update(unsigned type, u64 crc, const void *data, size_t
case BCH_CSUM_CRC32C:
return crc32c(crc, data, len);
case BCH_CSUM_CRC64:
- return bch_crc64_update(crc, data, len);
+ return bch2_crc64_update(crc, data, len);
default:
BUG();
}
@@ -200,7 +199,7 @@ static inline void do_encrypt(struct crypto_blkcipher *tfm,
do_encrypt_sg(tfm, nonce, &sg, len);
}
-int bch_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
+int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
void *buf, size_t len)
{
struct crypto_blkcipher *chacha20 =
@@ -236,17 +235,17 @@ static void gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
crypto_shash_update(desc, key, sizeof(key));
}
-struct bch_csum bch_checksum(struct bch_fs *c, unsigned type,
- struct nonce nonce, const void *data, size_t len)
+struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
+ struct nonce nonce, const void *data, size_t len)
{
switch (type) {
case BCH_CSUM_NONE:
case BCH_CSUM_CRC32C:
case BCH_CSUM_CRC64: {
- u64 crc = bch_checksum_init(type);
+ u64 crc = bch2_checksum_init(type);
- crc = bch_checksum_update(type, crc, data, len);
- crc = bch_checksum_final(type, crc);
+ crc = bch2_checksum_update(type, crc, data, len);
+ crc = bch2_checksum_final(type, crc);
return (struct bch_csum) { .lo = crc };
}
@@ -270,17 +269,17 @@ struct bch_csum bch_checksum(struct bch_fs *c, unsigned type,
}
}
-void bch_encrypt(struct bch_fs *c, unsigned type,
- struct nonce nonce, void *data, size_t len)
+void bch2_encrypt(struct bch_fs *c, unsigned type,
+ struct nonce nonce, void *data, size_t len)
{
- if (!bch_csum_type_is_encryption(type))
+ if (!bch2_csum_type_is_encryption(type))
return;
do_encrypt(c->chacha20, nonce, data, len);
}
-struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio)
+struct bch_csum bch2_checksum_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
{
struct bio_vec bv;
struct bvec_iter iter;
@@ -290,16 +289,16 @@ struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type,
return (struct bch_csum) { 0 };
case BCH_CSUM_CRC32C:
case BCH_CSUM_CRC64: {
- u64 crc = bch_checksum_init(type);
+ u64 crc = bch2_checksum_init(type);
bio_for_each_contig_segment(bv, bio, iter) {
void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
- crc = bch_checksum_update(type,
+ crc = bch2_checksum_update(type,
crc, p, bv.bv_len);
kunmap_atomic(p);
}
- crc = bch_checksum_final(type, crc);
+ crc = bch2_checksum_final(type, crc);
return (struct bch_csum) { .lo = crc };
}
@@ -328,15 +327,15 @@ struct bch_csum bch_checksum_bio(struct bch_fs *c, unsigned type,
}
}
-void bch_encrypt_bio(struct bch_fs *c, unsigned type,
- struct nonce nonce, struct bio *bio)
+void bch2_encrypt_bio(struct bch_fs *c, unsigned type,
+ struct nonce nonce, struct bio *bio)
{
struct bio_vec bv;
struct bvec_iter iter;
struct scatterlist sgl[16], *sg = sgl;
size_t bytes = 0;
- if (!bch_csum_type_is_encryption(type))
+ if (!bch2_csum_type_is_encryption(type))
return;
sg_init_table(sgl, ARRAY_SIZE(sgl));
@@ -363,7 +362,7 @@ void bch_encrypt_bio(struct bch_fs *c, unsigned type,
}
#ifdef __KERNEL__
-int bch_request_key(struct bch_sb *sb, struct bch_key *key)
+int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
char key_description[60];
struct key *keyring_key;
@@ -371,7 +370,7 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key)
int ret;
snprintf(key_description, sizeof(key_description),
- "bcache:%pUb", &sb->user_uuid);
+ "bcachefs:%pUb", &sb->user_uuid);
keyring_key = request_key(&key_type_logon, key_description, NULL);
if (IS_ERR(keyring_key))
@@ -394,14 +393,14 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key)
#include <keyutils.h>
#include <uuid/uuid.h>
-int bch_request_key(struct bch_sb *sb, struct bch_key *key)
+int bch2_request_key(struct bch_sb *sb, struct bch_key *key)
{
key_serial_t key_id;
char key_description[60];
char uuid[40];
uuid_unparse_lower(sb->user_uuid.b, uuid);
- sprintf(key_description, "bcache:%s", uuid);
+ sprintf(key_description, "bcachefs:%s", uuid);
key_id = request_key("user", key_description, NULL,
KEY_SPEC_USER_KEYRING);
@@ -415,7 +414,7 @@ int bch_request_key(struct bch_sb *sb, struct bch_key *key)
}
#endif
-static int bch_decrypt_sb_key(struct bch_fs *c,
+static int bch2_decrypt_sb_key(struct bch_fs *c,
struct bch_sb_field_crypt *crypt,
struct bch_key *key)
{
@@ -424,22 +423,22 @@ static int bch_decrypt_sb_key(struct bch_fs *c,
int ret = 0;
/* is key encrypted? */
- if (!bch_key_is_encrypted(&sb_key))
+ if (!bch2_key_is_encrypted(&sb_key))
goto out;
- ret = bch_request_key(c->disk_sb, &user_key);
+ ret = bch2_request_key(c->disk_sb, &user_key);
if (ret) {
bch_err(c, "error requesting encryption key");
goto err;
}
/* decrypt real key: */
- ret = bch_chacha_encrypt_key(&user_key, bch_sb_key_nonce(c),
+ ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
&sb_key, sizeof(sb_key));
if (ret)
goto err;
- if (bch_key_is_encrypted(&sb_key)) {
+ if (bch2_key_is_encrypted(&sb_key)) {
bch_err(c, "incorrect encryption key");
ret = -EINVAL;
goto err;
@@ -452,7 +451,7 @@ err:
return ret;
}
-static int bch_alloc_ciphers(struct bch_fs *c)
+static int bch2_alloc_ciphers(struct bch_fs *c)
{
if (!c->chacha20)
c->chacha20 = crypto_alloc_blkcipher("chacha20", 0,
@@ -468,7 +467,7 @@ static int bch_alloc_ciphers(struct bch_fs *c)
return 0;
}
-int bch_disable_encryption(struct bch_fs *c)
+int bch2_disable_encryption(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
struct bch_key key;
@@ -476,16 +475,16 @@ int bch_disable_encryption(struct bch_fs *c)
mutex_lock(&c->sb_lock);
- crypt = bch_sb_get_crypt(c->disk_sb);
+ crypt = bch2_sb_get_crypt(c->disk_sb);
if (!crypt)
goto out;
/* is key encrypted? */
ret = 0;
- if (bch_key_is_encrypted(&crypt->key))
+ if (bch2_key_is_encrypted(&crypt->key))
goto out;
- ret = bch_decrypt_sb_key(c, crypt, &key);
+ ret = bch2_decrypt_sb_key(c, crypt, &key);
if (ret)
goto out;
@@ -493,14 +492,14 @@ int bch_disable_encryption(struct bch_fs *c)
crypt->key.key = key;
SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 0);
- bch_write_super(c);
+ bch2_write_super(c);
out:
mutex_unlock(&c->sb_lock);
return ret;
}
-int bch_enable_encryption(struct bch_fs *c, bool keyed)
+int bch2_enable_encryption(struct bch_fs *c, bool keyed)
{
struct bch_encrypted_key key;
struct bch_key user_key;
@@ -510,10 +509,10 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed)
mutex_lock(&c->sb_lock);
/* Do we already have an encryption key? */
- if (bch_sb_get_crypt(c->disk_sb))
+ if (bch2_sb_get_crypt(c->disk_sb))
goto err;
- ret = bch_alloc_ciphers(c);
+ ret = bch2_alloc_ciphers(c);
if (ret)
goto err;
@@ -521,14 +520,14 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed)
get_random_bytes(&key.key, sizeof(key.key));
if (keyed) {
- ret = bch_request_key(c->disk_sb, &user_key);
+ ret = bch2_request_key(c->disk_sb, &user_key);
if (ret) {
bch_err(c, "error requesting encryption key");
goto err;
}
- ret = bch_chacha_encrypt_key(&user_key, bch_sb_key_nonce(c),
- &key, sizeof(key));
+ ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
+ &key, sizeof(key));
if (ret)
goto err;
}
@@ -538,7 +537,7 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed)
if (ret)
goto err;
- crypt = bch_fs_sb_resize_crypt(c, sizeof(*crypt) / sizeof(u64));
+ crypt = bch2_fs_sb_resize_crypt(c, sizeof(*crypt) / sizeof(u64));
if (!crypt) {
ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
goto err;
@@ -548,7 +547,7 @@ int bch_enable_encryption(struct bch_fs *c, bool keyed)
/* write superblock */
SET_BCH_SB_ENCRYPTION_TYPE(c->disk_sb, 1);
- bch_write_super(c);
+ bch2_write_super(c);
err:
mutex_unlock(&c->sb_lock);
memzero_explicit(&user_key, sizeof(user_key));
@@ -556,7 +555,7 @@ err:
return ret;
}
-void bch_fs_encryption_exit(struct bch_fs *c)
+void bch2_fs_encryption_exit(struct bch_fs *c)
{
if (!IS_ERR_OR_NULL(c->poly1305))
crypto_free_shash(c->poly1305);
@@ -566,7 +565,7 @@ void bch_fs_encryption_exit(struct bch_fs *c)
crypto_free_shash(c->sha256);
}
-int bch_fs_encryption_init(struct bch_fs *c)
+int bch2_fs_encryption_init(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
struct bch_key key;
@@ -576,15 +575,15 @@ int bch_fs_encryption_init(struct bch_fs *c)
if (IS_ERR(c->sha256))
return PTR_ERR(c->sha256);
- crypt = bch_sb_get_crypt(c->disk_sb);
+ crypt = bch2_sb_get_crypt(c->disk_sb);
if (!crypt)
return 0;
- ret = bch_alloc_ciphers(c);
+ ret = bch2_alloc_ciphers(c);
if (ret)
return ret;
- ret = bch_decrypt_sb_key(c, crypt, &key);
+ ret = bch2_decrypt_sb_key(c, crypt, &key);
if (ret)
goto err;
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 10f62e5bb5fb..f540e3050749 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -1,12 +1,12 @@
#ifndef _BCACHE_CHECKSUM_H
#define _BCACHE_CHECKSUM_H
-#include "bcache.h"
+#include "bcachefs.h"
#include "super-io.h"
#include <crypto/chacha20.h>
-u64 bch_crc64_update(u64, const void *, size_t);
+u64 bch2_crc64_update(u64, const void *, size_t);
#define BCH_NONCE_EXTENT cpu_to_le32(1 << 28)
#define BCH_NONCE_BTREE cpu_to_le32(2 << 28)
@@ -14,7 +14,7 @@ u64 bch_crc64_update(u64, const void *, size_t);
#define BCH_NONCE_PRIO cpu_to_le32(4 << 28)
#define BCH_NONCE_POLY cpu_to_le32(1 << 31)
-struct bch_csum bch_checksum(struct bch_fs *, unsigned, struct nonce,
+struct bch_csum bch2_checksum(struct bch_fs *, unsigned, struct nonce,
const void *, size_t);
/*
@@ -26,27 +26,27 @@ struct bch_csum bch_checksum(struct bch_fs *, unsigned, struct nonce,
const void *start = ((const void *) (_i)) + sizeof((_i)->csum); \
const void *end = vstruct_end(_i); \
\
- bch_checksum(_c, _type, _nonce, start, end - start); \
+ bch2_checksum(_c, _type, _nonce, start, end - start); \
})
-int bch_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
-int bch_request_key(struct bch_sb *, struct bch_key *);
+int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
+int bch2_request_key(struct bch_sb *, struct bch_key *);
-void bch_encrypt(struct bch_fs *, unsigned, struct nonce,
+void bch2_encrypt(struct bch_fs *, unsigned, struct nonce,
void *data, size_t);
-struct bch_csum bch_checksum_bio(struct bch_fs *, unsigned,
+struct bch_csum bch2_checksum_bio(struct bch_fs *, unsigned,
struct nonce, struct bio *);
-void bch_encrypt_bio(struct bch_fs *, unsigned,
+void bch2_encrypt_bio(struct bch_fs *, unsigned,
struct nonce, struct bio *);
-int bch_disable_encryption(struct bch_fs *);
-int bch_enable_encryption(struct bch_fs *, bool);
+int bch2_disable_encryption(struct bch_fs *);
+int bch2_enable_encryption(struct bch_fs *, bool);
-void bch_fs_encryption_exit(struct bch_fs *);
-int bch_fs_encryption_init(struct bch_fs *);
+void bch2_fs_encryption_exit(struct bch_fs *);
+int bch2_fs_encryption_init(struct bch_fs *);
-static inline unsigned bch_data_checksum_type(struct bch_fs *c)
+static inline unsigned bch2_data_checksum_type(struct bch_fs *c)
{
if (c->sb.encryption_type)
return c->opts.wide_macs
@@ -56,20 +56,20 @@ static inline unsigned bch_data_checksum_type(struct bch_fs *c)
return c->opts.data_checksum;
}
-static inline unsigned bch_meta_checksum_type(struct bch_fs *c)
+static inline unsigned bch2_meta_checksum_type(struct bch_fs *c)
{
return c->sb.encryption_type
? BCH_CSUM_CHACHA20_POLY1305_128
: c->opts.metadata_checksum;
}
-static inline bool bch_checksum_type_valid(const struct bch_fs *c,
+static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
unsigned type)
{
if (type >= BCH_CSUM_NR)
return false;
- if (bch_csum_type_is_encryption(type) && !c->chacha20)
+ if (bch2_csum_type_is_encryption(type) && !c->chacha20)
return false;
return true;
@@ -83,7 +83,7 @@ static const unsigned bch_crc_bytes[] = {
[BCH_CSUM_CHACHA20_POLY1305_128] = 16,
};
-static inline bool bch_crc_cmp(struct bch_csum l, struct bch_csum r)
+static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
{
/*
* XXX: need some way of preventing the compiler from optimizing this
@@ -101,14 +101,14 @@ static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
return nonce;
}
-static inline bool bch_key_is_encrypted(struct bch_encrypted_key *key)
+static inline bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
{
return le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
}
-static inline struct nonce __bch_sb_key_nonce(struct bch_sb *sb)
+static inline struct nonce __bch2_sb_key_nonce(struct bch_sb *sb)
{
- __le64 magic = __bch_sb_magic(sb);
+ __le64 magic = __bch2_sb_magic(sb);
return (struct nonce) {{
[0] = 0,
@@ -118,9 +118,9 @@ static inline struct nonce __bch_sb_key_nonce(struct bch_sb *sb)
}};
}
-static inline struct nonce bch_sb_key_nonce(struct bch_fs *c)
+static inline struct nonce bch2_sb_key_nonce(struct bch_fs *c)
{
- __le64 magic = bch_sb_magic(c);
+ __le64 magic = bch2_sb_magic(c);
return (struct nonce) {{
[0] = 0,
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 85891a0356cc..3c3649f0862c 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -1,4 +1,4 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "clock.h"
#include <linux/freezer.h>
@@ -9,7 +9,7 @@ static inline bool io_timer_cmp(struct io_timer *l, struct io_timer *r)
return time_after(l->expire, r->expire);
}
-void bch_io_timer_add(struct io_clock *clock, struct io_timer *timer)
+void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
size_t i;
@@ -23,7 +23,7 @@ out:
spin_unlock(&clock->timer_lock);
}
-void bch_io_timer_del(struct io_clock *clock, struct io_timer *timer)
+void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
size_t i;
@@ -53,7 +53,7 @@ static void io_clock_wait_fn(struct io_timer *timer)
wake_up_process(wait->task);
}
-void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
+void bch2_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
{
struct io_clock_wait wait;
@@ -62,17 +62,17 @@ void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
wait.timer.fn = io_clock_wait_fn;
wait.task = current;
wait.expired = 0;
- bch_io_timer_add(clock, &wait.timer);
+ bch2_io_timer_add(clock, &wait.timer);
schedule();
- bch_io_timer_del(clock, &wait.timer);
+ bch2_io_timer_del(clock, &wait.timer);
}
/*
* _only_ to be used from a kthread
*/
-void bch_kthread_io_clock_wait(struct io_clock *clock,
+void bch2_kthread_io_clock_wait(struct io_clock *clock,
unsigned long until)
{
struct io_clock_wait wait;
@@ -82,7 +82,7 @@ void bch_kthread_io_clock_wait(struct io_clock *clock,
wait.timer.fn = io_clock_wait_fn;
wait.task = current;
wait.expired = 0;
- bch_io_timer_add(clock, &wait.timer);
+ bch2_io_timer_add(clock, &wait.timer);
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -97,7 +97,7 @@ void bch_kthread_io_clock_wait(struct io_clock *clock,
}
__set_current_state(TASK_RUNNING);
- bch_io_timer_del(clock, &wait.timer);
+ bch2_io_timer_del(clock, &wait.timer);
}
static struct io_timer *get_expired_timer(struct io_clock *clock,
@@ -116,7 +116,7 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
return ret;
}
-void bch_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
+void bch2_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
{
struct io_clock *clock = &c->io_clock[rw];
struct io_timer *timer;
@@ -139,13 +139,13 @@ void bch_increment_clock(struct bch_fs *c, unsigned sectors, int rw)
timer->fn(timer);
}
-void bch_io_clock_exit(struct io_clock *clock)
+void bch2_io_clock_exit(struct io_clock *clock)
{
free_heap(&clock->timers);
free_percpu(clock->pcpu_buf);
}
-int bch_io_clock_init(struct io_clock *clock)
+int bch2_io_clock_init(struct io_clock *clock)
{
atomic_long_set(&clock->now, 0);
spin_lock_init(&clock->timer_lock);
diff --git a/fs/bcachefs/clock.h b/fs/bcachefs/clock.h
index 9e081d7dfc1e..061bf04a265e 100644
--- a/fs/bcachefs/clock.h
+++ b/fs/bcachefs/clock.h
@@ -1,14 +1,14 @@
#ifndef _BCACHE_CLOCK_H
#define _BCACHE_CLOCK_H
-void bch_io_timer_add(struct io_clock *, struct io_timer *);
-void bch_io_timer_del(struct io_clock *, struct io_timer *);
-void bch_kthread_io_clock_wait(struct io_clock *, unsigned long);
-void bch_increment_clock(struct bch_fs *, unsigned, int);
+void bch2_io_timer_add(struct io_clock *, struct io_timer *);
+void bch2_io_timer_del(struct io_clock *, struct io_timer *);
+void bch2_kthread_io_clock_wait(struct io_clock *, unsigned long);
+void bch2_increment_clock(struct bch_fs *, unsigned, int);
-void bch_io_clock_schedule_timeout(struct io_clock *, unsigned long);
+void bch2_io_clock_schedule_timeout(struct io_clock *, unsigned long);
-#define bch_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\
+#define bch2_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\
({ \
long __ret = timeout; \
might_sleep(); \
@@ -17,7 +17,7 @@ void bch_io_clock_schedule_timeout(struct io_clock *, unsigned long);
__ret; \
})
-void bch_io_clock_exit(struct io_clock *);
-int bch_io_clock_init(struct io_clock *);
+void bch2_io_clock_exit(struct io_clock *);
+int bch2_io_clock_init(struct io_clock *);
#endif /* _BCACHE_CLOCK_H */
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index d9a64c381a0d..547ea73213e2 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -1,4 +1,4 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "compress.h"
#include "extents.h"
#include "io.h"
@@ -195,7 +195,7 @@ err:
return ret;
}
-int bch_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
+int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
unsigned live_data_sectors,
struct bch_extent_crc128 crc)
{
@@ -242,12 +242,12 @@ use_mempool:
* deadlock:
*/
- bch_bio_free_pages_pool(c, bio);
- bch_bio_alloc_pages_pool(c, bio, live_data_sectors << 9);
+ bch2_bio_free_pages_pool(c, bio);
+ bch2_bio_alloc_pages_pool(c, bio, live_data_sectors << 9);
goto copy_data;
}
-int bch_bio_uncompress(struct bch_fs *c, struct bio *src,
+int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
struct bio *dst, struct bvec_iter dst_iter,
struct bch_extent_crc128 crc)
{
@@ -391,7 +391,7 @@ err:
return ret;
}
-void bch_bio_compress(struct bch_fs *c,
+void bch2_bio_compress(struct bch_fs *c,
struct bio *dst, size_t *dst_len,
struct bio *src, size_t *src_len,
unsigned *compression_type)
@@ -423,30 +423,30 @@ out:
}
/* doesn't write superblock: */
-int bch_check_set_has_compressed_data(struct bch_fs *c,
+int bch2_check_set_has_compressed_data(struct bch_fs *c,
unsigned compression_type)
{
switch (compression_type) {
case BCH_COMPRESSION_NONE:
return 0;
case BCH_COMPRESSION_LZ4:
- if (bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4))
+ if (bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4))
return 0;
- bch_sb_set_feature(c->disk_sb, BCH_FEATURE_LZ4);
+ bch2_sb_set_feature(c->disk_sb, BCH_FEATURE_LZ4);
break;
case BCH_COMPRESSION_GZIP:
- if (bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP))
+ if (bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP))
return 0;
- bch_sb_set_feature(c->disk_sb, BCH_FEATURE_GZIP);
+ bch2_sb_set_feature(c->disk_sb, BCH_FEATURE_GZIP);
break;
}
- return bch_fs_compress_init(c);
+ return bch2_fs_compress_init(c);
}
-void bch_fs_compress_exit(struct bch_fs *c)
+void bch2_fs_compress_exit(struct bch_fs *c)
{
vfree(c->zlib_workspace);
mempool_exit(&c->lz4_workspace_pool);
@@ -458,13 +458,13 @@ void bch_fs_compress_exit(struct bch_fs *c)
max_t(size_t, zlib_inflate_workspacesize(), \
zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL))
-int bch_fs_compress_init(struct bch_fs *c)
+int bch2_fs_compress_init(struct bch_fs *c)
{
unsigned order = get_order(BCH_ENCODED_EXTENT_MAX << 9);
int ret;
- if (!bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4) &&
- !bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP))
+ if (!bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4) &&
+ !bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP))
return 0;
if (!mempool_initialized(&c->compression_bounce[READ])) {
@@ -482,7 +482,7 @@ int bch_fs_compress_init(struct bch_fs *c)
}
if (!mempool_initialized(&c->lz4_workspace_pool) &&
- bch_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) {
+ bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_LZ4)) {
ret = mempool_init_kmalloc_pool(&c->lz4_workspace_pool,
1, LZ4_MEM_COMPRESS);
if (ret)
@@ -490,7 +490,7 @@ int bch_fs_compress_init(struct bch_fs *c)
}
if (!c->zlib_workspace &&
- bch_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) {
+ bch2_sb_test_feature(c->disk_sb, BCH_FEATURE_GZIP)) {
c->zlib_workspace = vmalloc(COMPRESSION_WORKSPACE_SIZE);
if (!c->zlib_workspace)
return -ENOMEM;
diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h
index e8d208a01d78..05804f556f31 100644
--- a/fs/bcachefs/compress.h
+++ b/fs/bcachefs/compress.h
@@ -1,15 +1,15 @@
#ifndef _BCACHE_COMPRESS_H
#define _BCACHE_COMPRESS_H
-int bch_bio_uncompress_inplace(struct bch_fs *, struct bio *,
+int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
unsigned, struct bch_extent_crc128);
-int bch_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
+int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
struct bvec_iter, struct bch_extent_crc128);
-void bch_bio_compress(struct bch_fs *, struct bio *, size_t *,
+void bch2_bio_compress(struct bch_fs *, struct bio *, size_t *,
struct bio *, size_t *, unsigned *);
-int bch_check_set_has_compressed_data(struct bch_fs *, unsigned);
-void bch_fs_compress_exit(struct bch_fs *);
-int bch_fs_compress_init(struct bch_fs *);
+int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned);
+void bch2_fs_compress_exit(struct bch_fs *);
+int bch2_fs_compress_init(struct bch_fs *);
#endif /* _BCACHE_COMPRESS_H */
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index f19ccadfbeb8..248bc7a16b47 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -1,11 +1,11 @@
/*
- * Assorted bcache debug code
+ * Assorted bcachefs debug code
*
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_io.h"
@@ -30,7 +30,7 @@ static struct dentry *bch_debug;
#ifdef CONFIG_BCACHEFS_DEBUG
-void __bch_btree_verify(struct bch_fs *c, struct btree *b)
+void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
struct btree *v = c->verify_data;
struct btree_node *n_ondisk, *n_sorted, *n_inmemory;
@@ -52,9 +52,9 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b)
v->written = 0;
v->level = b->level;
v->btree_id = b->btree_id;
- bch_btree_keys_init(v, &c->expensive_debug_checks);
+ bch2_btree_keys_init(v, &c->expensive_debug_checks);
- pick = bch_btree_pick_ptr(c, b);
+ pick = bch2_btree_pick_ptr(c, b);
if (IS_ERR_OR_NULL(pick.ca))
return;
@@ -63,7 +63,7 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b)
bio->bi_iter.bi_sector = pick.ptr.offset;
bio->bi_iter.bi_size = btree_bytes(c);
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
- bch_bio_map(bio, n_sorted);
+ bch2_bio_map(bio, n_sorted);
submit_bio_wait(bio);
@@ -71,7 +71,7 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b)
memcpy(n_ondisk, n_sorted, btree_bytes(c));
- bch_btree_node_read_done(c, v, pick.ca, &pick.ptr);
+ bch2_btree_node_read_done(c, v, pick.ca, &pick.ptr);
n_sorted = c->verify_data->data;
percpu_ref_put(&pick.ca->io_ref);
@@ -90,10 +90,10 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b)
console_lock();
printk(KERN_ERR "*** in memory:\n");
- bch_dump_bset(b, inmemory, 0);
+ bch2_dump_bset(b, inmemory, 0);
printk(KERN_ERR "*** read back in:\n");
- bch_dump_bset(v, sorted, 0);
+ bch2_dump_bset(v, sorted, 0);
while (offset < b->written) {
if (!offset ) {
@@ -110,7 +110,7 @@ void __bch_btree_verify(struct bch_fs *c, struct btree *b)
}
printk(KERN_ERR "*** on disk block %u:\n", offset);
- bch_dump_bset(b, i, offset);
+ bch2_dump_bset(b, i, offset);
offset += sectors;
}
@@ -170,7 +170,7 @@ static int flush_buf(struct dump_iter *i)
return 0;
}
-static int bch_dump_open(struct inode *inode, struct file *file)
+static int bch2_dump_open(struct inode *inode, struct file *file)
{
struct btree_debug *bd = inode->i_private;
struct dump_iter *i;
@@ -187,14 +187,14 @@ static int bch_dump_open(struct inode *inode, struct file *file)
return 0;
}
-static int bch_dump_release(struct inode *inode, struct file *file)
+static int bch2_dump_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
-static ssize_t bch_read_btree(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+static ssize_t bch2_read_btree(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
struct btree_iter iter;
@@ -212,18 +212,18 @@ static ssize_t bch_read_btree(struct file *file, char __user *buf,
if (!i->size)
return i->ret;
- bch_btree_iter_init(&iter, i->c, i->id, i->from);
+ bch2_btree_iter_init(&iter, i->c, i->id, i->from);
- while ((k = bch_btree_iter_peek(&iter)).k &&
+ while ((k = bch2_btree_iter_peek(&iter)).k &&
!(err = btree_iter_err(k))) {
- bch_bkey_val_to_text(i->c, bkey_type(0, i->id),
+ bch2_bkey_val_to_text(i->c, bkey_type(0, i->id),
i->buf, sizeof(i->buf), k);
i->bytes = strlen(i->buf);
BUG_ON(i->bytes >= PAGE_SIZE);
i->buf[i->bytes] = '\n';
i->bytes++;
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
i->from = iter.pos;
err = flush_buf(i);
@@ -233,20 +233,20 @@ static ssize_t bch_read_btree(struct file *file, char __user *buf,
if (!i->size)
break;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return err < 0 ? err : i->ret;
}
static const struct file_operations btree_debug_ops = {
.owner = THIS_MODULE,
- .open = bch_dump_open,
- .release = bch_dump_release,
- .read = bch_read_btree,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_btree,
};
-static ssize_t bch_read_btree_formats(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
struct btree_iter iter;
@@ -265,7 +265,7 @@ static ssize_t bch_read_btree_formats(struct file *file, char __user *buf,
return i->ret;
for_each_btree_node(&iter, i->c, i->id, i->from, 0, b) {
- i->bytes = bch_print_btree_node(i->c, b, i->buf,
+ i->bytes = bch2_print_btree_node(i->c, b, i->buf,
sizeof(i->buf));
err = flush_buf(i);
if (err)
@@ -282,20 +282,20 @@ static ssize_t bch_read_btree_formats(struct file *file, char __user *buf,
if (!i->size)
break;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return err < 0 ? err : i->ret;
}
static const struct file_operations btree_format_debug_ops = {
.owner = THIS_MODULE,
- .open = bch_dump_open,
- .release = bch_dump_release,
- .read = bch_read_btree_formats,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_btree_formats,
};
-static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf,
- size_t size, loff_t *ppos)
+static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
struct btree_iter iter;
@@ -314,16 +314,16 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf,
if (!i->size)
return i->ret;
- bch_btree_iter_init(&iter, i->c, i->id, i->from);
+ bch2_btree_iter_init(&iter, i->c, i->id, i->from);
- while ((k = bch_btree_iter_peek(&iter)).k &&
+ while ((k = bch2_btree_iter_peek(&iter)).k &&
!(err = btree_iter_err(k))) {
struct btree *b = iter.nodes[0];
struct btree_node_iter *node_iter = &iter.node_iters[0];
- struct bkey_packed *_k = bch_btree_node_iter_peek(node_iter, b);
+ struct bkey_packed *_k = bch2_btree_node_iter_peek(node_iter, b);
if (iter.nodes[0] != prev_node) {
- i->bytes = bch_print_btree_node(i->c, b, i->buf,
+ i->bytes = bch2_print_btree_node(i->c, b, i->buf,
sizeof(i->buf));
err = flush_buf(i);
if (err)
@@ -331,13 +331,13 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf,
}
prev_node = iter.nodes[0];
- i->bytes = bch_bkey_print_bfloat(b, _k, i->buf, sizeof(i->buf));
+ i->bytes = bch2_bkey_print_bfloat(b, _k, i->buf, sizeof(i->buf));
err = flush_buf(i);
if (err)
break;
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
i->from = iter.pos;
err = flush_buf(i);
@@ -347,25 +347,25 @@ static ssize_t bch_read_bfloat_failed(struct file *file, char __user *buf,
if (!i->size)
break;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return err < 0 ? err : i->ret;
}
static const struct file_operations bfloat_failed_debug_ops = {
.owner = THIS_MODULE,
- .open = bch_dump_open,
- .release = bch_dump_release,
- .read = bch_read_bfloat_failed,
+ .open = bch2_dump_open,
+ .release = bch2_dump_release,
+ .read = bch2_read_bfloat_failed,
};
-void bch_fs_debug_exit(struct bch_fs *c)
+void bch2_fs_debug_exit(struct bch_fs *c)
{
if (!IS_ERR_OR_NULL(c->debug))
debugfs_remove_recursive(c->debug);
}
-void bch_fs_debug_init(struct bch_fs *c)
+void bch2_fs_debug_init(struct bch_fs *c)
{
struct btree_debug *bd;
char name[100];
@@ -382,18 +382,18 @@ void bch_fs_debug_init(struct bch_fs *c)
bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
bd++) {
bd->id = bd - c->btree_debug;
- bd->btree = debugfs_create_file(bch_btree_ids[bd->id],
+ bd->btree = debugfs_create_file(bch2_btree_ids[bd->id],
0400, c->debug, bd,
&btree_debug_ops);
snprintf(name, sizeof(name), "%s-formats",
- bch_btree_ids[bd->id]);
+ bch2_btree_ids[bd->id]);
bd->btree_format = debugfs_create_file(name, 0400, c->debug, bd,
&btree_format_debug_ops);
snprintf(name, sizeof(name), "%s-bfloat-failed",
- bch_btree_ids[bd->id]);
+ bch2_btree_ids[bd->id]);
bd->failed = debugfs_create_file(name, 0400, c->debug, bd,
&bfloat_failed_debug_ops);
@@ -402,16 +402,16 @@ void bch_fs_debug_init(struct bch_fs *c)
#endif
-void bch_debug_exit(void)
+void bch2_debug_exit(void)
{
if (!IS_ERR_OR_NULL(bch_debug))
debugfs_remove_recursive(bch_debug);
}
-int __init bch_debug_init(void)
+int __init bch2_debug_init(void)
{
int ret = 0;
- bch_debug = debugfs_create_dir("bcache", NULL);
+ bch_debug = debugfs_create_dir("bcachefs", NULL);
return ret;
}
diff --git a/fs/bcachefs/debug.h b/fs/bcachefs/debug.h
index 7cb4f4787a88..7724504588c9 100644
--- a/fs/bcachefs/debug.h
+++ b/fs/bcachefs/debug.h
@@ -1,19 +1,19 @@
#ifndef _BCACHE_DEBUG_H
#define _BCACHE_DEBUG_H
-#include "bcache.h"
+#include "bcachefs.h"
struct bio;
struct btree;
struct bch_fs;
-#define BCH_DEBUG_PARAM(name, description) extern bool bch_##name;
+#define BCH_DEBUG_PARAM(name, description) extern bool bch2_##name;
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
#define BCH_DEBUG_PARAM(name, description) \
static inline bool name(struct bch_fs *c) \
- { return bch_##name || c->name; }
+ { return bch2_##name || c->name; }
BCH_DEBUG_PARAMS_ALWAYS()
#undef BCH_DEBUG_PARAM
@@ -21,11 +21,11 @@ BCH_DEBUG_PARAMS_ALWAYS()
#define BCH_DEBUG_PARAM(name, description) \
static inline bool name(struct bch_fs *c) \
- { return bch_##name || c->name; }
+ { return bch2_##name || c->name; }
BCH_DEBUG_PARAMS_DEBUG()
#undef BCH_DEBUG_PARAM
-void __bch_btree_verify(struct bch_fs *, struct btree *);
+void __bch2_btree_verify(struct bch_fs *, struct btree *);
#define bypass_torture_test(d) ((d)->bypass_torture_test)
@@ -36,27 +36,27 @@ void __bch_btree_verify(struct bch_fs *, struct btree *);
BCH_DEBUG_PARAMS_DEBUG()
#undef BCH_DEBUG_PARAM
-static inline void __bch_btree_verify(struct bch_fs *c, struct btree *b) {}
+static inline void __bch2_btree_verify(struct bch_fs *c, struct btree *b) {}
#define bypass_torture_test(d) 0
#endif
-static inline void bch_btree_verify(struct bch_fs *c, struct btree *b)
+static inline void bch2_btree_verify(struct bch_fs *c, struct btree *b)
{
if (verify_btree_ondisk(c))
- __bch_btree_verify(c, b);
+ __bch2_btree_verify(c, b);
}
#ifdef CONFIG_DEBUG_FS
-void bch_fs_debug_exit(struct bch_fs *);
-void bch_fs_debug_init(struct bch_fs *);
+void bch2_fs_debug_exit(struct bch_fs *);
+void bch2_fs_debug_init(struct bch_fs *);
#else
-static inline void bch_fs_debug_exit(struct bch_fs *c) {}
-static inline void bch_fs_debug_init(struct bch_fs *c) {}
+static inline void bch2_fs_debug_exit(struct bch_fs *c) {}
+static inline void bch2_fs_debug_init(struct bch_fs *c) {}
#endif
-void bch_debug_exit(void);
-int bch_debug_init(void);
+void bch2_debug_exit(void);
+int bch2_debug_init(void);
#endif
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index f961e881f92b..503f0dc4bb08 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
@@ -10,7 +10,7 @@
#include <linux/dcache.h>
-unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent d)
+unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent d)
{
unsigned len = bkey_val_bytes(d.k) - sizeof(struct bch_dirent);
@@ -20,35 +20,35 @@ unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent d)
return len;
}
-static u64 bch_dirent_hash(const struct bch_hash_info *info,
- const struct qstr *name)
+static u64 bch2_dirent_hash(const struct bch_hash_info *info,
+ const struct qstr *name)
{
struct bch_str_hash_ctx ctx;
- bch_str_hash_init(&ctx, info);
- bch_str_hash_update(&ctx, info, name->name, name->len);
+ bch2_str_hash_init(&ctx, info);
+ bch2_str_hash_update(&ctx, info, name->name, name->len);
/* [0,2) reserved for dots */
- return max_t(u64, bch_str_hash_end(&ctx, info), 2);
+ return max_t(u64, bch2_str_hash_end(&ctx, info), 2);
}
static u64 dirent_hash_key(const struct bch_hash_info *info, const void *key)
{
- return bch_dirent_hash(info, key);
+ return bch2_dirent_hash(info, key);
}
static u64 dirent_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
- struct qstr name = QSTR_INIT(d.v->d_name, bch_dirent_name_bytes(d));
+ struct qstr name = QSTR_INIT(d.v->d_name, bch2_dirent_name_bytes(d));
- return bch_dirent_hash(info, &name);
+ return bch2_dirent_hash(info, &name);
}
static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
{
struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
- int len = bch_dirent_name_bytes(l);
+ int len = bch2_dirent_name_bytes(l);
const struct qstr *r = _r;
return len - r->len ?: memcmp(l.v->d_name, r->name, len);
@@ -58,8 +58,8 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
{
struct bkey_s_c_dirent l = bkey_s_c_to_dirent(_l);
struct bkey_s_c_dirent r = bkey_s_c_to_dirent(_r);
- int l_len = bch_dirent_name_bytes(l);
- int r_len = bch_dirent_name_bytes(r);
+ int l_len = bch2_dirent_name_bytes(l);
+ int r_len = bch2_dirent_name_bytes(r);
return l_len - r_len ?: memcmp(l.v->d_name, r.v->d_name, l_len);
}
@@ -74,8 +74,8 @@ static const struct bch_hash_desc dirent_hash_desc = {
.cmp_bkey = dirent_cmp_bkey,
};
-static const char *bch_dirent_invalid(const struct bch_fs *c,
- struct bkey_s_c k)
+static const char *bch2_dirent_invalid(const struct bch_fs *c,
+ struct bkey_s_c k)
{
switch (k.k->type) {
case BCH_DIRENT:
@@ -93,8 +93,8 @@ static const char *bch_dirent_invalid(const struct bch_fs *c,
}
}
-static void bch_dirent_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
+static void bch2_dirent_to_text(struct bch_fs *c, char *buf,
+ size_t size, struct bkey_s_c k)
{
struct bkey_s_c_dirent d;
@@ -104,7 +104,7 @@ static void bch_dirent_to_text(struct bch_fs *c, char *buf,
if (size) {
unsigned n = min_t(unsigned, size,
- bch_dirent_name_bytes(d));
+ bch2_dirent_name_bytes(d));
memcpy(buf, d.v->d_name, n);
buf[size - 1] = '\0';
buf += n;
@@ -119,9 +119,9 @@ static void bch_dirent_to_text(struct bch_fs *c, char *buf,
}
}
-const struct bkey_ops bch_bkey_dirent_ops = {
- .key_invalid = bch_dirent_invalid,
- .val_to_text = bch_dirent_to_text,
+const struct bkey_ops bch2_bkey_dirent_ops = {
+ .key_invalid = bch2_dirent_invalid,
+ .val_to_text = bch2_dirent_to_text,
};
static struct bkey_i_dirent *dirent_create_key(u8 type,
@@ -146,15 +146,15 @@ static struct bkey_i_dirent *dirent_create_key(u8 type,
bkey_val_bytes(&dirent->k) -
(sizeof(struct bch_dirent) + name->len));
- EBUG_ON(bch_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);
+ EBUG_ON(bch2_dirent_name_bytes(dirent_i_to_s_c(dirent)) != name->len);
return dirent;
}
-int bch_dirent_create(struct bch_fs *c, u64 dir_inum,
- const struct bch_hash_info *hash_info,
- u8 type, const struct qstr *name, u64 dst_inum,
- u64 *journal_seq, int flags)
+int bch2_dirent_create(struct bch_fs *c, u64 dir_inum,
+ const struct bch_hash_info *hash_info,
+ u8 type, const struct qstr *name, u64 dst_inum,
+ u64 *journal_seq, int flags)
{
struct bkey_i_dirent *dirent;
int ret;
@@ -163,7 +163,7 @@ int bch_dirent_create(struct bch_fs *c, u64 dir_inum,
if (!dirent)
return -ENOMEM;
- ret = bch_hash_set(dirent_hash_desc, hash_info, c, dir_inum,
+ ret = bch2_hash_set(dirent_hash_desc, hash_info, c, dir_inum,
journal_seq, &dirent->k_i, flags);
kfree(dirent);
@@ -177,16 +177,16 @@ static void dirent_copy_target(struct bkey_i_dirent *dst,
dst->v.d_type = src.v->d_type;
}
-static struct bpos bch_dirent_pos(struct bch_inode_info *ei,
- const struct qstr *name)
+static struct bpos bch2_dirent_pos(struct bch_inode_info *ei,
+ const struct qstr *name)
{
- return POS(ei->vfs_inode.i_ino, bch_dirent_hash(&ei->str_hash, name));
+ return POS(ei->vfs_inode.i_ino, bch2_dirent_hash(&ei->str_hash, name));
}
-int bch_dirent_rename(struct bch_fs *c,
- struct inode *src_dir, const struct qstr *src_name,
- struct inode *dst_dir, const struct qstr *dst_name,
- u64 *journal_seq, enum bch_rename_mode mode)
+int bch2_dirent_rename(struct bch_fs *c,
+ struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name,
+ u64 *journal_seq, enum bch_rename_mode mode)
{
struct bch_inode_info *src_ei = to_bch_ei(src_dir);
struct bch_inode_info *dst_ei = to_bch_ei(dst_dir);
@@ -194,17 +194,17 @@ int bch_dirent_rename(struct bch_fs *c,
struct bkey_s_c old_src, old_dst;
struct bkey delete;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
- struct bpos src_pos = bch_dirent_pos(src_ei, src_name);
- struct bpos dst_pos = bch_dirent_pos(dst_ei, dst_name);
+ struct bpos src_pos = bch2_dirent_pos(src_ei, src_name);
+ struct bpos dst_pos = bch2_dirent_pos(dst_ei, dst_name);
bool need_whiteout;
int ret = -ENOMEM;
- bch_btree_iter_init_intent(&src_iter, c, BTREE_ID_DIRENTS, src_pos);
- bch_btree_iter_init_intent(&dst_iter, c, BTREE_ID_DIRENTS, dst_pos);
- bch_btree_iter_link(&src_iter, &dst_iter);
+ bch2_btree_iter_init_intent(&src_iter, c, BTREE_ID_DIRENTS, src_pos);
+ bch2_btree_iter_init_intent(&dst_iter, c, BTREE_ID_DIRENTS, dst_pos);
+ bch2_btree_iter_link(&src_iter, &dst_iter);
- bch_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos);
- bch_btree_iter_link(&src_iter, &whiteout_iter);
+ bch2_btree_iter_init(&whiteout_iter, c, BTREE_ID_DIRENTS, src_pos);
+ bch2_btree_iter_link(&src_iter, &whiteout_iter);
if (mode == BCH_RENAME_EXCHANGE) {
new_src = dirent_create_key(0, src_name, 0);
@@ -223,13 +223,13 @@ retry:
* from the original hashed position (like we do when creating dirents,
* in bch_hash_set) - we never move existing dirents to different slot:
*/
- old_src = bch_hash_lookup_at(dirent_hash_desc,
+ old_src = bch2_hash_lookup_at(dirent_hash_desc,
&src_ei->str_hash,
&src_iter, src_name);
if ((ret = btree_iter_err(old_src)))
goto err;
- ret = bch_hash_needs_whiteout(dirent_hash_desc,
+ ret = bch2_hash_needs_whiteout(dirent_hash_desc,
&src_ei->str_hash,
&whiteout_iter, &src_iter);
if (ret < 0)
@@ -242,8 +242,8 @@ retry:
* to do that check for us for correctness:
*/
old_dst = mode == BCH_RENAME
- ? bch_hash_hole_at(dirent_hash_desc, &dst_iter)
- : bch_hash_lookup_at(dirent_hash_desc,
+ ? bch2_hash_hole_at(dirent_hash_desc, &dst_iter)
+ : bch2_hash_lookup_at(dirent_hash_desc,
&dst_ei->str_hash,
&dst_iter, dst_name);
if ((ret = btree_iter_err(old_dst)))
@@ -265,13 +265,13 @@ retry:
* were going to delete:
*
* Note: this is a correctness issue, in this
- * situation bch_hash_needs_whiteout() could
+ * situation bch2_hash_needs_whiteout() could
* return false when the whiteout would have
* been needed if we inserted at the pos
* __dirent_find_hole() found
*/
new_dst->k.p = src_iter.pos;
- ret = bch_btree_insert_at(c, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
journal_seq,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&src_iter,
@@ -307,7 +307,7 @@ retry:
new_src->k.p = src_iter.pos;
new_dst->k.p = dst_iter.pos;
- ret = bch_btree_insert_at(c, NULL, NULL, journal_seq,
+ ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&src_iter, &new_src->k_i),
BTREE_INSERT_ENTRY(&dst_iter, &new_dst->k_i));
@@ -315,9 +315,9 @@ err:
if (ret == -EINTR)
goto retry;
- bch_btree_iter_unlock(&whiteout_iter);
- bch_btree_iter_unlock(&dst_iter);
- bch_btree_iter_unlock(&src_iter);
+ bch2_btree_iter_unlock(&whiteout_iter);
+ bch2_btree_iter_unlock(&dst_iter);
+ bch2_btree_iter_unlock(&src_iter);
if (new_src != (void *) &delete)
kfree(new_src);
@@ -325,37 +325,37 @@ err:
return ret;
}
-int bch_dirent_delete(struct bch_fs *c, u64 dir_inum,
- const struct bch_hash_info *hash_info,
- const struct qstr *name,
- u64 *journal_seq)
+int bch2_dirent_delete(struct bch_fs *c, u64 dir_inum,
+ const struct bch_hash_info *hash_info,
+ const struct qstr *name,
+ u64 *journal_seq)
{
- return bch_hash_delete(dirent_hash_desc, hash_info,
+ return bch2_hash_delete(dirent_hash_desc, hash_info,
c, dir_inum, journal_seq, name);
}
-u64 bch_dirent_lookup(struct bch_fs *c, u64 dir_inum,
- const struct bch_hash_info *hash_info,
- const struct qstr *name)
+u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
+ const struct bch_hash_info *hash_info,
+ const struct qstr *name)
{
struct btree_iter iter;
struct bkey_s_c k;
u64 inum;
- k = bch_hash_lookup(dirent_hash_desc, hash_info, c,
+ k = bch2_hash_lookup(dirent_hash_desc, hash_info, c,
dir_inum, &iter, name);
if (IS_ERR(k.k)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return 0;
}
inum = le64_to_cpu(bkey_s_c_to_dirent(k).v->d_inum);
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return inum;
}
-int bch_empty_dir(struct bch_fs *c, u64 dir_inum)
+int bch2_empty_dir(struct bch_fs *c, u64 dir_inum)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -370,13 +370,13 @@ int bch_empty_dir(struct bch_fs *c, u64 dir_inum)
break;
}
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
-int bch_readdir(struct bch_fs *c, struct file *file,
- struct dir_context *ctx)
+int bch2_readdir(struct bch_fs *c, struct file *file,
+ struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct btree_iter iter;
@@ -406,7 +406,7 @@ int bch_readdir(struct bch_fs *c, struct file *file,
if (k.k->p.inode > inode->i_ino)
break;
- len = bch_dirent_name_bytes(dirent);
+ len = bch2_dirent_name_bytes(dirent);
pr_debug("emitting %s", dirent.v->d_name);
@@ -421,7 +421,7 @@ int bch_readdir(struct bch_fs *c, struct file *file,
ctx->pos = k.k->p.offset + 1;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return 0;
}
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index 158d4cae36e5..b1a30bda10af 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -1,7 +1,7 @@
#ifndef _BCACHE_DIRENT_H
#define _BCACHE_DIRENT_H
-extern const struct bkey_ops bch_bkey_dirent_ops;
+extern const struct bkey_ops bch2_bkey_dirent_ops;
struct qstr;
struct file;
@@ -9,11 +9,11 @@ struct dir_context;
struct bch_fs;
struct bch_hash_info;
-unsigned bch_dirent_name_bytes(struct bkey_s_c_dirent);
-int bch_dirent_create(struct bch_fs *c, u64, const struct bch_hash_info *,
- u8, const struct qstr *, u64, u64 *, int);
-int bch_dirent_delete(struct bch_fs *, u64, const struct bch_hash_info *,
- const struct qstr *, u64 *);
+unsigned bch2_dirent_name_bytes(struct bkey_s_c_dirent);
+int bch2_dirent_create(struct bch_fs *c, u64, const struct bch_hash_info *,
+ u8, const struct qstr *, u64, u64 *, int);
+int bch2_dirent_delete(struct bch_fs *, u64, const struct bch_hash_info *,
+ const struct qstr *, u64 *);
enum bch_rename_mode {
BCH_RENAME,
@@ -21,16 +21,16 @@ enum bch_rename_mode {
BCH_RENAME_EXCHANGE,
};
-int bch_dirent_rename(struct bch_fs *,
- struct inode *, const struct qstr *,
- struct inode *, const struct qstr *,
- u64 *, enum bch_rename_mode);
+int bch2_dirent_rename(struct bch_fs *,
+ struct inode *, const struct qstr *,
+ struct inode *, const struct qstr *,
+ u64 *, enum bch_rename_mode);
-u64 bch_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *,
- const struct qstr *);
+u64 bch2_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *,
+ const struct qstr *);
-int bch_empty_dir(struct bch_fs *, u64);
-int bch_readdir(struct bch_fs *, struct file *, struct dir_context *);
+int bch2_empty_dir(struct bch_fs *, u64);
+int bch2_readdir(struct bch_fs *, struct file *, struct dir_context *);
#endif /* _BCACHE_DIRENT_H */
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 0c8ac1d79c97..8babf196b5c2 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -1,9 +1,9 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "error.h"
#include "io.h"
#include "super.h"
-void bch_inconsistent_error(struct bch_fs *c)
+void bch2_inconsistent_error(struct bch_fs *c)
{
set_bit(BCH_FS_ERROR, &c->flags);
@@ -11,41 +11,41 @@ void bch_inconsistent_error(struct bch_fs *c)
case BCH_ON_ERROR_CONTINUE:
break;
case BCH_ON_ERROR_RO:
- if (bch_fs_emergency_read_only(c))
+ if (bch2_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
break;
case BCH_ON_ERROR_PANIC:
- panic(bch_fmt(c, "panic after error"));
+ panic(bch2_fmt(c, "panic after error"));
break;
}
}
-void bch_fatal_error(struct bch_fs *c)
+void bch2_fatal_error(struct bch_fs *c)
{
- if (bch_fs_emergency_read_only(c))
+ if (bch2_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
}
-void bch_nonfatal_io_error_work(struct work_struct *work)
+void bch2_nonfatal_io_error_work(struct work_struct *work)
{
struct bch_dev *ca = container_of(work, struct bch_dev, io_error_work);
struct bch_fs *c = ca->fs;
bool dev;
mutex_lock(&c->state_lock);
- dev = bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO,
+ dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO,
BCH_FORCE_IF_DEGRADED);
if (dev
- ? __bch_dev_set_state(c, ca, BCH_MEMBER_STATE_RO,
+ ? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_RO,
BCH_FORCE_IF_DEGRADED)
- : bch_fs_emergency_read_only(c))
+ : bch2_fs_emergency_read_only(c))
bch_err(ca,
"too many IO errors, setting %s RO",
dev ? "device" : "filesystem");
mutex_unlock(&c->state_lock);
}
-void bch_nonfatal_io_error(struct bch_dev *ca)
+void bch2_nonfatal_io_error(struct bch_dev *ca)
{
queue_work(system_long_wq, &ca->io_error_work);
}
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 2d7f15803ae9..83d3a6274eb7 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -21,16 +21,16 @@ struct bch_fs;
* XXX: audit and convert to inconsistent() checks
*/
-#define bch_fs_bug(c, ...) \
+#define bch2_fs_bug(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
BUG(); \
} while (0)
-#define bch_fs_bug_on(cond, c, ...) \
+#define bch2_fs_bug_on(cond, c, ...) \
do { \
if (cond) \
- bch_fs_bug(c, __VA_ARGS__); \
+ bch2_fs_bug(c, __VA_ARGS__); \
} while (0)
/*
@@ -44,20 +44,20 @@ do { \
* BCH_ON_ERROR_CONTINUE mode
*/
-void bch_inconsistent_error(struct bch_fs *);
+void bch2_inconsistent_error(struct bch_fs *);
-#define bch_fs_inconsistent(c, ...) \
+#define bch2_fs_inconsistent(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
- bch_inconsistent_error(c); \
+ bch2_inconsistent_error(c); \
} while (0)
-#define bch_fs_inconsistent_on(cond, c, ...) \
+#define bch2_fs_inconsistent_on(cond, c, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- bch_fs_inconsistent(c, __VA_ARGS__); \
+ bch2_fs_inconsistent(c, __VA_ARGS__); \
_ret; \
})
@@ -66,18 +66,18 @@ do { \
* entire filesystem:
*/
-#define bch_dev_inconsistent(ca, ...) \
+#define bch2_dev_inconsistent(ca, ...) \
do { \
bch_err(ca, __VA_ARGS__); \
- bch_inconsistent_error((ca)->fs); \
+ bch2_inconsistent_error((ca)->fs); \
} while (0)
-#define bch_dev_inconsistent_on(cond, ca, ...) \
+#define bch2_dev_inconsistent_on(cond, ca, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- bch_dev_inconsistent(ca, __VA_ARGS__); \
+ bch2_dev_inconsistent(ca, __VA_ARGS__); \
_ret; \
})
@@ -145,43 +145,43 @@ enum {
* mode - pretty much just due to metadata IO errors:
*/
-void bch_fatal_error(struct bch_fs *);
+void bch2_fatal_error(struct bch_fs *);
-#define bch_fs_fatal_error(c, ...) \
+#define bch2_fs_fatal_error(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
- bch_fatal_error(c); \
+ bch2_fatal_error(c); \
} while (0)
-#define bch_fs_fatal_err_on(cond, c, ...) \
+#define bch2_fs_fatal_err_on(cond, c, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- bch_fs_fatal_error(c, __VA_ARGS__); \
+ bch2_fs_fatal_error(c, __VA_ARGS__); \
_ret; \
})
-#define bch_dev_fatal_error(ca, ...) \
+#define bch2_dev_fatal_error(ca, ...) \
do { \
bch_err(ca, __VA_ARGS__); \
- bch_fatal_error(c); \
+ bch2_fatal_error(c); \
} while (0)
-#define bch_dev_fatal_io_error(ca, fmt, ...) \
+#define bch2_dev_fatal_io_error(ca, fmt, ...) \
do { \
- printk_ratelimited(KERN_ERR bch_fmt((ca)->fs, \
+ printk_ratelimited(KERN_ERR bch2_fmt((ca)->fs, \
"fatal IO error on %s for " fmt), \
(ca)->name, ##__VA_ARGS__); \
- bch_fatal_error((ca)->fs); \
+ bch2_fatal_error((ca)->fs); \
} while (0)
-#define bch_dev_fatal_io_err_on(cond, ca, ...) \
+#define bch2_dev_fatal_io_err_on(cond, ca, ...) \
({ \
int _ret = !!(cond); \
\
if (_ret) \
- bch_dev_fatal_io_error(ca, __VA_ARGS__); \
+ bch2_dev_fatal_io_error(ca, __VA_ARGS__); \
_ret; \
})
@@ -191,41 +191,41 @@ do { \
* don't (necessarily) want to shut down the fs:
*/
-void bch_nonfatal_io_error_work(struct work_struct *);
+void bch2_nonfatal_io_error_work(struct work_struct *);
/* Does the error handling without logging a message */
-void bch_nonfatal_io_error(struct bch_dev *);
+void bch2_nonfatal_io_error(struct bch_dev *);
#if 0
-#define bch_fs_nonfatal_io_error(c, ...) \
+#define bch2_fs_nonfatal_io_error(c, ...) \
do { \
bch_err(c, __VA_ARGS__); \
- bch_nonfatal_io_error(c); \
+ bch2_nonfatal_io_error(c); \
} while (0)
#endif
/* Logs message and handles the error: */
-#define bch_dev_nonfatal_io_error(ca, fmt, ...) \
+#define bch2_dev_nonfatal_io_error(ca, fmt, ...) \
do { \
- printk_ratelimited(KERN_ERR bch_fmt((ca)->fs, \
+ printk_ratelimited(KERN_ERR bch2_fmt((ca)->fs, \
"IO error on %s for " fmt), \
(ca)->name, ##__VA_ARGS__); \
- bch_nonfatal_io_error(ca); \
+ bch2_nonfatal_io_error(ca); \
} while (0)
-#define bch_dev_nonfatal_io_err_on(cond, ca, ...) \
+#define bch2_dev_nonfatal_io_err_on(cond, ca, ...) \
({ \
bool _ret = (cond); \
\
if (_ret) \
- bch_dev_nonfatal_io_error(ca, __VA_ARGS__); \
+ bch2_dev_nonfatal_io_error(ca, __VA_ARGS__); \
_ret; \
})
/* kill? */
#define __bcache_io_error(c, fmt, ...) \
- printk_ratelimited(KERN_ERR bch_fmt(c, \
+ printk_ratelimited(KERN_ERR bch2_fmt(c, \
"IO error: " fmt), ##__VA_ARGS__)
#define bcache_io_error(c, bio, fmt, ...) \
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 28f5766049fc..26f9352af38c 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -5,7 +5,7 @@
* dirty sector count.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
@@ -21,8 +21,8 @@
#include <trace/events/bcachefs.h>
-static enum merge_result bch_extent_merge(struct bch_fs *, struct btree *,
- struct bkey_i *, struct bkey_i *);
+static enum merge_result bch2_extent_merge(struct bch_fs *, struct btree *,
+ struct bkey_i *, struct bkey_i *);
static void sort_key_next(struct btree_node_iter *iter,
struct btree *b,
@@ -76,7 +76,7 @@ static inline bool should_drop_next_key(struct btree_node_iter *iter,
__btree_node_offset_to_key(b, r->k));
}
-struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst,
+struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
struct btree *b,
struct btree_node_iter *iter)
{
@@ -87,7 +87,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst,
heap_resort(iter, key_sort_cmp);
- while (!bch_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_end(iter)) {
if (!should_drop_next_key(iter, b)) {
struct bkey_packed *k =
__btree_node_offset_to_key(b, iter->data->k);
@@ -108,7 +108,7 @@ struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *dst,
/* Common among btree and extent ptrs */
const struct bch_extent_ptr *
-bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
+bch2_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
{
const struct bch_extent_ptr *ptr;
@@ -119,7 +119,7 @@ bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
return NULL;
}
-unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e)
+unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent e)
{
const struct bch_extent_ptr *ptr;
unsigned nr_ptrs = 0;
@@ -130,7 +130,7 @@ unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent e)
return nr_ptrs;
}
-unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c k)
+unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c k)
{
struct bkey_s_c_extent e;
const struct bch_extent_ptr *ptr;
@@ -161,7 +161,7 @@ static bool crc_cmp(union bch_extent_crc *l, union bch_extent_crc *r)
}
/* Increment pointers after @crc by crc's offset until the next crc entry: */
-void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
+void bch2_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc *crc)
{
union bch_extent_entry *entry;
@@ -196,7 +196,7 @@ void bch_extent_crc_narrow_pointers(struct bkey_s_extent e, union bch_extent_crc
*
* note: doesn't work with encryption
*/
-void bch_extent_narrow_crcs(struct bkey_s_extent e)
+void bch2_extent_narrow_crcs(struct bkey_s_extent e)
{
union bch_extent_crc *crc;
bool have_wide = false, have_narrow = false;
@@ -205,7 +205,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e)
extent_for_each_crc(e, crc) {
if (crc_compression_type(crc) ||
- bch_csum_type_is_encryption(crc_csum_type(crc)))
+ bch2_csum_type_is_encryption(crc_csum_type(crc)))
continue;
if (crc_uncompressed_size(e.k, crc) != e.k->size) {
@@ -232,7 +232,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e)
if (bch_crc_bytes[csum_type] > 4)
continue;
- bch_extent_crc_narrow_pointers(e, crc);
+ bch2_extent_crc_narrow_pointers(e, crc);
crc->crc32._compressed_size = e.k->size - 1;
crc->crc32._uncompressed_size = e.k->size - 1;
crc->crc32.offset = 0;
@@ -243,7 +243,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e)
if (bch_crc_bytes[csum_type] > 10)
continue;
- bch_extent_crc_narrow_pointers(e, crc);
+ bch2_extent_crc_narrow_pointers(e, crc);
crc->crc64._compressed_size = e.k->size - 1;
crc->crc64._uncompressed_size = e.k->size - 1;
crc->crc64.offset = 0;
@@ -255,7 +255,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e)
if (bch_crc_bytes[csum_type] > 16)
continue;
- bch_extent_crc_narrow_pointers(e, crc);
+ bch2_extent_crc_narrow_pointers(e, crc);
crc->crc128._compressed_size = e.k->size - 1;
crc->crc128._uncompressed_size = e.k->size - 1;
crc->crc128.offset = 0;
@@ -267,7 +267,7 @@ void bch_extent_narrow_crcs(struct bkey_s_extent e)
}
}
-void bch_extent_drop_redundant_crcs(struct bkey_s_extent e)
+void bch2_extent_drop_redundant_crcs(struct bkey_s_extent e)
{
union bch_extent_entry *entry = e.v->start;
union bch_extent_crc *crc, *prev = NULL;
@@ -300,7 +300,7 @@ void bch_extent_drop_redundant_crcs(struct bkey_s_extent e)
!crc_csum_type(crc) &&
!crc_compression_type(crc)) {
/* null crc entry: */
- bch_extent_crc_narrow_pointers(e, crc);
+ bch2_extent_crc_narrow_pointers(e, crc);
goto drop;
}
@@ -314,7 +314,7 @@ drop:
e.k->u64s -= crc_u64s;
}
- EBUG_ON(bkey_val_u64s(e.k) && !bch_extent_nr_ptrs(e.c));
+ EBUG_ON(bkey_val_u64s(e.k) && !bch2_extent_nr_ptrs(e.c));
}
static bool should_drop_ptr(const struct bch_fs *c,
@@ -324,29 +324,29 @@ static bool should_drop_ptr(const struct bch_fs *c,
return ptr->cached && ptr_stale(c->devs[ptr->dev], ptr);
}
-static void bch_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
+static void bch2_extent_drop_stale(struct bch_fs *c, struct bkey_s_extent e)
{
struct bch_extent_ptr *ptr = &e.v->start->ptr;
bool dropped = false;
while ((ptr = extent_ptr_next(e, ptr)))
if (should_drop_ptr(c, e.c, ptr)) {
- __bch_extent_drop_ptr(e, ptr);
+ __bch2_extent_drop_ptr(e, ptr);
dropped = true;
} else
ptr++;
if (dropped)
- bch_extent_drop_redundant_crcs(e);
+ bch2_extent_drop_redundant_crcs(e);
}
-static bool bch_ptr_normalize(struct bch_fs *c, struct btree *bk,
+static bool bch2_ptr_normalize(struct bch_fs *c, struct btree *bk,
struct bkey_s k)
{
- return bch_extent_normalize(c, k);
+ return bch2_extent_normalize(c, k);
}
-static void bch_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
+static void bch2_ptr_swab(const struct bkey_format *f, struct bkey_packed *k)
{
switch (k->type) {
case BCH_EXTENT:
@@ -471,7 +471,7 @@ out:
/* Btree ptrs */
-static const char *bch_btree_ptr_invalid(const struct bch_fs *c,
+static const char *bch2_btree_ptr_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
if (bkey_extent_is_cached(k.k))
@@ -548,9 +548,9 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
}
if (replicas < c->sb.meta_replicas_have) {
- bch_bkey_val_to_text(c, btree_node_type(b),
+ bch2_bkey_val_to_text(c, btree_node_type(b),
buf, sizeof(buf), k);
- bch_fs_bug(c,
+ bch2_fs_bug(c,
"btree key bad (too few replicas, %u < %u): %s",
replicas, c->sb.meta_replicas_have, buf);
return;
@@ -558,8 +558,8 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
return;
err:
- bch_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
- bch_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i "
+ bch2_bkey_val_to_text(c, btree_node_type(b), buf, sizeof(buf), k);
+ bch2_fs_bug(c, "%s btree pointer %s: bucket %zi prio %i "
"gen %i last_gc %i mark %08x",
err, buf, PTR_BUCKET_NR(ca, ptr),
g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
@@ -567,7 +567,7 @@ err:
(unsigned) g->mark.counter);
}
-static void bch_btree_ptr_to_text(struct bch_fs *c, char *buf,
+static void bch2_btree_ptr_to_text(struct bch_fs *c, char *buf,
size_t size, struct bkey_s_c k)
{
char *out = buf, *end = buf + size;
@@ -578,14 +578,14 @@ static void bch_btree_ptr_to_text(struct bch_fs *c, char *buf,
if (bkey_extent_is_data(k.k))
out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
- invalid = bch_btree_ptr_invalid(c, k);
+ invalid = bch2_btree_ptr_invalid(c, k);
if (invalid)
p(" invalid: %s", invalid);
#undef p
}
struct extent_pick_ptr
-bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
+bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
{
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
const union bch_extent_crc *crc;
@@ -596,13 +596,13 @@ bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
struct bch_dev *ca = c->devs[ptr->dev];
struct btree *root = btree_node_root(c, b);
- if (bch_fs_inconsistent_on(crc, c,
+ if (bch2_fs_inconsistent_on(crc, c,
"btree node pointer with crc at btree %u level %u/%u bucket %zu",
b->btree_id, b->level, root ? root->level : -1,
PTR_BUCKET_NR(ca, ptr)))
break;
- if (bch_dev_inconsistent_on(ptr_stale(ca, ptr), ca,
+ if (bch2_dev_inconsistent_on(ptr_stale(ca, ptr), ca,
"stale btree node pointer at btree %u level %u/%u bucket %zu",
b->btree_id, b->level, root ? root->level : -1,
PTR_BUCKET_NR(ca, ptr)))
@@ -627,16 +627,16 @@ bch_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
return pick;
}
-const struct bkey_ops bch_bkey_btree_ops = {
- .key_invalid = bch_btree_ptr_invalid,
+const struct bkey_ops bch2_bkey_btree_ops = {
+ .key_invalid = bch2_btree_ptr_invalid,
.key_debugcheck = btree_ptr_debugcheck,
- .val_to_text = bch_btree_ptr_to_text,
- .swab = bch_ptr_swab,
+ .val_to_text = bch2_btree_ptr_to_text,
+ .swab = bch2_ptr_swab,
};
/* Extents */
-static bool __bch_cut_front(struct bpos where, struct bkey_s k)
+static bool __bch2_cut_front(struct bpos where, struct bkey_s k)
{
u64 len = 0;
@@ -687,12 +687,12 @@ static bool __bch_cut_front(struct bpos where, struct bkey_s k)
return true;
}
-bool bch_cut_front(struct bpos where, struct bkey_i *k)
+bool bch2_cut_front(struct bpos where, struct bkey_i *k)
{
- return __bch_cut_front(where, bkey_i_to_s(k));
+ return __bch2_cut_front(where, bkey_i_to_s(k));
}
-bool bch_cut_back(struct bpos where, struct bkey *k)
+bool bch2_cut_back(struct bpos where, struct bkey *k)
{
u64 len = 0;
@@ -719,7 +719,7 @@ bool bch_cut_back(struct bpos where, struct bkey *k)
*
* bkey_start_offset(k) will be preserved, modifies where the extent ends
*/
-void bch_key_resize(struct bkey *k,
+void bch2_key_resize(struct bkey *k,
unsigned new_size)
{
k->p.offset -= k->size;
@@ -744,11 +744,11 @@ static bool __extent_save(struct btree *b, struct btree_node_iter *iter,
dst_unpacked->k = *src;
ret = true;
} else {
- ret = bkey_pack_key(dst, src, f);
+ ret = bch2_bkey_pack_key(dst, src, f);
}
if (ret && iter)
- bch_verify_key_order(b, iter, dst);
+ bch2_verify_key_order(b, iter, dst);
return ret;
}
@@ -804,14 +804,14 @@ static void extent_sort_append(struct bch_fs *c,
if (bkey_whiteout(k))
return;
- bkey_unpack(b, &tmp.k, k);
+ bch2_bkey_unpack(b, &tmp.k, k);
if (*prev &&
- bch_extent_merge(c, b, (void *) *prev, &tmp.k))
+ bch2_extent_merge(c, b, (void *) *prev, &tmp.k))
return;
if (*prev) {
- bkey_pack(*prev, (void *) *prev, f);
+ bch2_bkey_pack(*prev, (void *) *prev, f);
btree_keys_account_key_add(nr, 0, *prev);
*prev = bkey_next(*prev);
@@ -822,7 +822,7 @@ static void extent_sort_append(struct bch_fs *c,
bkey_copy(*prev, &tmp.k);
}
-struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
+struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
struct bset *dst,
struct btree *b,
struct btree_node_iter *iter)
@@ -838,7 +838,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
heap_resort(iter, extent_sort_cmp);
- while (!bch_btree_node_iter_end(iter)) {
+ while (!bch2_btree_node_iter_end(iter)) {
lk = __btree_node_offset_to_key(b, _l->k);
if (iter->used == 1) {
@@ -885,7 +885,7 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
if (bkey_cmp(l.k->p, r.k->p) >= 0) {
sort_key_next(iter, b, _r);
} else {
- __bch_cut_front(l.k->p, r);
+ __bch2_cut_front(l.k->p, r);
extent_save(b, NULL, rk, r.k);
}
@@ -897,9 +897,9 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
* r wins, but it overlaps in the middle of l - split l:
*/
bkey_reassemble(&tmp.k, l.s_c);
- bch_cut_back(bkey_start_pos(r.k), &tmp.k.k);
+ bch2_cut_back(bkey_start_pos(r.k), &tmp.k.k);
- __bch_cut_front(r.k->p, l);
+ __bch2_cut_front(r.k->p, l);
extent_save(b, NULL, lk, l.k);
extent_sort_sift(iter, b, 0);
@@ -907,13 +907,13 @@ struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
extent_sort_append(c, b, &nr, dst->start, &prev,
bkey_to_packed(&tmp.k));
} else {
- bch_cut_back(bkey_start_pos(r.k), l.k);
+ bch2_cut_back(bkey_start_pos(r.k), l.k);
extent_save(b, NULL, lk, l.k);
}
}
if (prev) {
- bkey_pack(prev, (void *) prev, f);
+ bch2_bkey_pack(prev, (void *) prev, f);
btree_keys_account_key_add(&nr, 0, prev);
out = bkey_next(prev);
} else {
@@ -936,7 +936,7 @@ struct extent_insert_state {
bool deleting;
};
-static void bch_add_sectors(struct extent_insert_state *s,
+static void bch2_add_sectors(struct extent_insert_state *s,
struct bkey_s_c k, u64 offset, s64 sectors)
{
struct bch_fs *c = s->trans->c;
@@ -947,47 +947,47 @@ static void bch_add_sectors(struct extent_insert_state *s,
if (!sectors)
return;
- bch_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
+ bch2_mark_key(c, k, sectors, false, gc_pos_btree_node(b),
&s->stats, s->trans->journal_res.seq);
}
-static void bch_subtract_sectors(struct extent_insert_state *s,
+static void bch2_subtract_sectors(struct extent_insert_state *s,
struct bkey_s_c k, u64 offset, s64 sectors)
{
- bch_add_sectors(s, k, offset, -sectors);
+ bch2_add_sectors(s, k, offset, -sectors);
}
/* These wrappers subtract exactly the sectors that we're removing from @k */
-static void bch_cut_subtract_back(struct extent_insert_state *s,
+static void bch2_cut_subtract_back(struct extent_insert_state *s,
struct bpos where, struct bkey_s k)
{
- bch_subtract_sectors(s, k.s_c, where.offset,
+ bch2_subtract_sectors(s, k.s_c, where.offset,
k.k->p.offset - where.offset);
- bch_cut_back(where, k.k);
+ bch2_cut_back(where, k.k);
}
-static void bch_cut_subtract_front(struct extent_insert_state *s,
+static void bch2_cut_subtract_front(struct extent_insert_state *s,
struct bpos where, struct bkey_s k)
{
- bch_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
+ bch2_subtract_sectors(s, k.s_c, bkey_start_offset(k.k),
where.offset - bkey_start_offset(k.k));
- __bch_cut_front(where, k);
+ __bch2_cut_front(where, k);
}
-static void bch_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
+static void bch2_drop_subtract(struct extent_insert_state *s, struct bkey_s k)
{
if (k.k->size)
- bch_subtract_sectors(s, k.s_c,
+ bch2_subtract_sectors(s, k.s_c,
bkey_start_offset(k.k), k.k->size);
k.k->size = 0;
__set_bkey_deleted(k.k);
}
-static bool bch_extent_merge_inline(struct bch_fs *,
- struct btree_iter *,
- struct bkey_packed *,
- struct bkey_packed *,
- bool);
+static bool bch2_extent_merge_inline(struct bch_fs *,
+ struct btree_iter *,
+ struct bkey_packed *,
+ struct bkey_packed *,
+ bool);
#define MAX_LOCK_HOLD_TIME (5 * NSEC_PER_MSEC)
@@ -1005,7 +1005,7 @@ extent_insert_should_stop(struct extent_insert_state *s)
* will insert two keys, and one iteration of this loop will insert one
* key, so we need room for three keys.
*/
- if (!bch_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
+ if (!bch2_btree_node_insert_fits(s->trans->c, b, s->insert->k->k.u64s))
return BTREE_INSERT_BTREE_NODE_FULL;
else if (!journal_res_insert_fits(s->trans, s->insert))
return BTREE_INSERT_JOURNAL_RES_FULL; /* XXX worth tracing */
@@ -1020,8 +1020,8 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
struct btree_node_iter *node_iter = &iter->node_iters[0];
struct bset_tree *t = bset_tree_last(b);
struct bkey_packed *where =
- bch_btree_node_iter_bset_pos(node_iter, b, t);
- struct bkey_packed *prev = bkey_prev(b, t, where);
+ bch2_btree_node_iter_bset_pos(node_iter, b, t);
+ struct bkey_packed *prev = bch2_bkey_prev(b, t, where);
struct bkey_packed *next_live_key = where;
unsigned clobber_u64s;
@@ -1039,21 +1039,21 @@ static void extent_bset_insert(struct bch_fs *c, struct btree_iter *iter,
clobber_u64s = (u64 *) next_live_key - (u64 *) where;
if (prev &&
- bch_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
+ bch2_extent_merge_inline(c, iter, prev, bkey_to_packed(insert), true))
goto drop_deleted_keys;
if (next_live_key != btree_bkey_last(b, t) &&
- bch_extent_merge_inline(c, iter, bkey_to_packed(insert),
+ bch2_extent_merge_inline(c, iter, bkey_to_packed(insert),
next_live_key, false))
goto drop_deleted_keys;
- bch_bset_insert(b, node_iter, where, insert, clobber_u64s);
- bch_btree_node_iter_fix(iter, b, node_iter, t, where,
+ bch2_bset_insert(b, node_iter, where, insert, clobber_u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t, where,
clobber_u64s, where->u64s);
return;
drop_deleted_keys:
- bch_bset_delete(b, where, clobber_u64s);
- bch_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
+ bch2_bset_delete(b, where, clobber_u64s);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t, where, clobber_u64s, 0);
}
static void extent_insert_committed(struct extent_insert_state *s)
@@ -1072,7 +1072,7 @@ static void extent_insert_committed(struct extent_insert_state *s)
return;
if (s->deleting && !s->do_journal) {
- bch_cut_front(s->committed, insert);
+ bch2_cut_front(s->committed, insert);
goto done;
}
@@ -1084,27 +1084,27 @@ static void extent_insert_committed(struct extent_insert_state *s)
bkey_cmp(s->committed, insert->k.p) &&
bkey_extent_is_compressed(bkey_i_to_s_c(insert))) {
/* XXX: possibly need to increase our reservation? */
- bch_cut_subtract_back(s, s->committed,
+ bch2_cut_subtract_back(s, s->committed,
bkey_i_to_s(&split.k));
- bch_cut_front(s->committed, insert);
- bch_add_sectors(s, bkey_i_to_s_c(insert),
+ bch2_cut_front(s->committed, insert);
+ bch2_add_sectors(s, bkey_i_to_s_c(insert),
bkey_start_offset(&insert->k),
insert->k.size);
} else {
- bch_cut_back(s->committed, &split.k.k);
- bch_cut_front(s->committed, insert);
+ bch2_cut_back(s->committed, &split.k.k);
+ bch2_cut_front(s->committed, insert);
}
if (debug_check_bkeys(c))
- bkey_debugcheck(c, iter->nodes[iter->level],
+ bch2_bkey_debugcheck(c, iter->nodes[iter->level],
bkey_i_to_s_c(&split.k));
- bch_btree_journal_key(s->trans, iter, &split.k);
+ bch2_btree_journal_key(s->trans, iter, &split.k);
if (!s->deleting)
extent_bset_insert(c, iter, &split.k);
done:
- bch_btree_iter_set_pos_same_leaf(iter, s->committed);
+ bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
insert->k.needs_whiteout = false;
s->do_journal = false;
@@ -1142,9 +1142,9 @@ __extent_insert_advance_pos(struct extent_insert_state *s,
break;
case BTREE_HOOK_NO_INSERT:
extent_insert_committed(s);
- bch_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
+ bch2_cut_subtract_front(s, next_pos, bkey_i_to_s(s->insert->k));
- bch_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
+ bch2_btree_iter_set_pos_same_leaf(s->insert->iter, next_pos);
break;
case BTREE_HOOK_RESTART_TRANS:
return ret;
@@ -1210,7 +1210,7 @@ extent_insert_check_split_compressed(struct extent_insert_state *s,
if (s->trans->flags & BTREE_INSERT_NOFAIL)
flags |= BCH_DISK_RESERVATION_NOFAIL;
- switch (bch_disk_reservation_add(c,
+ switch (bch2_disk_reservation_add(c,
s->trans->disk_res,
sectors, flags)) {
case 0:
@@ -1240,14 +1240,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
/* insert overlaps with start of k: */
- bch_cut_subtract_front(s, insert->k.p, k);
+ bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, node_iter, _k, k.k);
break;
case BCH_EXTENT_OVERLAP_BACK:
/* insert overlaps with end of k: */
- bch_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
+ bch2_cut_subtract_back(s, bkey_start_pos(&insert->k), k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, node_iter, _k, k.k);
@@ -1256,8 +1256,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
* key and we've just changed the end, update the
* auxiliary tree.
*/
- bch_bset_fix_invalidated_key(b, t, _k);
- bch_btree_node_iter_fix(iter, b, node_iter, t,
+ bch2_bset_fix_invalidated_key(b, t, _k);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t,
_k, _k->u64s, _k->u64s);
break;
@@ -1269,7 +1269,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
btree_keys_account_key_drop(&b->nr,
t - b->set, _k);
- bch_drop_subtract(s, k);
+ bch2_drop_subtract(s, k);
k.k->p = bkey_start_pos(&insert->k);
if (!__extent_save(b, node_iter, _k, k.k)) {
/*
@@ -1294,8 +1294,8 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
*/
EBUG_ON(bkey_cmp(s->committed, k.k->p));
} else {
- bch_bset_fix_invalidated_key(b, t, _k);
- bch_btree_node_iter_fix(iter, b, node_iter, t,
+ bch2_bset_fix_invalidated_key(b, t, _k);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t,
_k, _k->u64s, _k->u64s);
}
@@ -1320,14 +1320,14 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
bkey_reassemble(&split.k, k.s_c);
split.k.k.needs_whiteout |= bset_written(b, bset(b, t));
- bch_cut_back(bkey_start_pos(&insert->k), &split.k.k);
+ bch2_cut_back(bkey_start_pos(&insert->k), &split.k.k);
BUG_ON(bkey_deleted(&split.k.k));
- bch_cut_subtract_front(s, insert->k.p, k);
+ bch2_cut_subtract_front(s, insert->k.p, k);
BUG_ON(bkey_deleted(k.k));
extent_save(b, node_iter, _k, k.k);
- bch_add_sectors(s, bkey_i_to_s_c(&split.k),
+ bch2_add_sectors(s, bkey_i_to_s_c(&split.k),
bkey_start_offset(&split.k.k),
split.k.k.size);
extent_bset_insert(c, iter, &split.k);
@@ -1339,7 +1339,7 @@ extent_squash(struct extent_insert_state *s, struct bkey_i *insert,
}
static enum btree_insert_ret
-bch_delete_fixup_extent(struct extent_insert_state *s)
+bch2_delete_fixup_extent(struct extent_insert_state *s)
{
struct bch_fs *c = s->trans->c;
struct btree_iter *iter = s->insert->iter;
@@ -1357,8 +1357,8 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
while (bkey_cmp(s->committed, insert->k.p) < 0 &&
(ret = extent_insert_should_stop(s)) == BTREE_INSERT_OK &&
- (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
- struct bset_tree *t = bch_bkey_to_bset(b, _k);
+ (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
+ struct bset_tree *t = bch2_bkey_to_bset(b, _k);
struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
enum bch_extent_overlap overlap;
@@ -1373,7 +1373,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
goto next;
}
- overlap = bch_extent_overlap(&insert->k, k.k);
+ overlap = bch2_extent_overlap(&insert->k, k.k);
ret = extent_insert_check_split_compressed(s, k.s_c, overlap);
if (ret != BTREE_INSERT_OK)
@@ -1394,7 +1394,7 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
if (overlap == BCH_EXTENT_OVERLAP_ALL) {
btree_keys_account_key_drop(&b->nr,
t - b->set, _k);
- bch_subtract_sectors(s, k.s_c,
+ bch2_subtract_sectors(s, k.s_c,
bkey_start_offset(k.k), k.k->size);
_k->type = KEY_TYPE_DISCARD;
reserve_whiteout(b, t, _k);
@@ -1404,10 +1404,10 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
switch (overlap) {
case BCH_EXTENT_OVERLAP_FRONT:
- bch_cut_front(bkey_start_pos(k.k), &discard);
+ bch2_cut_front(bkey_start_pos(k.k), &discard);
break;
case BCH_EXTENT_OVERLAP_BACK:
- bch_cut_back(k.k->p, &discard.k);
+ bch2_cut_back(k.k->p, &discard.k);
break;
default:
break;
@@ -1424,8 +1424,8 @@ bch_delete_fixup_extent(struct extent_insert_state *s)
BUG_ON(ret != BTREE_INSERT_OK);
}
next:
- bch_cut_front(s->committed, insert);
- bch_btree_iter_set_pos_same_leaf(iter, s->committed);
+ bch2_cut_front(s->committed, insert);
+ bch2_btree_iter_set_pos_same_leaf(iter, s->committed);
}
if (bkey_cmp(s->committed, insert->k.p) < 0 &&
@@ -1435,13 +1435,13 @@ next:
stop:
extent_insert_committed(s);
- bch_fs_usage_apply(c, &s->stats, s->trans->disk_res,
+ bch2_fs_usage_apply(c, &s->stats, s->trans->disk_res,
gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, s->committed));
EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) != iter->at_end_of_leaf);
- bch_cut_front(iter->pos, insert);
+ bch2_cut_front(iter->pos, insert);
if (insert->k.size && iter->at_end_of_leaf)
ret = BTREE_INSERT_NEED_TRAVERSE;
@@ -1458,7 +1458,7 @@ stop:
* of the insert key. For cmpxchg operations this is where that logic lives.
*
* All subsets of @insert that need to be inserted are inserted using
- * bch_btree_insert_and_journal(). If @b or @res fills up, this function
+ * bch2_btree_insert_and_journal(). If @b or @res fills up, this function
* returns false, setting @iter->pos for the prefix of @insert that actually got
* inserted.
*
@@ -1482,7 +1482,7 @@ stop:
* i.e. no two overlapping keys _of nonzero size_
*
* We can't realistically maintain this invariant for zero size keys because of
- * the key merging done in bch_btree_insert_key() - for two mergeable keys k, j
+ * the key merging done in bch2_btree_insert_key() - for two mergeable keys k, j
* there may be another 0 size key between them in another bset, and it will
* thus overlap with the merged key.
*
@@ -1491,8 +1491,8 @@ stop:
* key insertion needs to continue/be retried.
*/
enum btree_insert_ret
-bch_insert_fixup_extent(struct btree_insert *trans,
- struct btree_insert_entry *insert)
+bch2_insert_fixup_extent(struct btree_insert *trans,
+ struct btree_insert_entry *insert)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter = insert->iter;
@@ -1513,7 +1513,7 @@ bch_insert_fixup_extent(struct btree_insert *trans,
EBUG_ON(bkey_deleted(&insert->k->k) || !insert->k->k.size);
if (s.deleting)
- return bch_delete_fixup_extent(&s);
+ return bch2_delete_fixup_extent(&s);
/*
* As we process overlapping extents, we advance @iter->pos both to
@@ -1524,14 +1524,14 @@ bch_insert_fixup_extent(struct btree_insert *trans,
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- bch_add_sectors(&s, bkey_i_to_s_c(insert->k),
+ bch2_add_sectors(&s, bkey_i_to_s_c(insert->k),
bkey_start_offset(&insert->k->k),
insert->k->k.size);
while (bkey_cmp(s.committed, insert->k->k.p) < 0 &&
(ret = extent_insert_should_stop(&s)) == BTREE_INSERT_OK &&
- (_k = bch_btree_node_iter_peek_all(node_iter, b))) {
- struct bset_tree *t = bch_bkey_to_bset(b, _k);
+ (_k = bch2_btree_node_iter_peek_all(node_iter, b))) {
+ struct bset_tree *t = bch2_bkey_to_bset(b, _k);
struct bkey_s k = __bkey_disassemble(b, _k, &unpacked);
enum bch_extent_overlap overlap;
@@ -1541,7 +1541,7 @@ bch_insert_fixup_extent(struct btree_insert *trans,
if (bkey_cmp(bkey_start_pos(k.k), insert->k->k.p) >= 0)
break;
- overlap = bch_extent_overlap(&insert->k->k, k.k);
+ overlap = bch2_extent_overlap(&insert->k->k, k.k);
ret = extent_insert_check_split_compressed(&s, k.s_c, overlap);
if (ret != BTREE_INSERT_OK)
@@ -1593,11 +1593,11 @@ stop:
*/
if (insert->k->k.size &&
!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- bch_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
+ bch2_subtract_sectors(&s, bkey_i_to_s_c(insert->k),
bkey_start_offset(&insert->k->k),
insert->k->k.size);
- bch_fs_usage_apply(c, &s.stats, trans->disk_res,
+ bch2_fs_usage_apply(c, &s.stats, trans->disk_res,
gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
@@ -1612,8 +1612,8 @@ stop:
return ret;
}
-static const char *bch_extent_invalid(const struct bch_fs *c,
- struct bkey_s_c k)
+static const char *bch2_extent_invalid(const struct bch_fs *c,
+ struct bkey_s_c k)
{
if (bkey_val_u64s(k.k) > BKEY_EXTENT_VAL_U64s_MAX)
return "value too big";
@@ -1644,7 +1644,7 @@ static const char *bch_extent_invalid(const struct bch_fs *c,
size_ondisk = crc_compressed_size(e.k, crc);
- if (!bch_checksum_type_valid(c, crc_csum_type(crc)))
+ if (!bch2_checksum_type_valid(c, crc_csum_type(crc)))
return "invalid checksum type";
if (crc_compression_type(crc) >= BCH_COMPRESSION_NR)
@@ -1679,8 +1679,8 @@ static const char *bch_extent_invalid(const struct bch_fs *c,
}
}
-static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
- struct bkey_s_c_extent e)
+static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c_extent e)
{
const struct bch_extent_ptr *ptr;
struct bch_dev *ca;
@@ -1693,7 +1693,7 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
/*
* XXX: we should be doing most/all of these checks at startup time,
- * where we check bkey_invalid() in btree_node_read_done()
+ * where we check bch2_bkey_invalid() in btree_node_read_done()
*
* But note that we can't check for stale pointers or incorrect gc marks
* until after journal replay is done (it might be an extent that's
@@ -1728,10 +1728,10 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
stale = ptr_stale(ca, ptr);
- bch_fs_bug_on(stale && !ptr->cached, c,
+ bch2_fs_bug_on(stale && !ptr->cached, c,
"stale dirty pointer");
- bch_fs_bug_on(stale > 96, c,
+ bch2_fs_bug_on(stale > 96, c,
"key too stale: %i",
stale);
@@ -1751,9 +1751,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
}
if (replicas > BCH_REPLICAS_MAX) {
- bch_bkey_val_to_text(c, btree_node_type(b), buf,
+ bch2_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- bch_fs_bug(c,
+ bch2_fs_bug(c,
"extent key bad (too many replicas: %u): %s",
replicas, buf);
return;
@@ -1761,9 +1761,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
if (!bkey_extent_is_cached(e.k) &&
replicas < c->sb.data_replicas_have) {
- bch_bkey_val_to_text(c, btree_node_type(b), buf,
+ bch2_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- bch_fs_bug(c,
+ bch2_fs_bug(c,
"extent key bad (too few replicas, %u < %u): %s",
replicas, c->sb.data_replicas_have, buf);
return;
@@ -1772,9 +1772,9 @@ static void bch_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
return;
bad_ptr:
- bch_bkey_val_to_text(c, btree_node_type(b), buf,
+ bch2_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
- bch_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
+ bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu prio %i "
"gen %i last_gc %i mark 0x%08x",
buf, PTR_BUCKET_NR(ca, ptr),
g->read_prio, PTR_BUCKET(ca, ptr)->mark.gen,
@@ -1783,13 +1783,13 @@ bad_ptr:
return;
}
-static void bch_extent_debugcheck(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k)
+static void bch2_extent_debugcheck(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k)
{
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED:
- bch_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
+ bch2_extent_debugcheck_extent(c, b, bkey_s_c_to_extent(k));
break;
case BCH_RESERVATION:
break;
@@ -1798,8 +1798,8 @@ static void bch_extent_debugcheck(struct bch_fs *c, struct btree *b,
}
}
-static void bch_extent_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
+static void bch2_extent_to_text(struct bch_fs *c, char *buf,
+ size_t size, struct bkey_s_c k)
{
char *out = buf, *end = buf + size;
const char *invalid;
@@ -1809,7 +1809,7 @@ static void bch_extent_to_text(struct bch_fs *c, char *buf,
if (bkey_extent_is_data(k.k))
out += extent_print_ptrs(c, buf, size, bkey_s_c_to_extent(k));
- invalid = bch_extent_invalid(c, k);
+ invalid = bch2_extent_invalid(c, k);
if (invalid)
p(" invalid: %s", invalid);
#undef p
@@ -1821,12 +1821,12 @@ static unsigned PTR_TIER(struct bch_fs *c,
return c->devs[ptr->dev]->mi.tier;
}
-static void bch_extent_crc_init(union bch_extent_crc *crc,
- unsigned compressed_size,
- unsigned uncompressed_size,
- unsigned compression_type,
- unsigned nonce,
- struct bch_csum csum, unsigned csum_type)
+static void bch2_extent_crc_init(union bch_extent_crc *crc,
+ unsigned compressed_size,
+ unsigned uncompressed_size,
+ unsigned compression_type,
+ unsigned nonce,
+ struct bch_csum csum, unsigned csum_type)
{
if (bch_crc_bytes[csum_type] <= 4 &&
uncompressed_size <= CRC32_SIZE_MAX &&
@@ -1879,12 +1879,12 @@ static void bch_extent_crc_init(union bch_extent_crc *crc,
BUG();
}
-void bch_extent_crc_append(struct bkey_i_extent *e,
- unsigned compressed_size,
- unsigned uncompressed_size,
- unsigned compression_type,
- unsigned nonce,
- struct bch_csum csum, unsigned csum_type)
+void bch2_extent_crc_append(struct bkey_i_extent *e,
+ unsigned compressed_size,
+ unsigned uncompressed_size,
+ unsigned compression_type,
+ unsigned nonce,
+ struct bch_csum csum, unsigned csum_type)
{
union bch_extent_crc *crc;
@@ -1913,7 +1913,7 @@ void bch_extent_crc_append(struct bkey_i_extent *e,
crc_csum(crc).hi == csum.hi)
return;
- bch_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
+ bch2_extent_crc_init((void *) extent_entry_last(extent_i_to_s(e)),
compressed_size,
uncompressed_size,
compression_type,
@@ -1929,7 +1929,7 @@ void bch_extent_crc_append(struct bkey_i_extent *e,
* For existing keys, only called when btree nodes are being rewritten, not when
* they're merely being compacted/resorted in memory.
*/
-bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k)
+bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
struct bkey_s_extent e;
@@ -1948,7 +1948,7 @@ bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k)
case BCH_EXTENT_CACHED:
e = bkey_s_to_extent(k);
- bch_extent_drop_stale(c, e);
+ bch2_extent_drop_stale(c, e);
if (!bkey_val_u64s(e.k)) {
if (bkey_extent_is_cached(e.k)) {
@@ -1968,9 +1968,9 @@ bool bch_extent_normalize(struct bch_fs *c, struct bkey_s k)
}
}
-void bch_extent_mark_replicas_cached(struct bch_fs *c,
- struct bkey_s_extent e,
- unsigned nr_cached)
+void bch2_extent_mark_replicas_cached(struct bch_fs *c,
+ struct bkey_s_extent e,
+ unsigned nr_cached)
{
struct bch_extent_ptr *ptr;
bool have_higher_tier;
@@ -2007,9 +2007,9 @@ void bch_extent_mark_replicas_cached(struct bch_fs *c,
* as the pointers are sorted by tier, hence preferring pointers to tier 0
* rather than pointers to tier 1.
*/
-void bch_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k,
- struct bch_dev *avoid,
- struct extent_pick_ptr *ret)
+void bch2_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k,
+ struct bch_dev *avoid,
+ struct extent_pick_ptr *ret)
{
struct bkey_s_c_extent e;
const union bch_extent_crc *crc;
@@ -2071,9 +2071,9 @@ void bch_extent_pick_ptr_avoiding(struct bch_fs *c, struct bkey_s_c k,
}
}
-static enum merge_result bch_extent_merge(struct bch_fs *c,
- struct btree *bk,
- struct bkey_i *l, struct bkey_i *r)
+static enum merge_result bch2_extent_merge(struct bch_fs *c,
+ struct btree *bk,
+ struct bkey_i *l, struct bkey_i *r)
{
struct bkey_s_extent el, er;
union bch_extent_entry *en_l, *en_r;
@@ -2152,12 +2152,12 @@ static enum merge_result bch_extent_merge(struct bch_fs *c,
* overflow KEY_SIZE
*/
if ((u64) l->k.size + r->k.size > KEY_SIZE_MAX) {
- bch_key_resize(&l->k, KEY_SIZE_MAX);
- bch_cut_front(l->k.p, r);
+ bch2_key_resize(&l->k, KEY_SIZE_MAX);
+ bch2_cut_front(l->k.p, r);
return BCH_MERGE_PARTIAL;
}
- bch_key_resize(&l->k, l->k.size + r->k.size);
+ bch2_key_resize(&l->k, l->k.size + r->k.size);
return BCH_MERGE_MERGE;
}
@@ -2171,7 +2171,7 @@ static void extent_i_save(struct btree *b, struct bkey_packed *dst,
BUG_ON(bkeyp_val_u64s(f, dst) != bkey_val_u64s(&src->k));
/*
- * We don't want the bch_verify_key_order() call in extent_save(),
+ * We don't want the bch2_verify_key_order() call in extent_save(),
* because we may be out of order with deleted keys that are about to be
* removed by extent_bset_insert()
*/
@@ -2179,7 +2179,7 @@ static void extent_i_save(struct btree *b, struct bkey_packed *dst,
if ((dst_unpacked = packed_to_bkey(dst)))
bkey_copy(dst_unpacked, src);
else
- BUG_ON(!bkey_pack(dst, src, f));
+ BUG_ON(!bch2_bkey_pack(dst, src, f));
}
static bool extent_merge_one_overlapping(struct btree_iter *iter,
@@ -2198,8 +2198,8 @@ static bool extent_merge_one_overlapping(struct btree_iter *iter,
} else {
uk.p = new_pos;
extent_save(b, node_iter, k, &uk);
- bch_bset_fix_invalidated_key(b, t, k);
- bch_btree_node_iter_fix(iter, b, node_iter, t,
+ bch2_bset_fix_invalidated_key(b, t, k);
+ bch2_btree_node_iter_fix(iter, b, node_iter, t,
k, k->u64s, k->u64s);
return true;
}
@@ -2235,10 +2235,10 @@ do_fixup:
* if we don't find this bset in the iterator we already got to
* the end of that bset, so start searching from the end.
*/
- k = bch_btree_node_iter_bset_pos(node_iter, b, t);
+ k = bch2_btree_node_iter_bset_pos(node_iter, b, t);
if (k == btree_bkey_last(b, t))
- k = bkey_prev_all(b, t, k);
+ k = bch2_bkey_prev_all(b, t, k);
if (!k)
continue;
@@ -2252,7 +2252,7 @@ do_fixup:
k &&
(uk = bkey_unpack_key(b, k),
bkey_cmp(uk.p, bkey_start_pos(m)) > 0);
- k = bkey_prev_all(b, t, k)) {
+ k = bch2_bkey_prev_all(b, t, k)) {
if (bkey_cmp(uk.p, m->p) >= 0)
continue;
@@ -2294,11 +2294,11 @@ do_fixup:
*
* Also unpacks and repacks.
*/
-static bool bch_extent_merge_inline(struct bch_fs *c,
- struct btree_iter *iter,
- struct bkey_packed *l,
- struct bkey_packed *r,
- bool back_merge)
+static bool bch2_extent_merge_inline(struct bch_fs *c,
+ struct btree_iter *iter,
+ struct bkey_packed *l,
+ struct bkey_packed *r,
+ bool back_merge)
{
struct btree *b = iter->nodes[0];
struct btree_node_iter *node_iter = &iter->node_iters[0];
@@ -2314,27 +2314,27 @@ static bool bch_extent_merge_inline(struct bch_fs *c,
* We need to save copies of both l and r, because we might get a
* partial merge (which modifies both) and then fails to repack
*/
- bkey_unpack(b, &li.k, l);
- bkey_unpack(b, &ri.k, r);
+ bch2_bkey_unpack(b, &li.k, l);
+ bch2_bkey_unpack(b, &ri.k, r);
m = back_merge ? l : r;
mi = back_merge ? &li.k : &ri.k;
/* l & r should be in last bset: */
- EBUG_ON(bch_bkey_to_bset(b, m) != t);
+ EBUG_ON(bch2_bkey_to_bset(b, m) != t);
- switch (bch_extent_merge(c, b, &li.k, &ri.k)) {
+ switch (bch2_extent_merge(c, b, &li.k, &ri.k)) {
case BCH_MERGE_NOMERGE:
return false;
case BCH_MERGE_PARTIAL:
- if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &mi->k, f))
+ if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &mi->k, f))
return false;
if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
return false;
extent_i_save(b, m, mi);
- bch_bset_fix_invalidated_key(b, t, m);
+ bch2_bset_fix_invalidated_key(b, t, m);
/*
* Update iterator to reflect what we just inserted - otherwise,
@@ -2342,9 +2342,9 @@ static bool bch_extent_merge_inline(struct bch_fs *c,
* just partially merged with:
*/
if (back_merge)
- bch_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
+ bch2_btree_iter_set_pos_same_leaf(iter, li.k.k.p);
- bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
+ bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
t, m, m->u64s, m->u64s);
if (!back_merge)
@@ -2353,16 +2353,16 @@ static bool bch_extent_merge_inline(struct bch_fs *c,
bkey_copy(packed_to_bkey(r), &ri.k);
return false;
case BCH_MERGE_MERGE:
- if (bkey_packed(m) && !bkey_pack_key((void *) &tmp, &li.k.k, f))
+ if (bkey_packed(m) && !bch2_bkey_pack_key((void *) &tmp, &li.k.k, f))
return false;
if (!extent_merge_do_overlapping(iter, &li.k.k, back_merge))
return false;
extent_i_save(b, m, &li.k);
- bch_bset_fix_invalidated_key(b, t, m);
+ bch2_bset_fix_invalidated_key(b, t, m);
- bch_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
+ bch2_btree_node_iter_fix(iter, iter->nodes[0], node_iter,
t, m, m->u64s, m->u64s);
return true;
default:
@@ -2370,12 +2370,12 @@ static bool bch_extent_merge_inline(struct bch_fs *c,
}
}
-const struct bkey_ops bch_bkey_extent_ops = {
- .key_invalid = bch_extent_invalid,
- .key_debugcheck = bch_extent_debugcheck,
- .val_to_text = bch_extent_to_text,
- .swab = bch_ptr_swab,
- .key_normalize = bch_ptr_normalize,
- .key_merge = bch_extent_merge,
+const struct bkey_ops bch2_bkey_extent_ops = {
+ .key_invalid = bch2_extent_invalid,
+ .key_debugcheck = bch2_extent_debugcheck,
+ .val_to_text = bch2_extent_to_text,
+ .swab = bch2_ptr_swab,
+ .key_normalize = bch2_ptr_normalize,
+ .key_merge = bch2_extent_merge,
.is_extents = true,
};
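A minimal usage sketch (not part of this patch) of the renamed extent ops table defined just above; the wrapper name example_check_extent is hypothetical, while the field and signatures are taken from the hunks above:

#include "bcachefs.h"
#include "extents.h"

/* Dispatch the validity check through the renamed ops table; returns an
 * error string or NULL, matching bch2_extent_invalid() above. */
static const char *example_check_extent(const struct bch_fs *c,
					struct bkey_s_c k)
{
	return bch2_bkey_extent_ops.key_invalid(c, k);
}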
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index 070b64048168..3a9524846e27 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -1,7 +1,7 @@
#ifndef _BCACHE_EXTENTS_H
#define _BCACHE_EXTENTS_H
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey.h"
struct btree_node_iter;
@@ -9,16 +9,16 @@ struct btree_insert;
struct btree_insert_entry;
struct extent_insert_hook;
-struct btree_nr_keys bch_key_sort_fix_overlapping(struct bset *,
+struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *,
struct btree *,
struct btree_node_iter *);
-struct btree_nr_keys bch_extent_sort_fix_overlapping(struct bch_fs *c,
+struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
struct bset *,
struct btree *,
struct btree_node_iter *);
-extern const struct bkey_ops bch_bkey_btree_ops;
-extern const struct bkey_ops bch_bkey_extent_ops;
+extern const struct bkey_ops bch2_bkey_btree_ops;
+extern const struct bkey_ops bch2_bkey_extent_ops;
struct bch_fs;
struct journal_res;
@@ -30,28 +30,28 @@ struct extent_pick_ptr {
};
struct extent_pick_ptr
-bch_btree_pick_ptr(struct bch_fs *, const struct btree *);
+bch2_btree_pick_ptr(struct bch_fs *, const struct btree *);
-void bch_extent_pick_ptr_avoiding(struct bch_fs *, struct bkey_s_c,
+void bch2_extent_pick_ptr_avoiding(struct bch_fs *, struct bkey_s_c,
struct bch_dev *, struct extent_pick_ptr *);
static inline void
-bch_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
+bch2_extent_pick_ptr(struct bch_fs *c, struct bkey_s_c k,
struct extent_pick_ptr *ret)
{
- bch_extent_pick_ptr_avoiding(c, k, NULL, ret);
+ bch2_extent_pick_ptr_avoiding(c, k, NULL, ret);
}
enum btree_insert_ret
-bch_insert_fixup_extent(struct btree_insert *,
+bch2_insert_fixup_extent(struct btree_insert *,
struct btree_insert_entry *);
-bool bch_extent_normalize(struct bch_fs *, struct bkey_s);
-void bch_extent_mark_replicas_cached(struct bch_fs *,
+bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
+void bch2_extent_mark_replicas_cached(struct bch_fs *,
struct bkey_s_extent, unsigned);
-unsigned bch_extent_nr_ptrs(struct bkey_s_c_extent);
-unsigned bch_extent_nr_dirty_ptrs(struct bkey_s_c);
+unsigned bch2_extent_nr_ptrs(struct bkey_s_c_extent);
+unsigned bch2_extent_nr_dirty_ptrs(struct bkey_s_c);
static inline bool bkey_extent_is_data(const struct bkey *k)
{
@@ -324,7 +324,7 @@ out: \
(_ptr); \
(_ptr) = extent_ptr_prev(_e, _ptr))
-void bch_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned,
+void bch2_extent_crc_append(struct bkey_i_extent *, unsigned, unsigned,
unsigned, unsigned, struct bch_csum, unsigned);
static inline void __extent_entry_push(struct bkey_i_extent *e)
@@ -543,17 +543,17 @@ static inline unsigned extent_current_nonce(struct bkey_s_c_extent e)
const union bch_extent_crc *crc;
extent_for_each_crc(e, crc)
- if (bch_csum_type_is_encryption(crc_csum_type(crc)))
+ if (bch2_csum_type_is_encryption(crc_csum_type(crc)))
return crc_offset(crc) + crc_nonce(crc);
return 0;
}
-void bch_extent_narrow_crcs(struct bkey_s_extent);
-void bch_extent_drop_redundant_crcs(struct bkey_s_extent);
+void bch2_extent_narrow_crcs(struct bkey_s_extent);
+void bch2_extent_drop_redundant_crcs(struct bkey_s_extent);
/* Doesn't cleanup redundant crcs */
-static inline void __bch_extent_drop_ptr(struct bkey_s_extent e,
+static inline void __bch2_extent_drop_ptr(struct bkey_s_extent e,
struct bch_extent_ptr *ptr)
{
EBUG_ON(ptr < &e.v->start->ptr ||
@@ -564,18 +564,18 @@ static inline void __bch_extent_drop_ptr(struct bkey_s_extent e,
e.k->u64s -= sizeof(*ptr) / sizeof(u64);
}
-static inline void bch_extent_drop_ptr(struct bkey_s_extent e,
+static inline void bch2_extent_drop_ptr(struct bkey_s_extent e,
struct bch_extent_ptr *ptr)
{
- __bch_extent_drop_ptr(e, ptr);
- bch_extent_drop_redundant_crcs(e);
+ __bch2_extent_drop_ptr(e, ptr);
+ bch2_extent_drop_redundant_crcs(e);
}
const struct bch_extent_ptr *
-bch_extent_has_device(struct bkey_s_c_extent, unsigned);
+bch2_extent_has_device(struct bkey_s_c_extent, unsigned);
-bool bch_cut_front(struct bpos, struct bkey_i *);
-bool bch_cut_back(struct bpos, struct bkey *);
-void bch_key_resize(struct bkey *, unsigned);
+bool bch2_cut_front(struct bpos, struct bkey_i *);
+bool bch2_cut_back(struct bpos, struct bkey *);
+void bch2_key_resize(struct bkey *, unsigned);
#endif /* _BCACHE_EXTENTS_H */
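A minimal sketch (not part of this patch) using the renamed cut/resize helpers declared just above; the trim function and its bounds are hypothetical:

#include "bcachefs.h"
#include "extents.h"

/* Trim extent @k to [start, end): bch2_cut_front() drops the part before
 * @start, bch2_cut_back() drops the part at and after @end. */
static void example_trim_extent(struct bkey_i *k,
				struct bpos start, struct bpos end)
{
	bch2_cut_front(start, k);
	bch2_cut_back(end, &k->k);
}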
diff --git a/fs/bcachefs/fs-gc.c b/fs/bcachefs/fs-gc.c
index f718c3d6665a..20f552d2c97d 100644
--- a/fs/bcachefs/fs-gc.c
+++ b/fs/bcachefs/fs-gc.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_update.h"
#include "dirent.h"
#include "error.h"
@@ -23,7 +23,7 @@ static int remove_dirent(struct bch_fs *c, struct btree_iter *iter,
int ret;
char *buf;
- name.len = bch_dirent_name_bytes(dirent);
+ name.len = bch2_dirent_name_bytes(dirent);
buf = kmalloc(name.len + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -33,15 +33,15 @@ static int remove_dirent(struct bch_fs *c, struct btree_iter *iter,
name.name = buf;
/* Unlock iter so we don't deadlock, after copying name: */
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
- ret = bch_inode_find_by_inum(c, dir_inum, &dir_inode);
+ ret = bch2_inode_find_by_inum(c, dir_inum, &dir_inode);
if (ret)
goto err;
- dir_hash_info = bch_hash_info_init(c, &dir_inode);
+ dir_hash_info = bch2_hash_info_init(c, &dir_inode);
- ret = bch_dirent_delete(c, dir_inum, &dir_hash_info, &name, NULL);
+ ret = bch2_dirent_delete(c, dir_inum, &dir_hash_info, &name, NULL);
err:
kfree(buf);
return ret;
@@ -52,7 +52,7 @@ static int reattach_inode(struct bch_fs *c,
u64 inum)
{
struct bch_hash_info lostfound_hash_info =
- bch_hash_info_init(c, lostfound_inode);
+ bch2_hash_info_init(c, lostfound_inode);
struct bkey_inode_buf packed;
char name_buf[20];
struct qstr name;
@@ -63,14 +63,14 @@ static int reattach_inode(struct bch_fs *c,
lostfound_inode->i_nlink++;
- bch_inode_pack(&packed, lostfound_inode);
+ bch2_inode_pack(&packed, lostfound_inode);
- ret = bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL, 0);
if (ret)
return ret;
- return bch_dirent_create(c, lostfound_inode->inum,
+ return bch2_dirent_create(c, lostfound_inode->inum,
&lostfound_hash_info,
DT_DIR, &name, inum, NULL, 0);
}
@@ -96,7 +96,7 @@ static int walk_inode(struct bch_fs *c, struct inode_walker *w, u64 inum)
w->cur_inum = inum;
if (w->first_this_inode) {
- int ret = bch_inode_find_by_inum(c, inum, &w->inode);
+ int ret = bch2_inode_find_by_inum(c, inum, &w->inode);
if (ret && ret != -ENOENT)
return ret;
@@ -135,7 +135,7 @@ static int check_extents(struct bch_fs *c)
unfixable_fsck_err_on(w.first_this_inode && w.have_inode &&
w.inode.i_sectors !=
- (i_sectors = bch_count_inode_sectors(c, w.cur_inum)),
+ (i_sectors = bch2_count_inode_sectors(c, w.cur_inum)),
c, "i_sectors wrong: got %llu, should be %llu",
w.inode.i_sectors, i_sectors);
@@ -150,7 +150,7 @@ static int check_extents(struct bch_fs *c)
k.k->type, k.k->p.offset, k.k->p.inode, w.inode.i_size);
}
fsck_err:
- return bch_btree_iter_unlock(&iter) ?: ret;
+ return bch2_btree_iter_unlock(&iter) ?: ret;
}
/*
@@ -198,7 +198,7 @@ static int check_dirents(struct bch_fs *c)
continue;
}
- ret = bch_inode_find_by_inum(c, d_inum, &target);
+ ret = bch2_inode_find_by_inum(c, d_inum, &target);
if (ret && ret != -ENOENT)
break;
@@ -232,7 +232,7 @@ static int check_dirents(struct bch_fs *c)
bkey_reassemble(&n->k_i, d.s_c);
n->v.d_type = mode_to_type(le16_to_cpu(target.i_mode));
- ret = bch_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &n->k_i));
kfree(n);
@@ -243,7 +243,7 @@ static int check_dirents(struct bch_fs *c)
}
err:
fsck_err:
- return bch_btree_iter_unlock(&iter) ?: ret;
+ return bch2_btree_iter_unlock(&iter) ?: ret;
}
/*
@@ -268,7 +268,7 @@ static int check_xattrs(struct bch_fs *c)
k.k->p.inode);
}
fsck_err:
- return bch_btree_iter_unlock(&iter) ?: ret;
+ return bch2_btree_iter_unlock(&iter) ?: ret;
}
/* Get root directory, create if it doesn't exist: */
@@ -277,7 +277,7 @@ static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode)
struct bkey_inode_buf packed;
int ret;
- ret = bch_inode_find_by_inum(c, BCACHE_ROOT_INO, root_inode);
+ ret = bch2_inode_find_by_inum(c, BCACHE_ROOT_INO, root_inode);
if (ret && ret != -ENOENT)
return ret;
@@ -292,12 +292,12 @@ static int check_root(struct bch_fs *c, struct bch_inode_unpacked *root_inode)
fsck_err:
return ret;
create_root:
- bch_inode_init(c, root_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
+ bch2_inode_init(c, root_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
root_inode->inum = BCACHE_ROOT_INO;
- bch_inode_pack(&packed, root_inode);
+ bch2_inode_pack(&packed, root_inode);
- return bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
+ return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL, 0);
}
@@ -308,19 +308,19 @@ static int check_lostfound(struct bch_fs *c,
{
struct qstr lostfound = QSTR("lost+found");
struct bch_hash_info root_hash_info =
- bch_hash_info_init(c, root_inode);
+ bch2_hash_info_init(c, root_inode);
struct bkey_inode_buf packed;
u64 inum;
int ret;
- inum = bch_dirent_lookup(c, BCACHE_ROOT_INO, &root_hash_info,
+ inum = bch2_dirent_lookup(c, BCACHE_ROOT_INO, &root_hash_info,
&lostfound);
if (!inum) {
bch_notice(c, "creating lost+found");
goto create_lostfound;
}
- ret = bch_inode_find_by_inum(c, inum, lostfound_inode);
+ ret = bch2_inode_find_by_inum(c, inum, lostfound_inode);
if (ret && ret != -ENOENT)
return ret;
@@ -337,24 +337,24 @@ fsck_err:
create_lostfound:
root_inode->i_nlink++;
- bch_inode_pack(&packed, root_inode);
+ bch2_inode_pack(&packed, root_inode);
- ret = bch_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
+ ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, NULL, 0);
if (ret)
return ret;
- bch_inode_init(c, lostfound_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
- bch_inode_pack(&packed, lostfound_inode);
+ bch2_inode_init(c, lostfound_inode, 0, 0, S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
+ bch2_inode_pack(&packed, lostfound_inode);
- ret = bch_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0,
+ ret = bch2_inode_create(c, &packed.inode.k_i, BLOCKDEV_INODE_MAX, 0,
&c->unused_inode_hint);
if (ret)
return ret;
lostfound_inode->inum = packed.inode.k.p.inode;
- ret = bch_dirent_create(c, BCACHE_ROOT_INO, &root_hash_info, DT_DIR,
+ ret = bch2_dirent_create(c, BCACHE_ROOT_INO, &root_hash_info, DT_DIR,
&lostfound, lostfound_inode->inum, NULL, 0);
if (ret)
return ret;
@@ -488,10 +488,10 @@ next:
if (ret)
goto err;
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
goto next;
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
goto err;
up:
@@ -508,7 +508,7 @@ up:
if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.inode), c,
"unreachable directory found (inum %llu)",
k.k->p.inode)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
ret = reattach_inode(c, lostfound_inode, k.k->p.inode);
if (ret)
@@ -517,7 +517,7 @@ up:
had_unreachable = true;
}
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
goto err;
@@ -536,7 +536,7 @@ out:
return ret;
err:
fsck_err:
- ret = bch_btree_iter_unlock(&iter) ?: ret;
+ ret = bch2_btree_iter_unlock(&iter) ?: ret;
goto out;
}
@@ -570,7 +570,7 @@ static void inc_link(struct bch_fs *c, nlink_table *links,
}
noinline_for_stack
-static int bch_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
+static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
u64 range_start, u64 *range_end)
{
struct btree_iter iter;
@@ -597,16 +597,16 @@ static int bch_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
break;
}
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
bch_err(c, "error in fs gc: btree error %i while walking dirents", ret);
return ret;
}
-s64 bch_count_inode_sectors(struct bch_fs *c, u64 inum)
+s64 bch2_count_inode_sectors(struct bch_fs *c, u64 inum)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -620,10 +620,10 @@ s64 bch_count_inode_sectors(struct bch_fs *c, u64 inum)
sectors += k.k->size;
}
- return bch_btree_iter_unlock(&iter) ?: sectors;
+ return bch2_btree_iter_unlock(&iter) ?: sectors;
}
-static int bch_gc_do_inode(struct bch_fs *c,
+static int bch2_gc_do_inode(struct bch_fs *c,
struct bch_inode_unpacked *lostfound_inode,
struct btree_iter *iter,
struct bkey_s_c_inode inode, struct nlink link)
@@ -633,8 +633,8 @@ static int bch_gc_do_inode(struct bch_fs *c,
u32 i_nlink, real_i_nlink;
bool do_update = false;
- ret = bch_inode_unpack(inode, &u);
- if (bch_fs_inconsistent_on(ret, c,
+ ret = bch2_inode_unpack(inode, &u);
+ if (bch2_fs_inconsistent_on(ret, c,
"error unpacking inode %llu in fs-gc",
inode.k->p.inode))
return ret;
@@ -668,7 +668,7 @@ static int bch_gc_do_inode(struct bch_fs *c,
inode.k->p.inode);
if (fsck_err_on(S_ISDIR(u.i_mode) &&
- bch_empty_dir(c, inode.k->p.inode), c,
+ bch2_empty_dir(c, inode.k->p.inode), c,
"non empty directory with link count 0, "
"inode nlink %u, dir links found %u",
i_nlink, link.dir_count)) {
@@ -680,7 +680,7 @@ static int bch_gc_do_inode(struct bch_fs *c,
bch_verbose(c, "deleting inode %llu", inode.k->p.inode);
- ret = bch_inode_rm(c, inode.k->p.inode);
+ ret = bch2_inode_rm(c, inode.k->p.inode);
if (ret)
bch_err(c, "error in fs gc: error %i "
"while deleting inode", ret);
@@ -700,7 +700,7 @@ static int bch_gc_do_inode(struct bch_fs *c,
* just switch units to bytes and that issue goes away
*/
- ret = bch_inode_truncate(c, inode.k->p.inode,
+ ret = bch2_inode_truncate(c, inode.k->p.inode,
round_up(u.i_size, PAGE_SIZE) >> 9,
NULL, NULL);
if (ret) {
@@ -730,7 +730,7 @@ static int bch_gc_do_inode(struct bch_fs *c,
bch_verbose(c, "recounting sectors for inode %llu",
inode.k->p.inode);
- sectors = bch_count_inode_sectors(c, inode.k->p.inode);
+ sectors = bch2_count_inode_sectors(c, inode.k->p.inode);
if (sectors < 0) {
bch_err(c, "error in fs gc: error %i "
"recounting inode sectors",
@@ -760,9 +760,9 @@ static int bch_gc_do_inode(struct bch_fs *c,
if (do_update) {
struct bkey_inode_buf p;
- bch_inode_pack(&p, &u);
+ bch2_inode_pack(&p, &u);
- ret = bch_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
if (ret && ret != -EINTR)
@@ -774,7 +774,7 @@ fsck_err:
}
noinline_for_stack
-static int bch_gc_walk_inodes(struct bch_fs *c,
+static int bch2_gc_walk_inodes(struct bch_fs *c,
struct bch_inode_unpacked *lostfound_inode,
nlink_table *links,
u64 range_start, u64 range_end)
@@ -786,10 +786,10 @@ static int bch_gc_walk_inodes(struct bch_fs *c,
int ret = 0, ret2 = 0;
u64 nlinks_pos;
- bch_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0));
+ bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0));
genradix_iter_init(&nlinks_iter);
- while ((k = bch_btree_iter_peek(&iter)).k &&
+ while ((k = bch2_btree_iter_peek(&iter)).k &&
!btree_iter_err(k)) {
peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
@@ -814,9 +814,9 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
* Avoid potential deadlocks with iter for
* truncate/rm/etc.:
*/
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
- ret = bch_gc_do_inode(c, lostfound_inode, &iter,
+ ret = bch2_gc_do_inode(c, lostfound_inode, &iter,
bkey_s_c_to_inode(k), *link);
if (ret == -EINTR)
continue;
@@ -835,11 +835,11 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
if (nlinks_pos == iter.pos.inode)
genradix_iter_advance(&nlinks_iter, links);
- bch_btree_iter_advance_pos(&iter);
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
fsck_err:
- ret2 = bch_btree_iter_unlock(&iter);
+ ret2 = bch2_btree_iter_unlock(&iter);
if (ret2)
bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2);
@@ -860,13 +860,13 @@ static int check_inode_nlinks(struct bch_fs *c,
this_iter_range_start = next_iter_range_start;
next_iter_range_start = U64_MAX;
- ret = bch_gc_walk_dirents(c, &links,
+ ret = bch2_gc_walk_dirents(c, &links,
this_iter_range_start,
&next_iter_range_start);
if (ret)
break;
- ret = bch_gc_walk_inodes(c, lostfound_inode, &links,
+ ret = bch2_gc_walk_inodes(c, lostfound_inode, &links,
this_iter_range_start,
next_iter_range_start);
if (ret)
@@ -884,7 +884,7 @@ static int check_inode_nlinks(struct bch_fs *c,
* Checks for inconsistencies that shouldn't happen, unless we have a bug.
* Doesn't fix them yet, mainly because they haven't yet been observed:
*/
-int bch_fsck(struct bch_fs *c, bool full_fsck)
+int bch2_fsck(struct bch_fs *c, bool full_fsck)
{
struct bch_inode_unpacked root_inode, lostfound_inode;
int ret;
diff --git a/fs/bcachefs/fs-gc.h b/fs/bcachefs/fs-gc.h
index ac86fd22aba8..4bde1bda89c7 100644
--- a/fs/bcachefs/fs-gc.h
+++ b/fs/bcachefs/fs-gc.h
@@ -1,7 +1,7 @@
#ifndef _BCACHE_FS_GC_H
#define _BCACHE_FS_GC_H
-s64 bch_count_inode_sectors(struct bch_fs *, u64);
-int bch_fsck(struct bch_fs *, bool);
+s64 bch2_count_inode_sectors(struct bch_fs *, u64);
+int bch2_fsck(struct bch_fs *, bool);
#endif /* _BCACHE_FS_GC_H */
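A small sketch (not part of this patch) of the renamed fs-gc entry points declared above; the caller and its policy are hypothetical:

#include "bcachefs.h"
#include "fs-gc.h"

/* Recount one inode's sectors (a negative return is a btree error), then
 * run a full fsck pass. */
static int example_recount_then_fsck(struct bch_fs *c, u64 inum)
{
	s64 sectors = bch2_count_inode_sectors(c, inum);

	if (sectors < 0)
		return (int) sectors;

	return bch2_fsck(c, true);
}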
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 8a73d4a3b07c..8ad192c4b32c 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_update.h"
#include "buckets.h"
#include "clock.h"
@@ -23,9 +23,9 @@
#include <linux/writeback.h>
#include <trace/events/writeback.h>
-struct bio_set *bch_writepage_bioset;
-struct bio_set *bch_dio_read_bioset;
-struct bio_set *bch_dio_write_bioset;
+struct bio_set *bch2_writepage_bioset;
+struct bio_set *bch2_dio_read_bioset;
+struct bio_set *bch2_dio_write_bioset;
/* pagecache_block must be held */
static int write_invalidate_inode_pages_range(struct address_space *mapping,
@@ -77,11 +77,11 @@ static int inode_set_size(struct bch_inode_info *ei,
return 0;
}
-static int __must_check bch_write_inode_size(struct bch_fs *c,
+static int __must_check bch2_write_inode_size(struct bch_fs *c,
struct bch_inode_info *ei,
loff_t new_size)
{
- return __bch_write_inode(c, ei, inode_set_size, &new_size);
+ return __bch2_write_inode(c, ei, inode_set_size, &new_size);
}
static inline void i_size_dirty_put(struct bch_inode_info *ei)
@@ -159,7 +159,7 @@ static void i_sectors_dirty_put(struct bch_inode_info *ei,
if (atomic_long_dec_and_test(&ei->i_sectors_dirty_count)) {
struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;
- int ret = __bch_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL);
+ int ret = __bch2_write_inode(c, ei, inode_clear_i_sectors_dirty, NULL);
ret = ret;
}
@@ -186,7 +186,7 @@ static int __must_check i_sectors_dirty_get(struct bch_inode_info *ei,
if (!(ei->i_flags & BCH_INODE_I_SECTORS_DIRTY)) {
struct bch_fs *c = ei->vfs_inode.i_sb->s_fs_info;
- ret = __bch_write_inode(c, ei, inode_set_i_sectors_dirty, NULL);
+ ret = __bch2_write_inode(c, ei, inode_set_i_sectors_dirty, NULL);
}
if (!ret)
@@ -265,7 +265,7 @@ bchfs_extent_update_hook(struct extent_insert_hook *hook,
}
if (do_pack)
- bch_inode_pack(&h->inode_p, &h->inode_u);
+ bch2_inode_pack(&h->inode_p, &h->inode_u);
return BTREE_HOOK_DO_INSERT;
}
@@ -277,14 +277,14 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
struct keylist *keys = &op->op.insert_keys;
struct btree_iter extent_iter, inode_iter;
struct bchfs_extent_trans_hook hook;
- struct bkey_i *k = bch_keylist_front(keys);
+ struct bkey_i *k = bch2_keylist_front(keys);
int ret;
BUG_ON(k->k.p.inode != op->ei->vfs_inode.i_ino);
- bch_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch_keylist_front(keys)->k));
- bch_btree_iter_init_intent(&inode_iter, wop->c, BTREE_ID_INODES,
+ bch2_btree_iter_init_intent(&extent_iter, wop->c, BTREE_ID_EXTENTS,
+ bkey_start_pos(&bch2_keylist_front(keys)->k));
+ bch2_btree_iter_init_intent(&inode_iter, wop->c, BTREE_ID_INODES,
POS(extent_iter.pos.inode, 0));
hook.op = op;
@@ -292,12 +292,12 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
hook.need_inode_update = false;
do {
- ret = bch_btree_iter_traverse(&extent_iter);
+ ret = bch2_btree_iter_traverse(&extent_iter);
if (ret)
goto err;
/* XXX: ei->i_size locking */
- k = bch_keylist_front(keys);
+ k = bch2_keylist_front(keys);
if (min(k->k.p.offset << 9, op->new_i_size) > op->ei->i_size)
hook.need_inode_update = true;
@@ -305,9 +305,9 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
struct bkey_s_c inode;
if (!btree_iter_linked(&inode_iter))
- bch_btree_iter_link(&extent_iter, &inode_iter);
+ bch2_btree_iter_link(&extent_iter, &inode_iter);
- inode = bch_btree_iter_peek_with_holes(&inode_iter);
+ inode = bch2_btree_iter_peek_with_holes(&inode_iter);
if ((ret = btree_iter_err(inode)))
goto err;
@@ -329,7 +329,7 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
}
bkey_reassemble(&hook.inode_p.inode.k_i, inode);
- ret = bch_inode_unpack(bkey_s_c_to_inode(inode),
+ ret = bch2_inode_unpack(bkey_s_c_to_inode(inode),
&hook.inode_u);
if (WARN_ONCE(ret,
"error %i unpacking inode %llu",
@@ -338,14 +338,14 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
break;
}
- ret = bch_btree_insert_at(wop->c, &wop->res,
+ ret = bch2_btree_insert_at(wop->c, &wop->res,
&hook.hook, op_journal_seq(wop),
BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&extent_iter, k),
BTREE_INSERT_ENTRY_EXTRA_RES(&inode_iter,
&hook.inode_p.inode.k_i, 2));
} else {
- ret = bch_btree_insert_at(wop->c, &wop->res,
+ ret = bch2_btree_insert_at(wop->c, &wop->res,
&hook.hook, op_journal_seq(wop),
BTREE_INSERT_NOFAIL|BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&extent_iter, k));
@@ -356,11 +356,11 @@ err:
if (ret)
break;
- bch_keylist_pop_front(keys);
- } while (!bch_keylist_empty(keys));
+ bch2_keylist_pop_front(keys);
+ } while (!bch2_keylist_empty(keys));
- bch_btree_iter_unlock(&extent_iter);
- bch_btree_iter_unlock(&inode_iter);
+ bch2_btree_iter_unlock(&extent_iter);
+ bch2_btree_iter_unlock(&inode_iter);
return ret;
}
@@ -371,7 +371,7 @@ err:
/*
* bch_page_state has to (unfortunately) be manipulated with cmpxchg - we could
- * almost protected it with the page lock, except that bch_writepage_io_done has
+ * almost protected it with the page lock, except that bch2_writepage_io_done has
* to update the sector counts (and from interrupt/bottom half context).
*/
struct bch_page_state {
@@ -427,7 +427,7 @@ static inline struct bch_page_state *page_state(struct page *page)
return s;
}
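/*
 * A minimal sketch of the cmpxchg-style update described in the comment
 * above, using hypothetical names and userspace C11 atomics rather than
 * the kernel's cmpxchg(); it only illustrates the read/modify/
 * compare-exchange retry loop, assuming the whole state fits in one
 * machine word (as struct bch_page_state does):
 */
#include <stdatomic.h>

struct example_page_state {
	unsigned	reserved:1;
	unsigned	allocated:1;
	unsigned	nr_replicas:4;
	unsigned	sectors:13;
	unsigned	dirty_sectors:13;
};

union example_page_state_word {
	struct example_page_state	s;
	unsigned long			v;
};

static void example_mark_reserved(_Atomic unsigned long *word)
{
	union example_page_state_word old, new;

	old.v = atomic_load(word);
	do {
		/* recompute the new state from the freshly observed old one */
		new = old;
		new.s.reserved = 1;
		/* retry if e.g. an IO completion updated the word concurrently */
	} while (!atomic_compare_exchange_weak(word, &old.v, new.v));
}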
-static void bch_put_page_reservation(struct bch_fs *c, struct page *page)
+static void bch2_put_page_reservation(struct bch_fs *c, struct page *page)
{
struct disk_reservation res = { .sectors = PAGE_SECTORS };
struct bch_page_state s;
@@ -438,10 +438,10 @@ static void bch_put_page_reservation(struct bch_fs *c, struct page *page)
s.reserved = 0;
});
- bch_disk_reservation_put(c, &res);
+ bch2_disk_reservation_put(c, &res);
}
-static int bch_get_page_reservation(struct bch_fs *c, struct page *page,
+static int bch2_get_page_reservation(struct bch_fs *c, struct page *page,
bool check_enospc)
{
struct bch_page_state *s = page_state(page), new;
@@ -453,14 +453,14 @@ static int bch_get_page_reservation(struct bch_fs *c, struct page *page,
if (s->allocated || s->reserved)
return 0;
- ret = bch_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
+ ret = bch2_disk_reservation_get(c, &res, PAGE_SECTORS, !check_enospc
? BCH_DISK_RESERVATION_NOFAIL : 0);
if (ret)
return ret;
page_state_cmpxchg(s, new, {
if (new.reserved) {
- bch_disk_reservation_put(c, &res);
+ bch2_disk_reservation_put(c, &res);
return 0;
}
new.reserved = 1;
@@ -470,7 +470,7 @@ static int bch_get_page_reservation(struct bch_fs *c, struct page *page,
return 0;
}
-static void bch_clear_page_bits(struct page *page)
+static void bch2_clear_page_bits(struct page *page)
{
struct inode *inode = page->mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -490,10 +490,10 @@ static void bch_clear_page_bits(struct page *page)
}
if (s.reserved)
- bch_disk_reservation_put(c, &res);
+ bch2_disk_reservation_put(c, &res);
}
-int bch_set_page_dirty(struct page *page)
+int bch2_set_page_dirty(struct page *page)
{
struct bch_page_state old, new;
@@ -548,7 +548,7 @@ static int bio_add_page_contig(struct bio *bio, struct page *page)
return 0;
}
-static void bch_readpages_end_io(struct bio *bio)
+static void bch2_readpages_end_io(struct bio *bio)
{
struct bio_vec *bv;
int i;
@@ -611,7 +611,7 @@ static inline struct page *readpage_iter_next(struct readpages_iter *iter)
for (; \
((_page) = __readpage_next_page(&(_iter)));) \
-static void bch_mark_pages_unalloc(struct bio *bio)
+static void bch2_mark_pages_unalloc(struct bio *bio)
{
struct bvec_iter iter;
struct bio_vec bv;
@@ -620,7 +620,7 @@ static void bch_mark_pages_unalloc(struct bio *bio)
page_state(bv.bv_page)->allocated = 0;
}
-static void bch_add_page_sectors(struct bio *bio, struct bkey_s_c k)
+static void bch2_add_page_sectors(struct bio *bio, struct bkey_s_c k)
{
struct bvec_iter iter;
struct bio_vec bv;
@@ -634,10 +634,10 @@ static void bch_add_page_sectors(struct bio *bio, struct bkey_s_c k)
unsigned page_sectors = min(bv.bv_len >> 9, k_sectors);
if (!s->sectors)
- s->nr_replicas = bch_extent_nr_dirty_ptrs(k);
+ s->nr_replicas = bch2_extent_nr_dirty_ptrs(k);
else
s->nr_replicas = min_t(unsigned, s->nr_replicas,
- bch_extent_nr_dirty_ptrs(k));
+ bch2_extent_nr_dirty_ptrs(k));
BUG_ON(s->sectors + page_sectors > PAGE_SECTORS);
s->sectors += page_sectors;
@@ -708,13 +708,13 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
unsigned bytes;
bool is_last;
- bch_btree_iter_set_pos(iter, POS(inode, bio->bi_iter.bi_sector));
+ bch2_btree_iter_set_pos(iter, POS(inode, bio->bi_iter.bi_sector));
- k = bch_btree_iter_peek_with_holes(iter);
+ k = bch2_btree_iter_peek_with_holes(iter);
BUG_ON(!k.k);
if (IS_ERR(k.k)) {
- int ret = bch_btree_iter_unlock(iter);
+ int ret = bch2_btree_iter_unlock(iter);
BUG_ON(!ret);
bcache_io_error(c, bio, "btree IO error %i", ret);
bio_endio(bio);
@@ -722,10 +722,10 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
}
bkey_reassemble(&tmp.k, k);
- bch_btree_iter_unlock(iter);
+ bch2_btree_iter_unlock(iter);
k = bkey_i_to_s_c(&tmp.k);
- bch_extent_pick_ptr(c, k, &pick);
+ bch2_extent_pick_ptr(c, k, &pick);
if (IS_ERR(pick.ca)) {
bcache_io_error(c, bio, "no device to read from");
bio_endio(bio);
@@ -745,11 +745,11 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
swap(bio->bi_iter.bi_size, bytes);
if (bkey_extent_is_allocation(k.k))
- bch_add_page_sectors(bio, k);
+ bch2_add_page_sectors(bio, k);
if (!bkey_extent_is_allocation(k.k) ||
bkey_extent_is_compressed(k))
- bch_mark_pages_unalloc(bio);
+ bch2_mark_pages_unalloc(bio);
if (is_last)
flags |= BCH_READ_IS_LAST;
@@ -758,7 +758,7 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
c->prio_clock[READ].hand;
- bch_read_extent(c, rbio, k, &pick, flags);
+ bch2_read_extent(c, rbio, k, &pick, flags);
flags &= ~BCH_READ_MAY_REUSE_BIO;
} else {
zero_fill_bio(bio);
@@ -775,7 +775,7 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
}
}
-int bch_readpages(struct file *file, struct address_space *mapping,
+int bch2_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
@@ -786,7 +786,7 @@ int bch_readpages(struct file *file, struct address_space *mapping,
.mapping = mapping, .nr_pages = nr_pages
};
- bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
INIT_LIST_HEAD(&readpages_iter.pages);
list_add(&readpages_iter.pages, pages);
@@ -805,7 +805,7 @@ int bch_readpages(struct file *file, struct address_space *mapping,
&c->bio_read),
struct bch_read_bio, bio);
- rbio->bio.bi_end_io = bch_readpages_end_io;
+ rbio->bio.bi_end_io = bch2_readpages_end_io;
bio_add_page_contig(&rbio->bio, page);
bchfs_read(c, &iter, rbio, inode->i_ino, &readpages_iter);
}
@@ -841,11 +841,11 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
bio_set_op_attrs(&rbio->bio, REQ_OP_READ, REQ_SYNC);
bio_add_page_contig(&rbio->bio, page);
- bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
bchfs_read(c, &iter, rbio, inode, NULL);
}
-int bch_readpage(struct file *file, struct page *page)
+int bch2_readpage(struct file *file, struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
@@ -855,7 +855,7 @@ int bch_readpage(struct file *file, struct page *page)
rbio = container_of(bio_alloc_bioset(GFP_NOFS, 1,
&c->bio_read),
struct bch_read_bio, bio);
- rbio->bio.bi_end_io = bch_readpages_end_io;
+ rbio->bio.bi_end_io = bch2_readpages_end_io;
__bchfs_readpage(c, rbio, inode->i_ino, page);
return 0;
@@ -865,7 +865,7 @@ struct bch_writepage_state {
struct bch_writepage_io *io;
};
-static void bch_writepage_io_free(struct closure *cl)
+static void bch2_writepage_io_free(struct closure *cl)
{
struct bch_writepage_io *io = container_of(cl,
struct bch_writepage_io, cl);
@@ -874,7 +874,7 @@ static void bch_writepage_io_free(struct closure *cl)
bio_put(bio);
}
-static void bch_writepage_io_done(struct closure *cl)
+static void bch2_writepage_io_done(struct closure *cl)
{
struct bch_writepage_io *io = container_of(cl,
struct bch_writepage_io, cl);
@@ -932,10 +932,10 @@ static void bch_writepage_io_done(struct closure *cl)
bio_for_each_segment_all(bvec, bio, i)
end_page_writeback(bvec->bv_page);
- closure_return_with_destructor(&io->cl, bch_writepage_io_free);
+ closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
}
-static void bch_writepage_do_io(struct bch_writepage_state *w)
+static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
struct bch_writepage_io *io = w->io;
@@ -944,18 +944,18 @@ static void bch_writepage_do_io(struct bch_writepage_state *w)
io->op.op.pos.offset = io->bio.bio.bi_iter.bi_sector;
- closure_call(&io->op.op.cl, bch_write, NULL, &io->cl);
- continue_at(&io->cl, bch_writepage_io_done, NULL);
+ closure_call(&io->op.op.cl, bch2_write, NULL, &io->cl);
+ continue_at(&io->cl, bch2_writepage_io_done, NULL);
}
/*
* Get a bch_writepage_io and add @page to it - appending to an existing one if
* possible, else allocating a new one:
*/
-static void bch_writepage_io_alloc(struct bch_fs *c,
- struct bch_writepage_state *w,
- struct bch_inode_info *ei,
- struct page *page)
+static void bch2_writepage_io_alloc(struct bch_fs *c,
+ struct bch_writepage_state *w,
+ struct bch_inode_info *ei,
+ struct page *page)
{
u64 inum = ei->vfs_inode.i_ino;
unsigned nr_replicas = page_state(page)->nr_replicas;
@@ -967,14 +967,14 @@ static void bch_writepage_io_alloc(struct bch_fs *c,
alloc_io:
w->io = container_of(bio_alloc_bioset(GFP_NOFS,
BIO_MAX_PAGES,
- bch_writepage_bioset),
+ bch2_writepage_bioset),
struct bch_writepage_io, bio.bio);
closure_init(&w->io->cl, NULL);
w->io->op.ei = ei;
w->io->op.sectors_added = 0;
w->io->op.is_dio = false;
- bch_write_op_init(&w->io->op.op, c, &w->io->bio,
+ bch2_write_op_init(&w->io->op.op, c, &w->io->bio,
(struct disk_reservation) {
.nr_replicas = c->opts.data_replicas,
},
@@ -986,7 +986,7 @@ alloc_io:
if (w->io->op.op.res.nr_replicas != nr_replicas ||
bio_add_page_contig(&w->io->bio.bio, page)) {
- bch_writepage_do_io(w);
+ bch2_writepage_do_io(w);
goto alloc_io;
}
@@ -997,9 +997,9 @@ alloc_io:
BUG_ON(ei != w->io->op.ei);
}
-static int __bch_writepage(struct bch_fs *c, struct page *page,
- struct writeback_control *wbc,
- struct bch_writepage_state *w)
+static int __bch2_writepage(struct bch_fs *c, struct page *page,
+ struct writeback_control *wbc,
+ struct bch_writepage_state *w)
{
struct inode *inode = page->mapping->host;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -1030,7 +1030,7 @@ static int __bch_writepage(struct bch_fs *c, struct page *page,
*/
zero_user_segment(page, offset, PAGE_SIZE);
do_io:
- bch_writepage_io_alloc(c, w, ei, page);
+ bch2_writepage_io_alloc(c, w, ei, page);
/* while page is locked: */
w->io->op.new_i_size = i_size;
@@ -1063,7 +1063,7 @@ out:
return 0;
}
-int bch_writepages(struct address_space *mapping, struct writeback_control *wbc)
+int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct bch_fs *c = mapping->host->i_sb->s_fs_info;
struct bch_writepage_state w = { NULL };
@@ -1109,7 +1109,7 @@ get_pages:
if (w.io &&
!bio_can_add_page_contig(&w.io->bio.bio, page))
- bch_writepage_do_io(&w);
+ bch2_writepage_do_io(&w);
if (!w.io &&
atomic_read(&c->writeback_pages) >=
@@ -1156,7 +1156,7 @@ continue_unlock:
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
- ret = __bch_writepage(c, page, wbc, &w);
+ ret = __bch2_writepage(c, page, wbc, &w);
if (unlikely(ret)) {
if (ret == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
@@ -1192,7 +1192,7 @@ continue_unlock:
pagecache_iter_release(&iter);
if (w.io)
- bch_writepage_do_io(&w);
+ bch2_writepage_do_io(&w);
if (!cycled && !done) {
/*
@@ -1211,26 +1211,26 @@ continue_unlock:
return ret;
}
-int bch_writepage(struct page *page, struct writeback_control *wbc)
+int bch2_writepage(struct page *page, struct writeback_control *wbc)
{
struct bch_fs *c = page->mapping->host->i_sb->s_fs_info;
struct bch_writepage_state w = { NULL };
int ret;
- ret = __bch_writepage(c, page, wbc, &w);
+ ret = __bch2_writepage(c, page, wbc, &w);
if (w.io)
- bch_writepage_do_io(&w);
+ bch2_writepage_do_io(&w);
return ret;
}
-static void bch_read_single_page_end_io(struct bio *bio)
+static void bch2_read_single_page_end_io(struct bio *bio)
{
complete(bio->bi_private);
}
-static int bch_read_single_page(struct page *page,
- struct address_space *mapping)
+static int bch2_read_single_page(struct page *page,
+ struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -1242,7 +1242,7 @@ static int bch_read_single_page(struct page *page,
&c->bio_read),
struct bch_read_bio, bio);
rbio->bio.bi_private = &done;
- rbio->bio.bi_end_io = bch_read_single_page_end_io;
+ rbio->bio.bi_end_io = bch2_read_single_page_end_io;
__bchfs_readpage(c, rbio, inode->i_ino, page);
wait_for_completion(&done);
@@ -1257,9 +1257,9 @@ static int bch_read_single_page(struct page *page,
return 0;
}
-int bch_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
+int bch2_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -1296,11 +1296,11 @@ int bch_write_begin(struct file *file, struct address_space *mapping,
goto out;
}
readpage:
- ret = bch_read_single_page(page, mapping);
+ ret = bch2_read_single_page(page, mapping);
if (ret)
goto err;
out:
- ret = bch_get_page_reservation(c, page, true);
+ ret = bch2_get_page_reservation(c, page, true);
if (ret) {
if (!PageUptodate(page)) {
/*
@@ -1326,9 +1326,9 @@ err_unlock:
return ret;
}
-int bch_write_end(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+int bch2_write_end(struct file *filp, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -1355,7 +1355,7 @@ int bch_write_end(struct file *filp, struct address_space *mapping,
if (!PageDirty(page))
set_page_dirty(page);
} else {
- bch_put_page_reservation(c, page);
+ bch2_put_page_reservation(c, page);
}
unlock_page(page);
@@ -1367,7 +1367,7 @@ int bch_write_end(struct file *filp, struct address_space *mapping,
/* O_DIRECT */
-static void bch_dio_read_complete(struct closure *cl)
+static void bch2_dio_read_complete(struct closure *cl)
{
struct dio_read *dio = container_of(cl, struct dio_read, cl);
@@ -1375,7 +1375,7 @@ static void bch_dio_read_complete(struct closure *cl)
bio_check_pages_dirty(&dio->rbio.bio); /* transfers ownership */
}
-static void bch_direct_IO_read_endio(struct bio *bio)
+static void bch2_direct_IO_read_endio(struct bio *bio)
{
struct dio_read *dio = bio->bi_private;
@@ -1385,15 +1385,15 @@ static void bch_direct_IO_read_endio(struct bio *bio)
closure_put(&dio->cl);
}
-static void bch_direct_IO_read_split_endio(struct bio *bio)
+static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
- bch_direct_IO_read_endio(bio);
+ bch2_direct_IO_read_endio(bio);
bio_check_pages_dirty(bio); /* transfers ownership */
}
-static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req,
- struct file *file, struct inode *inode,
- struct iov_iter *iter, loff_t offset)
+static int bch2_direct_IO_read(struct bch_fs *c, struct kiocb *req,
+ struct file *file, struct inode *inode,
+ struct iov_iter *iter, loff_t offset)
{
struct dio_read *dio;
struct bio *bio;
@@ -1412,9 +1412,9 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req,
bio = bio_alloc_bioset(GFP_KERNEL,
iov_iter_npages(iter, BIO_MAX_PAGES),
- bch_dio_read_bioset);
+ bch2_dio_read_bioset);
- bio->bi_end_io = bch_direct_IO_read_endio;
+ bio->bi_end_io = bch2_direct_IO_read_endio;
dio = container_of(bio, struct dio_read, rbio.bio);
closure_init(&dio->cl, NULL);
@@ -1424,7 +1424,7 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req,
* end:
*/
if (!sync) {
- set_closure_fn(&dio->cl, bch_dio_read_complete, NULL);
+ set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_RUNNING +
@@ -1442,7 +1442,7 @@ static int bch_direct_IO_read(struct bch_fs *c, struct kiocb *req,
bio = bio_alloc_bioset(GFP_KERNEL,
iov_iter_npages(iter, BIO_MAX_PAGES),
&c->bio_read);
- bio->bi_end_io = bch_direct_IO_read_split_endio;
+ bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC);
bio->bi_iter.bi_sector = offset >> 9;
@@ -1462,7 +1462,7 @@ start:
if (iter->count)
closure_get(&dio->cl);
- bch_read(c, container_of(bio,
+ bch2_read(c, container_of(bio,
struct bch_read_bio, bio),
inode->i_ino);
}
@@ -1478,14 +1478,14 @@ start:
}
}
-static long __bch_dio_write_complete(struct dio_write *dio)
+static long __bch2_dio_write_complete(struct dio_write *dio)
{
struct file *file = dio->req->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = file->f_inode;
long ret = dio->error ?: dio->written;
- bch_disk_reservation_put(dio->c, &dio->res);
+ bch2_disk_reservation_put(dio->c, &dio->res);
__pagecache_block_put(&mapping->add_lock);
inode_dio_end(inode);
@@ -1497,15 +1497,15 @@ static long __bch_dio_write_complete(struct dio_write *dio)
return ret;
}
-static void bch_dio_write_complete(struct closure *cl)
+static void bch2_dio_write_complete(struct closure *cl)
{
struct dio_write *dio = container_of(cl, struct dio_write, cl);
struct kiocb *req = dio->req;
- req->ki_complete(req, __bch_dio_write_complete(dio), 0);
+ req->ki_complete(req, __bch2_dio_write_complete(dio), 0);
}
-static void bch_dio_write_done(struct dio_write *dio)
+static void bch2_dio_write_done(struct dio_write *dio)
{
struct bio_vec *bv;
int i;
@@ -1522,7 +1522,7 @@ static void bch_dio_write_done(struct dio_write *dio)
bio_reset(&dio->bio.bio);
}
-static void bch_do_direct_IO_write(struct dio_write *dio)
+static void bch2_do_direct_IO_write(struct dio_write *dio)
{
struct file *file = dio->req->ki_filp;
struct inode *inode = file->f_inode;
@@ -1540,7 +1540,7 @@ static void bch_do_direct_IO_write(struct dio_write *dio)
ret = bio_get_user_pages(bio, &dio->iter, 0);
if (ret < 0) {
/*
- * these didn't get initialized, but bch_dio_write_done() will
+ * these didn't get initialized, but bch2_dio_write_done() will
* look at them:
*/
dio->iop.op.error = 0;
@@ -1553,7 +1553,7 @@ static void bch_do_direct_IO_write(struct dio_write *dio)
dio->iop.sectors_added = 0;
dio->iop.is_dio = true;
dio->iop.new_i_size = U64_MAX;
- bch_write_op_init(&dio->iop.op, dio->c, &dio->bio,
+ bch2_write_op_init(&dio->iop.op, dio->c, &dio->bio,
dio->res,
foreground_write_point(dio->c, inode->i_ino),
POS(inode->i_ino, bio->bi_iter.bi_sector),
@@ -1565,40 +1565,40 @@ static void bch_do_direct_IO_write(struct dio_write *dio)
task_io_account_write(bio->bi_iter.bi_size);
- closure_call(&dio->iop.op.cl, bch_write, NULL, &dio->cl);
+ closure_call(&dio->iop.op.cl, bch2_write, NULL, &dio->cl);
}
-static void bch_dio_write_loop_async(struct closure *cl)
+static void bch2_dio_write_loop_async(struct closure *cl)
{
struct dio_write *dio =
container_of(cl, struct dio_write, cl);
struct address_space *mapping = dio->req->ki_filp->f_mapping;
- bch_dio_write_done(dio);
+ bch2_dio_write_done(dio);
if (dio->iter.count && !dio->error) {
use_mm(dio->mm);
pagecache_block_get(&mapping->add_lock);
- bch_do_direct_IO_write(dio);
+ bch2_do_direct_IO_write(dio);
pagecache_block_put(&mapping->add_lock);
unuse_mm(dio->mm);
- continue_at(&dio->cl, bch_dio_write_loop_async, NULL);
+ continue_at(&dio->cl, bch2_dio_write_loop_async, NULL);
} else {
#if 0
- closure_return_with_destructor(cl, bch_dio_write_complete);
+ closure_return_with_destructor(cl, bch2_dio_write_complete);
#else
closure_debug_destroy(cl);
- bch_dio_write_complete(cl);
+ bch2_dio_write_complete(cl);
#endif
}
}
-static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req,
- struct file *file, struct inode *inode,
- struct iov_iter *iter, loff_t offset)
+static int bch2_direct_IO_write(struct bch_fs *c, struct kiocb *req,
+ struct file *file, struct inode *inode,
+ struct iov_iter *iter, loff_t offset)
{
struct address_space *mapping = file->f_mapping;
struct dio_write *dio;
@@ -1616,7 +1616,7 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req,
bio = bio_alloc_bioset(GFP_KERNEL,
iov_iter_npages(iter, BIO_MAX_PAGES),
- bch_dio_write_bioset);
+ bch2_dio_write_bioset);
dio = container_of(bio, struct dio_write, bio.bio);
dio->req = req;
dio->c = c;
@@ -1639,7 +1639,7 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req,
* Have to then guard against racing with truncate (deleting data that
* we would have been overwriting)
*/
- ret = bch_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
+ ret = bch2_disk_reservation_get(c, &dio->res, iter->count >> 9, 0);
if (unlikely(ret)) {
closure_debug_destroy(&dio->cl);
bio_put(bio);
@@ -1651,16 +1651,16 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req,
if (sync) {
do {
- bch_do_direct_IO_write(dio);
+ bch2_do_direct_IO_write(dio);
closure_sync(&dio->cl);
- bch_dio_write_done(dio);
+ bch2_dio_write_done(dio);
} while (dio->iter.count && !dio->error);
closure_debug_destroy(&dio->cl);
- return __bch_dio_write_complete(dio);
+ return __bch2_dio_write_complete(dio);
} else {
- bch_do_direct_IO_write(dio);
+ bch2_do_direct_IO_write(dio);
if (dio->iter.count && !dio->error) {
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
@@ -1679,12 +1679,12 @@ static int bch_direct_IO_write(struct bch_fs *c, struct kiocb *req,
dio->iter.iov = dio->iovec;
}
- continue_at_noreturn(&dio->cl, bch_dio_write_loop_async, NULL);
+ continue_at_noreturn(&dio->cl, bch2_dio_write_loop_async, NULL);
return -EIOCBQUEUED;
}
}
-ssize_t bch_direct_IO(struct kiocb *req, struct iov_iter *iter)
+ssize_t bch2_direct_IO(struct kiocb *req, struct iov_iter *iter)
{
struct file *file = req->ki_filp;
struct inode *inode = file->f_inode;
@@ -1694,15 +1694,15 @@ ssize_t bch_direct_IO(struct kiocb *req, struct iov_iter *iter)
blk_start_plug(&plug);
ret = ((iov_iter_rw(iter) == WRITE)
- ? bch_direct_IO_write
- : bch_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
+ ? bch2_direct_IO_write
+ : bch2_direct_IO_read)(c, req, file, inode, iter, req->ki_pos);
blk_finish_plug(&plug);
return ret;
}
static ssize_t
-bch_direct_write(struct kiocb *iocb, struct iov_iter *iter)
+bch2_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_inode;
@@ -1719,14 +1719,14 @@ bch_direct_write(struct kiocb *iocb, struct iov_iter *iter)
if (unlikely(ret))
goto err;
- ret = bch_direct_IO_write(c, iocb, file, inode, iter, pos);
+ ret = bch2_direct_IO_write(c, iocb, file, inode, iter, pos);
err:
pagecache_block_put(&mapping->add_lock);
return ret;
}
-static ssize_t __bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t __bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
@@ -1744,7 +1744,7 @@ static ssize_t __bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
goto out;
ret = iocb->ki_flags & IOCB_DIRECT
- ? bch_direct_write(iocb, from)
+ ? bch2_direct_write(iocb, from)
: generic_perform_write(file, from, iocb->ki_pos);
if (likely(ret > 0))
@@ -1754,7 +1754,7 @@ out:
return ret;
}
-ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
@@ -1764,7 +1764,7 @@ ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
ret = generic_write_checks(iocb, from);
if (ret > 0)
- ret = __bch_write_iter(iocb, from);
+ ret = __bch2_write_iter(iocb, from);
inode_unlock(inode);
if (ret > 0 && !direct)
@@ -1773,7 +1773,7 @@ ssize_t bch_write_iter(struct kiocb *iocb, struct iov_iter *from)
return ret;
}
-int bch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+int bch2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
@@ -1801,7 +1801,7 @@ int bch_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
goto out;
}
- if (bch_get_page_reservation(c, page, true)) {
+ if (bch2_get_page_reservation(c, page, true)) {
unlock_page(page);
ret = VM_FAULT_SIGBUS;
goto out;
@@ -1817,8 +1817,8 @@ out:
return ret;
}
-void bch_invalidatepage(struct page *page, unsigned int offset,
- unsigned int length)
+void bch2_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int length)
{
EBUG_ON(!PageLocked(page));
EBUG_ON(PageWriteback(page));
@@ -1826,10 +1826,10 @@ void bch_invalidatepage(struct page *page, unsigned int offset,
if (offset || length < PAGE_SIZE)
return;
- bch_clear_page_bits(page);
+ bch2_clear_page_bits(page);
}
-int bch_releasepage(struct page *page, gfp_t gfp_mask)
+int bch2_releasepage(struct page *page, gfp_t gfp_mask)
{
EBUG_ON(!PageLocked(page));
EBUG_ON(PageWriteback(page));
@@ -1837,13 +1837,13 @@ int bch_releasepage(struct page *page, gfp_t gfp_mask)
if (PageDirty(page))
return 0;
- bch_clear_page_bits(page);
+ bch2_clear_page_bits(page);
return 1;
}
#ifdef CONFIG_MIGRATION
-int bch_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page, enum migrate_mode mode)
+int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
{
int ret;
@@ -1861,7 +1861,7 @@ int bch_migrate_page(struct address_space *mapping, struct page *newpage,
}
#endif
-int bch_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct inode *inode = file->f_mapping->host;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -1875,11 +1875,11 @@ int bch_fsync(struct file *file, loff_t start, loff_t end, int datasync)
if (c->opts.journal_flush_disabled)
return 0;
- return bch_journal_flush_seq(&c->journal, ei->journal_seq);
+ return bch2_journal_flush_seq(&c->journal, ei->journal_seq);
}
-static int __bch_truncate_page(struct address_space *mapping,
- pgoff_t index, loff_t start, loff_t end)
+static int __bch2_truncate_page(struct address_space *mapping,
+ pgoff_t index, loff_t start, loff_t end)
{
struct inode *inode = mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -1916,11 +1916,11 @@ static int __bch_truncate_page(struct address_space *mapping,
if (k.k->type != KEY_TYPE_DISCARD &&
k.k->type != BCH_RESERVATION) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
goto create;
}
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return 0;
create:
page = find_or_create_page(mapping, index, GFP_KERNEL);
@@ -1931,7 +1931,7 @@ create:
}
if (!PageUptodate(page)) {
- ret = bch_read_single_page(page, mapping);
+ ret = bch2_read_single_page(page, mapping);
if (ret)
goto unlock;
}
@@ -1942,7 +1942,7 @@ create:
* XXX: because we aren't currently tracking whether the page has actual
 * data in it (vs. just 0s, or only partially written) this is wrong. ick.
*/
- ret = bch_get_page_reservation(c, page, false);
+ ret = bch2_get_page_reservation(c, page, false);
BUG_ON(ret);
if (index == start >> PAGE_SHIFT &&
@@ -1962,13 +1962,13 @@ out:
return ret;
}
-static int bch_truncate_page(struct address_space *mapping, loff_t from)
+static int bch2_truncate_page(struct address_space *mapping, loff_t from)
{
- return __bch_truncate_page(mapping, from >> PAGE_SHIFT,
+ return __bch2_truncate_page(mapping, from >> PAGE_SHIFT,
from, from + PAGE_SIZE);
}
-int bch_truncate(struct inode *inode, struct iattr *iattr)
+int bch2_truncate(struct inode *inode, struct iattr *iattr)
{
struct address_space *mapping = inode->i_mapping;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -1990,7 +1990,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr)
mutex_lock(&ei->update_lock);
i_size_dirty_get(ei);
- ret = bch_write_inode_size(c, ei, inode->i_size);
+ ret = bch2_write_inode_size(c, ei, inode->i_size);
mutex_unlock(&ei->update_lock);
if (unlikely(ret))
@@ -1998,7 +1998,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr)
/*
* There might be persistent reservations (from fallocate())
- * above i_size, which bch_inode_truncate() will discard - we're
+ * above i_size, which bch2_inode_truncate() will discard - we're
* only supposed to discard them if we're doing a real truncate
* here (new i_size < current i_size):
*/
@@ -2010,13 +2010,13 @@ int bch_truncate(struct inode *inode, struct iattr *iattr)
if (unlikely(ret))
goto err;
- ret = bch_truncate_page(inode->i_mapping, iattr->ia_size);
+ ret = bch2_truncate_page(inode->i_mapping, iattr->ia_size);
if (unlikely(ret)) {
i_sectors_dirty_put(ei, &i_sectors_hook);
goto err;
}
- ret = bch_inode_truncate(c, inode->i_ino,
+ ret = bch2_inode_truncate(c, inode->i_ino,
round_up(iattr->ia_size, PAGE_SIZE) >> 9,
&i_sectors_hook.hook,
&ei->journal_seq);
@@ -2033,7 +2033,7 @@ int bch_truncate(struct inode *inode, struct iattr *iattr)
/* clear I_SIZE_DIRTY: */
i_size_dirty_put(ei);
- ret = bch_write_inode_size(c, ei, inode->i_size);
+ ret = bch2_write_inode_size(c, ei, inode->i_size);
mutex_unlock(&ei->update_lock);
pagecache_block_put(&mapping->add_lock);
@@ -2046,7 +2046,7 @@ err_put_pagecache:
return ret;
}
-static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
+static long bch2_fpunch(struct inode *inode, loff_t offset, loff_t len)
{
struct address_space *mapping = inode->i_mapping;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -2060,7 +2060,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
inode_dio_wait(inode);
pagecache_block_get(&mapping->add_lock);
- ret = __bch_truncate_page(inode->i_mapping,
+ ret = __bch2_truncate_page(inode->i_mapping,
offset >> PAGE_SHIFT,
offset, offset + len);
if (unlikely(ret))
@@ -2068,7 +2068,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
if (offset >> PAGE_SHIFT !=
(offset + len) >> PAGE_SHIFT) {
- ret = __bch_truncate_page(inode->i_mapping,
+ ret = __bch2_truncate_page(inode->i_mapping,
(offset + len) >> PAGE_SHIFT,
offset, offset + len);
if (unlikely(ret))
@@ -2082,13 +2082,13 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
struct i_sectors_hook i_sectors_hook;
int ret;
- BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));
+ BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
ret = i_sectors_dirty_get(ei, &i_sectors_hook);
if (unlikely(ret))
goto out;
- ret = bch_discard(c,
+ ret = bch2_discard(c,
POS(ino, discard_start),
POS(ino, discard_end),
ZERO_VERSION,
@@ -2097,7 +2097,7 @@ static long bch_fpunch(struct inode *inode, loff_t offset, loff_t len)
&ei->journal_seq);
i_sectors_dirty_put(ei, &i_sectors_hook);
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
}
out:
pagecache_block_put(&mapping->add_lock);
@@ -2106,7 +2106,7 @@ out:
return ret;
}
-static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len)
+static long bch2_fcollapse(struct inode *inode, loff_t offset, loff_t len)
{
struct address_space *mapping = inode->i_mapping;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -2122,11 +2122,11 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len)
if ((offset | len) & (PAGE_SIZE - 1))
return -EINVAL;
- bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS,
+ bch2_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS,
POS(inode->i_ino, offset >> 9));
/* position will be set from dst iter's position: */
- bch_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN);
- bch_btree_iter_link(&src, &dst);
+ bch2_btree_iter_init(&src, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_link(&src, &dst);
/*
* We need i_mutex to keep the page cache consistent with the extents
@@ -2161,14 +2161,14 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len)
round_up(new_size, PAGE_SIZE) >> 9)) < 0) {
struct disk_reservation disk_res;
- bch_btree_iter_set_pos(&src,
+ bch2_btree_iter_set_pos(&src,
POS(dst.pos.inode, dst.pos.offset + (len >> 9)));
- ret = bch_btree_iter_traverse(&dst);
+ ret = bch2_btree_iter_traverse(&dst);
if (ret)
goto btree_iter_err;
- k = bch_btree_iter_peek_with_holes(&src);
+ k = bch2_btree_iter_peek_with_holes(&src);
if ((ret = btree_iter_err(k)))
goto btree_iter_err;
@@ -2177,32 +2177,32 @@ static long bch_fcollapse(struct inode *inode, loff_t offset, loff_t len)
if (bkey_deleted(&copy.k.k))
copy.k.k.type = KEY_TYPE_DISCARD;
- bch_cut_front(src.pos, &copy.k);
+ bch2_cut_front(src.pos, &copy.k);
copy.k.k.p.offset -= len >> 9;
BUG_ON(bkey_cmp(dst.pos, bkey_start_pos(&copy.k.k)));
- ret = bch_disk_reservation_get(c, &disk_res, copy.k.k.size,
+ ret = bch2_disk_reservation_get(c, &disk_res, copy.k.k.size,
BCH_DISK_RESERVATION_NOFAIL);
BUG_ON(ret);
- ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
+ ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
&ei->journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&dst, &copy.k));
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
if (ret < 0 && ret != -EINTR)
goto err_unwind;
- bch_btree_iter_cond_resched(&src);
+ bch2_btree_iter_cond_resched(&src);
}
- bch_btree_iter_unlock(&src);
- bch_btree_iter_unlock(&dst);
+ bch2_btree_iter_unlock(&src);
+ bch2_btree_iter_unlock(&dst);
- ret = bch_inode_truncate(c, inode->i_ino,
+ ret = bch2_inode_truncate(c, inode->i_ino,
round_up(new_size, PAGE_SIZE) >> 9,
&i_sectors_hook.hook,
&ei->journal_seq);
@@ -2213,7 +2213,7 @@ btree_iter_err:
mutex_lock(&ei->update_lock);
i_size_write(inode, new_size);
- ret = bch_write_inode_size(c, ei, inode->i_size);
+ ret = bch2_write_inode_size(c, ei, inode->i_size);
mutex_unlock(&ei->update_lock);
pagecache_block_put(&mapping->add_lock);
@@ -2227,15 +2227,15 @@ err_unwind:
*/
i_sectors_dirty_put(ei, &i_sectors_hook);
err:
- bch_btree_iter_unlock(&src);
- bch_btree_iter_unlock(&dst);
+ bch2_btree_iter_unlock(&src);
+ bch2_btree_iter_unlock(&dst);
pagecache_block_put(&mapping->add_lock);
inode_unlock(inode);
return ret;
}
-static long bch_fallocate(struct inode *inode, int mode,
- loff_t offset, loff_t len)
+static long bch2_fallocate(struct inode *inode, int mode,
+ loff_t offset, loff_t len)
{
struct address_space *mapping = inode->i_mapping;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -2249,7 +2249,7 @@ static long bch_fallocate(struct inode *inode, int mode,
unsigned replicas = READ_ONCE(c->opts.data_replicas);
int ret;
- bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
inode_lock(inode);
inode_dio_wait(inode);
@@ -2263,14 +2263,14 @@ static long bch_fallocate(struct inode *inode, int mode,
}
if (mode & FALLOC_FL_ZERO_RANGE) {
- ret = __bch_truncate_page(inode->i_mapping,
+ ret = __bch2_truncate_page(inode->i_mapping,
offset >> PAGE_SHIFT,
offset, offset + len);
if (!ret &&
offset >> PAGE_SHIFT !=
(offset + len) >> PAGE_SHIFT)
- ret = __bch_truncate_page(inode->i_mapping,
+ ret = __bch2_truncate_page(inode->i_mapping,
(offset + len) >> PAGE_SHIFT,
offset, offset + len);
@@ -2286,7 +2286,7 @@ static long bch_fallocate(struct inode *inode, int mode,
block_end = round_up(offset + len, PAGE_SIZE);
}
- bch_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9));
+ bch2_btree_iter_set_pos(&iter, POS(inode->i_ino, block_start >> 9));
end = POS(inode->i_ino, block_end >> 9);
ret = i_sectors_dirty_get(ei, &i_sectors_hook);
@@ -2298,20 +2298,20 @@ static long bch_fallocate(struct inode *inode, int mode,
struct bkey_i_reservation reservation;
struct bkey_s_c k;
- k = bch_btree_iter_peek_with_holes(&iter);
+ k = bch2_btree_iter_peek_with_holes(&iter);
if ((ret = btree_iter_err(k)))
goto btree_iter_err;
/* already reserved */
if (k.k->type == BCH_RESERVATION &&
bkey_s_c_to_reservation(k).v->nr_replicas >= replicas) {
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
continue;
}
if (bkey_extent_is_data(k.k)) {
if (!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
continue;
}
}
@@ -2321,15 +2321,15 @@ static long bch_fallocate(struct inode *inode, int mode,
reservation.k.p = k.k->p;
reservation.k.size = k.k->size;
- bch_cut_front(iter.pos, &reservation.k_i);
- bch_cut_back(end, &reservation.k);
+ bch2_cut_front(iter.pos, &reservation.k_i);
+ bch2_cut_back(end, &reservation.k);
sectors = reservation.k.size;
- reservation.v.nr_replicas = bch_extent_nr_dirty_ptrs(k);
+ reservation.v.nr_replicas = bch2_extent_nr_dirty_ptrs(k);
if (reservation.v.nr_replicas < replicas ||
bkey_extent_is_compressed(k)) {
- ret = bch_disk_reservation_get(c, &disk_res,
+ ret = bch2_disk_reservation_get(c, &disk_res,
sectors, 0);
if (ret)
goto err_put_sectors_dirty;
@@ -2337,18 +2337,18 @@ static long bch_fallocate(struct inode *inode, int mode,
reservation.v.nr_replicas = disk_res.nr_replicas;
}
- ret = bch_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
+ ret = bch2_btree_insert_at(c, &disk_res, &i_sectors_hook.hook,
&ei->journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &reservation.k_i));
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
btree_iter_err:
if (ret < 0 && ret != -EINTR)
goto err_put_sectors_dirty;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
i_sectors_dirty_put(ei, &i_sectors_hook);
@@ -2357,7 +2357,7 @@ btree_iter_err:
i_size_write(inode, new_size);
mutex_lock(&ei->update_lock);
- ret = bch_write_inode_size(c, ei, inode->i_size);
+ ret = bch2_write_inode_size(c, ei, inode->i_size);
mutex_unlock(&ei->update_lock);
}
@@ -2372,7 +2372,7 @@ btree_iter_err:
if (ei->i_size != inode->i_size) {
mutex_lock(&ei->update_lock);
- ret = bch_write_inode_size(c, ei, inode->i_size);
+ ret = bch2_write_inode_size(c, ei, inode->i_size);
mutex_unlock(&ei->update_lock);
}
}
@@ -2384,25 +2384,25 @@ btree_iter_err:
err_put_sectors_dirty:
i_sectors_dirty_put(ei, &i_sectors_hook);
err:
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
pagecache_block_put(&mapping->add_lock);
inode_unlock(inode);
return ret;
}
-long bch_fallocate_dispatch(struct file *file, int mode,
- loff_t offset, loff_t len)
+long bch2_fallocate_dispatch(struct file *file, int mode,
+ loff_t offset, loff_t len)
{
struct inode *inode = file_inode(file);
if (!(mode & ~(FALLOC_FL_KEEP_SIZE|FALLOC_FL_ZERO_RANGE)))
- return bch_fallocate(inode, mode, offset, len);
+ return bch2_fallocate(inode, mode, offset, len);
if (mode == (FALLOC_FL_PUNCH_HOLE|FALLOC_FL_KEEP_SIZE))
- return bch_fpunch(inode, offset, len);
+ return bch2_fpunch(inode, offset, len);
if (mode == FALLOC_FL_COLLAPSE_RANGE)
- return bch_fcollapse(inode, offset, len);
+ return bch2_fcollapse(inode, offset, len);
return -EOPNOTSUPP;
}
@@ -2415,7 +2415,7 @@ static bool page_is_data(struct page *page)
page_state(page)->dirty_sectors);
}
-static loff_t bch_next_pagecache_data(struct inode *inode,
+static loff_t bch2_next_pagecache_data(struct inode *inode,
loff_t start_offset,
loff_t end_offset)
{
@@ -2445,7 +2445,7 @@ static loff_t bch_next_pagecache_data(struct inode *inode,
return end_offset;
}
-static loff_t bch_seek_data(struct file *file, u64 offset)
+static loff_t bch2_seek_data(struct file *file, u64 offset)
{
struct inode *inode = file->f_mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -2469,12 +2469,12 @@ static loff_t bch_seek_data(struct file *file, u64 offset)
break;
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
return ret;
if (next_data > offset)
- next_data = bch_next_pagecache_data(inode, offset, next_data);
+ next_data = bch2_next_pagecache_data(inode, offset, next_data);
if (next_data > isize)
return -ENXIO;
@@ -2497,7 +2497,7 @@ static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
return ret;
}
-static loff_t bch_next_pagecache_hole(struct inode *inode,
+static loff_t bch2_next_pagecache_hole(struct inode *inode,
loff_t start_offset,
loff_t end_offset)
{
@@ -2514,7 +2514,7 @@ static loff_t bch_next_pagecache_hole(struct inode *inode,
return end_offset;
}
-static loff_t bch_seek_hole(struct file *file, u64 offset)
+static loff_t bch2_seek_hole(struct file *file, u64 offset)
{
struct inode *inode = file->f_mapping->host;
struct bch_fs *c = inode->i_sb->s_fs_info;
@@ -2530,11 +2530,11 @@ static loff_t bch_seek_hole(struct file *file, u64 offset)
for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
POS(inode->i_ino, offset >> 9), k) {
if (k.k->p.inode != inode->i_ino) {
- next_hole = bch_next_pagecache_hole(inode,
+ next_hole = bch2_next_pagecache_hole(inode,
offset, MAX_LFS_FILESIZE);
break;
} else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch_next_pagecache_hole(inode,
+ next_hole = bch2_next_pagecache_hole(inode,
max(offset, bkey_start_offset(k.k) << 9),
k.k->p.offset << 9);
@@ -2545,7 +2545,7 @@ static loff_t bch_seek_hole(struct file *file, u64 offset)
}
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
return ret;
@@ -2555,7 +2555,7 @@ static loff_t bch_seek_hole(struct file *file, u64 offset)
return vfs_setpos(file, next_hole, MAX_LFS_FILESIZE);
}
-loff_t bch_llseek(struct file *file, loff_t offset, int whence)
+loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
{
switch (whence) {
case SEEK_SET:
@@ -2563,9 +2563,9 @@ loff_t bch_llseek(struct file *file, loff_t offset, int whence)
case SEEK_END:
return generic_file_llseek(file, offset, whence);
case SEEK_DATA:
- return bch_seek_data(file, offset);
+ return bch2_seek_data(file, offset);
case SEEK_HOLE:
- return bch_seek_hole(file, offset);
+ return bch2_seek_hole(file, offset);
}
return -EINVAL;
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
index 4c428978a90f..f3fcf947d98b 100644
--- a/fs/bcachefs/fs-io.h
+++ b/fs/bcachefs/fs-io.h
@@ -4,36 +4,36 @@
#include "buckets.h"
#include <linux/uio.h>
-int bch_set_page_dirty(struct page *);
+int bch2_set_page_dirty(struct page *);
-int bch_writepage(struct page *, struct writeback_control *);
-int bch_readpage(struct file *, struct page *);
+int bch2_writepage(struct page *, struct writeback_control *);
+int bch2_readpage(struct file *, struct page *);
-int bch_writepages(struct address_space *, struct writeback_control *);
-int bch_readpages(struct file *, struct address_space *,
- struct list_head *, unsigned);
+int bch2_writepages(struct address_space *, struct writeback_control *);
+int bch2_readpages(struct file *, struct address_space *,
+ struct list_head *, unsigned);
-int bch_write_begin(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page **, void **);
-int bch_write_end(struct file *, struct address_space *, loff_t,
- unsigned, unsigned, struct page *, void *);
+int bch2_write_begin(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page **, void **);
+int bch2_write_end(struct file *, struct address_space *, loff_t,
+ unsigned, unsigned, struct page *, void *);
-ssize_t bch_direct_IO(struct kiocb *, struct iov_iter *);
+ssize_t bch2_direct_IO(struct kiocb *, struct iov_iter *);
-ssize_t bch_write_iter(struct kiocb *, struct iov_iter *);
+ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
-int bch_fsync(struct file *, loff_t, loff_t, int);
+int bch2_fsync(struct file *, loff_t, loff_t, int);
-int bch_truncate(struct inode *, struct iattr *);
-long bch_fallocate_dispatch(struct file *, int, loff_t, loff_t);
+int bch2_truncate(struct inode *, struct iattr *);
+long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
-loff_t bch_llseek(struct file *, loff_t, int);
+loff_t bch2_llseek(struct file *, loff_t, int);
-int bch_page_mkwrite(struct vm_area_struct *, struct vm_fault *);
-void bch_invalidatepage(struct page *, unsigned int, unsigned int);
-int bch_releasepage(struct page *, gfp_t);
-int bch_migrate_page(struct address_space *, struct page *,
- struct page *, enum migrate_mode);
+int bch2_page_mkwrite(struct vm_area_struct *, struct vm_fault *);
+void bch2_invalidatepage(struct page *, unsigned int, unsigned int);
+int bch2_releasepage(struct page *, gfp_t);
+int bch2_migrate_page(struct address_space *, struct page *,
+ struct page *, enum migrate_mode);
struct i_sectors_hook {
struct extent_insert_hook hook;
@@ -58,7 +58,7 @@ struct bch_writepage_io {
struct bch_write_bio bio;
};
-extern struct bio_set *bch_writepage_bioset;
+extern struct bio_set *bch2_writepage_bioset;
struct dio_write {
struct closure cl;
@@ -82,7 +82,7 @@ struct dio_write {
struct bch_write_bio bio;
};
-extern struct bio_set *bch_dio_write_bioset;
+extern struct bio_set *bch2_dio_write_bioset;
struct dio_read {
struct closure cl;
@@ -91,6 +91,6 @@ struct dio_read {
struct bch_read_bio rbio;
};
-extern struct bio_set *bch_dio_read_bioset;
+extern struct bio_set *bch2_dio_read_bioset;
#endif /* _BCACHE_FS_IO_H */
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index f45babd7fd2e..94c5a9e6bdd7 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "acl.h"
#include "btree_update.h"
#include "buckets.h"
@@ -24,11 +24,11 @@
#include <linux/statfs.h>
#include <linux/xattr.h>
-static struct kmem_cache *bch_inode_cache;
+static struct kmem_cache *bch2_inode_cache;
-static void bch_vfs_inode_init(struct bch_fs *,
- struct bch_inode_info *,
- struct bch_inode_unpacked *);
+static void bch2_vfs_inode_init(struct bch_fs *,
+ struct bch_inode_info *,
+ struct bch_inode_unpacked *);
/*
* I_SIZE_DIRTY requires special handling:
@@ -58,10 +58,10 @@ static void bch_vfs_inode_init(struct bch_fs *,
* be set explicitly.
*/
-int __must_check __bch_write_inode(struct bch_fs *c,
- struct bch_inode_info *ei,
- inode_set_fn set,
- void *p)
+int __must_check __bch2_write_inode(struct bch_fs *c,
+ struct bch_inode_info *ei,
+ inode_set_fn set,
+ void *p)
{
struct btree_iter iter;
struct inode *inode = &ei->vfs_inode;
@@ -81,21 +81,21 @@ int __must_check __bch_write_inode(struct bch_fs *c,
lockdep_assert_held(&ei->update_lock);
- bch_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(inum, 0));
+ bch2_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(inum, 0));
do {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
if ((ret = btree_iter_err(k)))
goto out;
if (WARN_ONCE(k.k->type != BCH_INODE_FS,
"inode %llu not found when updating", inum)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return -ENOENT;
}
- ret = bch_inode_unpack(bkey_s_c_to_inode(k), &inode_u);
+ ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u);
if (WARN_ONCE(ret,
"error %i unpacking inode %llu", ret, inum)) {
ret = -ENOENT;
@@ -115,13 +115,13 @@ int __must_check __bch_write_inode(struct bch_fs *c,
inode_u.i_gid = i_gid_read(inode);
inode_u.i_nlink = i_nlink - nlink_bias(inode->i_mode);
inode_u.i_dev = inode->i_rdev;
- inode_u.i_atime = timespec_to_bch_time(c, inode->i_atime);
- inode_u.i_mtime = timespec_to_bch_time(c, inode->i_mtime);
- inode_u.i_ctime = timespec_to_bch_time(c, inode->i_ctime);
+ inode_u.i_atime = timespec_to_bch2_time(c, inode->i_atime);
+ inode_u.i_mtime = timespec_to_bch2_time(c, inode->i_mtime);
+ inode_u.i_ctime = timespec_to_bch2_time(c, inode->i_ctime);
- bch_inode_pack(&inode_p, &inode_u);
+ bch2_inode_pack(&inode_p, &inode_u);
- ret = bch_btree_insert_at(c, NULL, NULL, &ei->journal_seq,
+ ret = bch2_btree_insert_at(c, NULL, NULL, &ei->journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &inode_p.inode.k_i));
@@ -132,42 +132,42 @@ int __must_check __bch_write_inode(struct bch_fs *c,
ei->i_flags = inode_u.i_flags;
}
out:
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret < 0 ? ret : 0;
}
-int __must_check bch_write_inode(struct bch_fs *c,
- struct bch_inode_info *ei)
+int __must_check bch2_write_inode(struct bch_fs *c,
+ struct bch_inode_info *ei)
{
- return __bch_write_inode(c, ei, NULL, NULL);
+ return __bch2_write_inode(c, ei, NULL, NULL);
}
-int bch_inc_nlink(struct bch_fs *c, struct bch_inode_info *ei)
+int bch2_inc_nlink(struct bch_fs *c, struct bch_inode_info *ei)
{
int ret;
mutex_lock(&ei->update_lock);
inc_nlink(&ei->vfs_inode);
- ret = bch_write_inode(c, ei);
+ ret = bch2_write_inode(c, ei);
mutex_unlock(&ei->update_lock);
return ret;
}
-int bch_dec_nlink(struct bch_fs *c, struct bch_inode_info *ei)
+int bch2_dec_nlink(struct bch_fs *c, struct bch_inode_info *ei)
{
int ret = 0;
mutex_lock(&ei->update_lock);
drop_nlink(&ei->vfs_inode);
- ret = bch_write_inode(c, ei);
+ ret = bch2_write_inode(c, ei);
mutex_unlock(&ei->update_lock);
return ret;
}
-static struct inode *bch_vfs_inode_get(struct super_block *sb, u64 inum)
+static struct inode *bch2_vfs_inode_get(struct super_block *sb, u64 inum)
{
struct bch_fs *c = sb->s_fs_info;
struct inode *inode;
@@ -183,25 +183,25 @@ static struct inode *bch_vfs_inode_get(struct super_block *sb, u64 inum)
if (!(inode->i_state & I_NEW))
return inode;
- ret = bch_inode_find_by_inum(c, inum, &inode_u);
+ ret = bch2_inode_find_by_inum(c, inum, &inode_u);
if (ret) {
iget_failed(inode);
return ERR_PTR(ret);
}
ei = to_bch_ei(inode);
- bch_vfs_inode_init(c, ei, &inode_u);
+ bch2_vfs_inode_init(c, ei, &inode_u);
- ei->journal_seq = bch_inode_journal_seq(&c->journal, inum);
+ ei->journal_seq = bch2_inode_journal_seq(&c->journal, inum);
unlock_new_inode(inode);
return inode;
}
-static struct inode *bch_vfs_inode_create(struct bch_fs *c,
- struct inode *parent,
- umode_t mode, dev_t rdev)
+static struct inode *bch2_vfs_inode_create(struct bch_fs *c,
+ struct inode *parent,
+ umode_t mode, dev_t rdev)
{
struct inode *inode;
struct posix_acl *default_acl = NULL, *acl = NULL;
@@ -224,11 +224,11 @@ static struct inode *bch_vfs_inode_create(struct bch_fs *c,
ei = to_bch_ei(inode);
- bch_inode_init(c, &inode_u, i_uid_read(inode),
+ bch2_inode_init(c, &inode_u, i_uid_read(inode),
i_gid_read(inode), inode->i_mode, rdev);
- bch_inode_pack(&inode_p, &inode_u);
+ bch2_inode_pack(&inode_p, &inode_u);
- ret = bch_inode_create(c, &inode_p.inode.k_i,
+ ret = bch2_inode_create(c, &inode_p.inode.k_i,
BLOCKDEV_INODE_MAX, 0,
&c->unused_inode_hint);
if (unlikely(ret)) {
@@ -241,16 +241,16 @@ static struct inode *bch_vfs_inode_create(struct bch_fs *c,
}
inode_u.inum = inode_p.inode.k.p.inode;
- bch_vfs_inode_init(c, ei, &inode_u);
+ bch2_vfs_inode_init(c, ei, &inode_u);
if (default_acl) {
- ret = bch_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ ret = bch2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
if (unlikely(ret))
goto err;
}
if (acl) {
- ret = bch_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ ret = bch2_set_acl(inode, acl, ACL_TYPE_ACCESS);
if (unlikely(ret))
goto err;
}
@@ -268,14 +268,14 @@ err:
goto out;
}
-static int bch_vfs_dirent_create(struct bch_fs *c, struct inode *dir,
- u8 type, const struct qstr *name,
- struct inode *dst)
+static int bch2_vfs_dirent_create(struct bch_fs *c, struct inode *dir,
+ u8 type, const struct qstr *name,
+ struct inode *dst)
{
struct bch_inode_info *dir_ei = to_bch_ei(dir);
int ret;
- ret = bch_dirent_create(c, dir->i_ino, &dir_ei->str_hash,
+ ret = bch2_dirent_create(c, dir->i_ino, &dir_ei->str_hash,
type, name, dst->i_ino,
&dir_ei->journal_seq,
BCH_HASH_SET_MUST_CREATE);
@@ -287,8 +287,8 @@ static int bch_vfs_dirent_create(struct bch_fs *c, struct inode *dir,
return 0;
}
-static int __bch_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int __bch2_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t rdev)
{
struct bch_inode_info *dir_ei = to_bch_ei(dir);
struct bch_fs *c = dir->i_sb->s_fs_info;
@@ -296,13 +296,13 @@ static int __bch_create(struct inode *dir, struct dentry *dentry,
struct bch_inode_info *ei;
int ret;
- inode = bch_vfs_inode_create(c, dir, mode, rdev);
+ inode = bch2_vfs_inode_create(c, dir, mode, rdev);
if (unlikely(IS_ERR(inode)))
return PTR_ERR(inode);
ei = to_bch_ei(inode);
- ret = bch_vfs_dirent_create(c, dir, mode_to_type(mode),
+ ret = bch2_vfs_dirent_create(c, dir, mode_to_type(mode),
&dentry->d_name, inode);
if (unlikely(ret)) {
clear_nlink(inode);
@@ -319,32 +319,32 @@ static int __bch_create(struct inode *dir, struct dentry *dentry,
/* methods */
-static struct dentry *bch_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+static struct dentry *bch2_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct bch_inode_info *dir_ei = to_bch_ei(dir);
struct inode *inode = NULL;
u64 inum;
- inum = bch_dirent_lookup(c, dir->i_ino,
+ inum = bch2_dirent_lookup(c, dir->i_ino,
&dir_ei->str_hash,
&dentry->d_name);
if (inum)
- inode = bch_vfs_inode_get(dir->i_sb, inum);
+ inode = bch2_vfs_inode_get(dir->i_sb, inum);
return d_splice_alias(inode, dentry);
}
-static int bch_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, bool excl)
+static int bch2_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, bool excl)
{
- return __bch_create(dir, dentry, mode|S_IFREG, 0);
+ return __bch2_create(dir, dentry, mode|S_IFREG, 0);
}
-static int bch_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *dentry)
+static int bch2_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct inode *inode = old_dentry->d_inode;
@@ -355,16 +355,16 @@ static int bch_link(struct dentry *old_dentry, struct inode *dir,
inode->i_ctime = current_fs_time(dir->i_sb);
- ret = bch_inc_nlink(c, ei);
+ ret = bch2_inc_nlink(c, ei);
if (ret)
return ret;
ihold(inode);
- ret = bch_vfs_dirent_create(c, dir, mode_to_type(inode->i_mode),
+ ret = bch2_vfs_dirent_create(c, dir, mode_to_type(inode->i_mode),
&dentry->d_name, inode);
if (unlikely(ret)) {
- bch_dec_nlink(c, ei);
+ bch2_dec_nlink(c, ei);
iput(inode);
return ret;
}
@@ -373,7 +373,7 @@ static int bch_link(struct dentry *old_dentry, struct inode *dir,
return 0;
}
-static int bch_unlink(struct inode *dir, struct dentry *dentry)
+static int bch2_unlink(struct inode *dir, struct dentry *dentry)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct bch_inode_info *dir_ei = to_bch_ei(dir);
@@ -383,7 +383,7 @@ static int bch_unlink(struct inode *dir, struct dentry *dentry)
lockdep_assert_held(&inode->i_rwsem);
- ret = bch_dirent_delete(c, dir->i_ino, &dir_ei->str_hash,
+ ret = bch2_dirent_delete(c, dir->i_ino, &dir_ei->str_hash,
&dentry->d_name, &dir_ei->journal_seq);
if (ret)
return ret;
@@ -394,24 +394,24 @@ static int bch_unlink(struct inode *dir, struct dentry *dentry)
inode->i_ctime = dir->i_ctime;
if (S_ISDIR(inode->i_mode)) {
- bch_dec_nlink(c, dir_ei);
+ bch2_dec_nlink(c, dir_ei);
drop_nlink(inode);
}
- bch_dec_nlink(c, ei);
+ bch2_dec_nlink(c, ei);
return 0;
}
-static int bch_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
+static int bch2_symlink(struct inode *dir, struct dentry *dentry,
+ const char *symname)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct inode *inode;
struct bch_inode_info *ei, *dir_ei = to_bch_ei(dir);
int ret;
- inode = bch_vfs_inode_create(c, dir, S_IFLNK|S_IRWXUGO, 0);
+ inode = bch2_vfs_inode_create(c, dir, S_IFLNK|S_IRWXUGO, 0);
if (unlikely(IS_ERR(inode)))
return PTR_ERR(inode);
@@ -432,7 +432,7 @@ static int bch_symlink(struct inode *dir, struct dentry *dentry,
if (dir_ei->journal_seq < ei->journal_seq)
dir_ei->journal_seq = ei->journal_seq;
- ret = bch_vfs_dirent_create(c, dir, DT_LNK, &dentry->d_name, inode);
+ ret = bch2_vfs_dirent_create(c, dir, DT_LNK, &dentry->d_name, inode);
if (unlikely(ret))
goto err;
@@ -444,41 +444,41 @@ err:
return ret;
}
-static int bch_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int bch2_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
int ret;
lockdep_assert_held(&dir->i_rwsem);
- ret = __bch_create(dir, dentry, mode|S_IFDIR, 0);
+ ret = __bch2_create(dir, dentry, mode|S_IFDIR, 0);
if (unlikely(ret))
return ret;
- bch_inc_nlink(c, to_bch_ei(dir));
+ bch2_inc_nlink(c, to_bch_ei(dir));
return 0;
}
-static int bch_rmdir(struct inode *dir, struct dentry *dentry)
+static int bch2_rmdir(struct inode *dir, struct dentry *dentry)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct inode *inode = dentry->d_inode;
- if (bch_empty_dir(c, inode->i_ino))
+ if (bch2_empty_dir(c, inode->i_ino))
return -ENOTEMPTY;
- return bch_unlink(dir, dentry);
+ return bch2_unlink(dir, dentry);
}
-static int bch_mknod(struct inode *dir, struct dentry *dentry,
- umode_t mode, dev_t rdev)
+static int bch2_mknod(struct inode *dir, struct dentry *dentry,
+ umode_t mode, dev_t rdev)
{
- return __bch_create(dir, dentry, mode, rdev);
+ return __bch2_create(dir, dentry, mode, rdev);
}
-static int bch_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int bch2_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct bch_fs *c = old_dir->i_sb->s_fs_info;
struct inode *old_inode = old_dentry->d_inode;
@@ -500,10 +500,10 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!S_ISDIR(new_inode->i_mode))
return -ENOTDIR;
- if (bch_empty_dir(c, new_inode->i_ino))
+ if (bch2_empty_dir(c, new_inode->i_ino))
return -ENOTEMPTY;
- ret = bch_dirent_rename(c,
+ ret = bch2_dirent_rename(c,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name,
&ei->journal_seq, BCH_RENAME_OVERWRITE);
@@ -511,11 +511,11 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry,
return ret;
clear_nlink(new_inode);
- bch_dec_nlink(c, to_bch_ei(old_dir));
+ bch2_dec_nlink(c, to_bch_ei(old_dir));
} else if (new_inode) {
lockdep_assert_held(&new_inode->i_rwsem);
- ret = bch_dirent_rename(c,
+ ret = bch2_dirent_rename(c,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name,
&ei->journal_seq, BCH_RENAME_OVERWRITE);
@@ -523,19 +523,19 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry,
return ret;
new_inode->i_ctime = now;
- bch_dec_nlink(c, to_bch_ei(new_inode));
+ bch2_dec_nlink(c, to_bch_ei(new_inode));
} else if (S_ISDIR(old_inode->i_mode)) {
- ret = bch_dirent_rename(c,
+ ret = bch2_dirent_rename(c,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name,
&ei->journal_seq, BCH_RENAME);
if (unlikely(ret))
return ret;
- bch_inc_nlink(c, to_bch_ei(new_dir));
- bch_dec_nlink(c, to_bch_ei(old_dir));
+ bch2_inc_nlink(c, to_bch_ei(new_dir));
+ bch2_dec_nlink(c, to_bch_ei(old_dir));
} else {
- ret = bch_dirent_rename(c,
+ ret = bch2_dirent_rename(c,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name,
&ei->journal_seq, BCH_RENAME);
@@ -554,8 +554,8 @@ static int bch_rename(struct inode *old_dir, struct dentry *old_dentry,
return 0;
}
-static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int bch2_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct bch_fs *c = old_dir->i_sb->s_fs_info;
struct inode *old_inode = old_dentry->d_inode;
@@ -564,7 +564,7 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
struct timespec now = current_fs_time(old_dir->i_sb);
int ret;
- ret = bch_dirent_rename(c,
+ ret = bch2_dirent_rename(c,
old_dir, &old_dentry->d_name,
new_dir, &new_dentry->d_name,
&ei->journal_seq, BCH_RENAME_EXCHANGE);
@@ -574,11 +574,11 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(old_inode->i_mode) !=
S_ISDIR(new_inode->i_mode)) {
if (S_ISDIR(old_inode->i_mode)) {
- bch_inc_nlink(c, to_bch_ei(new_dir));
- bch_dec_nlink(c, to_bch_ei(old_dir));
+ bch2_inc_nlink(c, to_bch_ei(new_dir));
+ bch2_dec_nlink(c, to_bch_ei(old_dir));
} else {
- bch_dec_nlink(c, to_bch_ei(new_dir));
- bch_inc_nlink(c, to_bch_ei(old_dir));
+ bch2_dec_nlink(c, to_bch_ei(new_dir));
+ bch2_inc_nlink(c, to_bch_ei(old_dir));
}
}
@@ -595,21 +595,21 @@ static int bch_rename_exchange(struct inode *old_dir, struct dentry *old_dentry,
return 0;
}
-static int bch_rename2(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry,
- unsigned flags)
+static int bch2_rename2(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned flags)
{
if (flags & ~(RENAME_NOREPLACE|RENAME_EXCHANGE))
return -EINVAL;
if (flags & RENAME_EXCHANGE)
- return bch_rename_exchange(old_dir, old_dentry,
+ return bch2_rename_exchange(old_dir, old_dentry,
new_dir, new_dentry);
- return bch_rename(old_dir, old_dentry, new_dir, new_dentry);
+ return bch2_rename(old_dir, old_dentry, new_dir, new_dentry);
}
-static int bch_setattr(struct dentry *dentry, struct iattr *iattr)
+static int bch2_setattr(struct dentry *dentry, struct iattr *iattr)
{
struct inode *inode = dentry->d_inode;
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -626,11 +626,11 @@ static int bch_setattr(struct dentry *dentry, struct iattr *iattr)
return ret;
if (iattr->ia_valid & ATTR_SIZE) {
- ret = bch_truncate(inode, iattr);
+ ret = bch2_truncate(inode, iattr);
} else {
mutex_lock(&ei->update_lock);
setattr_copy(inode, iattr);
- ret = bch_write_inode(c, ei);
+ ret = bch2_write_inode(c, ei);
mutex_unlock(&ei->update_lock);
}
@@ -643,13 +643,13 @@ static int bch_setattr(struct dentry *dentry, struct iattr *iattr)
return ret;
}
-static int bch_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int bch2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct bch_fs *c = dir->i_sb->s_fs_info;
struct inode *inode;
/* XXX: i_nlink should be 0? */
- inode = bch_vfs_inode_create(c, dir, mode, 0);
+ inode = bch2_vfs_inode_create(c, dir, mode, 0);
if (unlikely(IS_ERR(inode)))
return PTR_ERR(inode);
@@ -657,8 +657,8 @@ static int bch_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
}
-static int bch_fill_extent(struct fiemap_extent_info *info,
- const struct bkey_i *k, unsigned flags)
+static int bch2_fill_extent(struct fiemap_extent_info *info,
+ const struct bkey_i *k, unsigned flags)
{
if (bkey_extent_is_data(&k->k)) {
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
@@ -700,8 +700,8 @@ static int bch_fill_extent(struct fiemap_extent_info *info,
}
}
-static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info,
- u64 start, u64 len)
+static int bch2_fiemap(struct inode *inode, struct fiemap_extent_info *info,
+ u64 start, u64 len)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
struct btree_iter iter;
@@ -722,7 +722,7 @@ static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info,
break;
if (have_extent) {
- ret = bch_fill_extent(info, &tmp.k, 0);
+ ret = bch2_fill_extent(info, &tmp.k, 0);
if (ret)
goto out;
}
@@ -732,19 +732,19 @@ static int bch_fiemap(struct inode *inode, struct fiemap_extent_info *info,
}
if (have_extent)
- ret = bch_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST);
+ ret = bch2_fill_extent(info, &tmp.k, FIEMAP_EXTENT_LAST);
out:
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret < 0 ? ret : 0;
}
static const struct vm_operations_struct bch_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
- .page_mkwrite = bch_page_mkwrite,
+ .page_mkwrite = bch2_page_mkwrite,
};
-static int bch_mmap(struct file *file, struct vm_area_struct *vma)
+static int bch2_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
@@ -769,8 +769,8 @@ static const unsigned bch_inode_flags_to_user_flags_map[] = {
[__BCH_INODE_NOATIME] = FS_NOATIME_FL,
};
-/* Set VFS inode flags from bcache inode: */
-static void bch_inode_flags_to_vfs(struct inode *inode)
+/* Set VFS inode flags from bcachefs inode: */
+static void bch2_inode_flags_to_vfs(struct inode *inode)
{
unsigned i, flags = to_bch_ei(inode)->i_flags;
@@ -781,8 +781,8 @@ static void bch_inode_flags_to_vfs(struct inode *inode)
inode->i_flags &= ~bch_inode_flags_to_vfs_flags_map[i];
}
-/* Get FS_IOC_GETFLAGS flags from bcache inode: */
-static unsigned bch_inode_flags_to_user_flags(unsigned flags)
+/* Get FS_IOC_GETFLAGS flags from bcachefs inode: */
+static unsigned bch2_inode_flags_to_user_flags(unsigned flags)
{
unsigned i, ret = 0;
@@ -793,16 +793,16 @@ static unsigned bch_inode_flags_to_user_flags(unsigned flags)
return ret;
}
-static int bch_inode_user_flags_set(struct bch_inode_info *ei,
- struct bch_inode_unpacked *bi,
- void *p)
+static int bch2_inode_user_flags_set(struct bch_inode_info *ei,
+ struct bch_inode_unpacked *bi,
+ void *p)
{
/*
* We're relying on btree locking here for exclusion with other ioctl
* calls - use the flags in the btree (@bi), not ei->i_flags:
*/
unsigned bch_flags = bi->i_flags;
- unsigned oldflags = bch_inode_flags_to_user_flags(bch_flags);
+ unsigned oldflags = bch2_inode_flags_to_user_flags(bch_flags);
unsigned newflags = *((unsigned *) p);
unsigned i;
@@ -831,8 +831,8 @@ static int bch_inode_user_flags_set(struct bch_inode_info *ei,
#define FS_IOC_GOINGDOWN _IOR ('X', 125, __u32)
-static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static long bch2_fs_file_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct super_block *sb = inode->i_sb;
@@ -843,7 +843,7 @@ static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd,
switch (cmd) {
case FS_IOC_GETFLAGS:
- return put_user(bch_inode_flags_to_user_flags(ei->i_flags),
+ return put_user(bch2_inode_flags_to_user_flags(ei->i_flags),
(int __user *) arg);
case FS_IOC_SETFLAGS: {
@@ -871,11 +871,11 @@ static long bch_fs_file_ioctl(struct file *filp, unsigned int cmd,
inode_lock(inode);
mutex_lock(&ei->update_lock);
- ret = __bch_write_inode(c, ei, bch_inode_user_flags_set, &flags);
+ ret = __bch2_write_inode(c, ei, bch2_inode_user_flags_set, &flags);
mutex_unlock(&ei->update_lock);
if (!ret)
- bch_inode_flags_to_vfs(inode);
+ bch2_inode_flags_to_vfs(inode);
inode_unlock(inode);
setflags_out:
@@ -894,17 +894,17 @@ setflags_out:
down_write(&sb->s_umount);
sb->s_flags |= MS_RDONLY;
- bch_fs_emergency_read_only(c);
+ bch2_fs_emergency_read_only(c);
up_write(&sb->s_umount);
return 0;
default:
- return bch_fs_ioctl(c, cmd, (void __user *) arg);
+ return bch2_fs_ioctl(c, cmd, (void __user *) arg);
}
}
#ifdef CONFIG_COMPAT
-static long bch_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long bch2_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
/* These are just misnamed, they actually get/put from/to user an int */
switch (cmd) {
@@ -917,114 +917,114 @@ static long bch_compat_fs_ioctl(struct file *file, unsigned int cmd, unsigned lo
default:
return -ENOIOCTLCMD;
}
- return bch_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+ return bch2_fs_file_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
/* Directories: */
-static loff_t bch_dir_llseek(struct file *file, loff_t offset, int whence)
+static loff_t bch2_dir_llseek(struct file *file, loff_t offset, int whence)
{
return generic_file_llseek_size(file, offset, whence,
S64_MAX, S64_MAX);
}
-static int bch_vfs_readdir(struct file *file, struct dir_context *ctx)
+static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct bch_fs *c = inode->i_sb->s_fs_info;
- return bch_readdir(c, file, ctx);
+ return bch2_readdir(c, file, ctx);
}
static const struct file_operations bch_file_operations = {
- .llseek = bch_llseek,
+ .llseek = bch2_llseek,
.read_iter = generic_file_read_iter,
- .write_iter = bch_write_iter,
- .mmap = bch_mmap,
+ .write_iter = bch2_write_iter,
+ .mmap = bch2_mmap,
.open = generic_file_open,
- .fsync = bch_fsync,
+ .fsync = bch2_fsync,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
- .fallocate = bch_fallocate_dispatch,
- .unlocked_ioctl = bch_fs_file_ioctl,
+ .fallocate = bch2_fallocate_dispatch,
+ .unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = bch_compat_fs_ioctl,
+ .compat_ioctl = bch2_compat_fs_ioctl,
#endif
};
static const struct inode_operations bch_file_inode_operations = {
- .setattr = bch_setattr,
- .fiemap = bch_fiemap,
- .listxattr = bch_xattr_list,
- .get_acl = bch_get_acl,
- .set_acl = bch_set_acl,
+ .setattr = bch2_setattr,
+ .fiemap = bch2_fiemap,
+ .listxattr = bch2_xattr_list,
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
};
static const struct inode_operations bch_dir_inode_operations = {
- .lookup = bch_lookup,
- .create = bch_create,
- .link = bch_link,
- .unlink = bch_unlink,
- .symlink = bch_symlink,
- .mkdir = bch_mkdir,
- .rmdir = bch_rmdir,
- .mknod = bch_mknod,
- .rename = bch_rename2,
- .setattr = bch_setattr,
- .tmpfile = bch_tmpfile,
- .listxattr = bch_xattr_list,
- .get_acl = bch_get_acl,
- .set_acl = bch_set_acl,
+ .lookup = bch2_lookup,
+ .create = bch2_create,
+ .link = bch2_link,
+ .unlink = bch2_unlink,
+ .symlink = bch2_symlink,
+ .mkdir = bch2_mkdir,
+ .rmdir = bch2_rmdir,
+ .mknod = bch2_mknod,
+ .rename = bch2_rename2,
+ .setattr = bch2_setattr,
+ .tmpfile = bch2_tmpfile,
+ .listxattr = bch2_xattr_list,
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
};
static const struct file_operations bch_dir_file_operations = {
- .llseek = bch_dir_llseek,
+ .llseek = bch2_dir_llseek,
.read = generic_read_dir,
- .iterate = bch_vfs_readdir,
- .fsync = bch_fsync,
- .unlocked_ioctl = bch_fs_file_ioctl,
+ .iterate = bch2_vfs_readdir,
+ .fsync = bch2_fsync,
+ .unlocked_ioctl = bch2_fs_file_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = bch_compat_fs_ioctl,
+ .compat_ioctl = bch2_compat_fs_ioctl,
#endif
};
static const struct inode_operations bch_symlink_inode_operations = {
.readlink = generic_readlink,
.get_link = page_get_link,
- .setattr = bch_setattr,
- .listxattr = bch_xattr_list,
- .get_acl = bch_get_acl,
- .set_acl = bch_set_acl,
+ .setattr = bch2_setattr,
+ .listxattr = bch2_xattr_list,
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
};
static const struct inode_operations bch_special_inode_operations = {
- .setattr = bch_setattr,
- .listxattr = bch_xattr_list,
- .get_acl = bch_get_acl,
- .set_acl = bch_set_acl,
+ .setattr = bch2_setattr,
+ .listxattr = bch2_xattr_list,
+ .get_acl = bch2_get_acl,
+ .set_acl = bch2_set_acl,
};
static const struct address_space_operations bch_address_space_operations = {
- .writepage = bch_writepage,
- .readpage = bch_readpage,
- .writepages = bch_writepages,
- .readpages = bch_readpages,
- .set_page_dirty = bch_set_page_dirty,
- .write_begin = bch_write_begin,
- .write_end = bch_write_end,
- .invalidatepage = bch_invalidatepage,
- .releasepage = bch_releasepage,
- .direct_IO = bch_direct_IO,
+ .writepage = bch2_writepage,
+ .readpage = bch2_readpage,
+ .writepages = bch2_writepages,
+ .readpages = bch2_readpages,
+ .set_page_dirty = bch2_set_page_dirty,
+ .write_begin = bch2_write_begin,
+ .write_end = bch2_write_end,
+ .invalidatepage = bch2_invalidatepage,
+ .releasepage = bch2_releasepage,
+ .direct_IO = bch2_direct_IO,
#ifdef CONFIG_MIGRATION
- .migratepage = bch_migrate_page,
+ .migratepage = bch2_migrate_page,
#endif
.error_remove_page = generic_error_remove_page,
};
-static void bch_vfs_inode_init(struct bch_fs *c,
- struct bch_inode_info *ei,
- struct bch_inode_unpacked *bi)
+static void bch2_vfs_inode_init(struct bch_fs *c,
+ struct bch_inode_info *ei,
+ struct bch_inode_unpacked *bi)
{
struct inode *inode = &ei->vfs_inode;
@@ -1046,12 +1046,12 @@ static void bch_vfs_inode_init(struct bch_fs *c,
inode->i_rdev = bi->i_dev;
inode->i_generation = bi->i_generation;
inode->i_size = bi->i_size;
- inode->i_atime = bch_time_to_timespec(c, bi->i_atime);
- inode->i_mtime = bch_time_to_timespec(c, bi->i_mtime);
- inode->i_ctime = bch_time_to_timespec(c, bi->i_ctime);
- bch_inode_flags_to_vfs(inode);
+ inode->i_atime = bch2_time_to_timespec(c, bi->i_atime);
+ inode->i_mtime = bch2_time_to_timespec(c, bi->i_mtime);
+ inode->i_ctime = bch2_time_to_timespec(c, bi->i_ctime);
+ bch2_inode_flags_to_vfs(inode);
- ei->str_hash = bch_hash_info_init(c, bi);
+ ei->str_hash = bch2_hash_info_init(c, bi);
inode->i_mapping->a_ops = &bch_address_space_operations;
@@ -1075,11 +1075,11 @@ static void bch_vfs_inode_init(struct bch_fs *c,
}
}
-static struct inode *bch_alloc_inode(struct super_block *sb)
+static struct inode *bch2_alloc_inode(struct super_block *sb)
{
struct bch_inode_info *ei;
- ei = kmem_cache_alloc(bch_inode_cache, GFP_NOFS);
+ ei = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
if (!ei)
return NULL;
@@ -1094,45 +1094,45 @@ static struct inode *bch_alloc_inode(struct super_block *sb)
return &ei->vfs_inode;
}
-static void bch_i_callback(struct rcu_head *head)
+static void bch2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(bch_inode_cache, to_bch_ei(inode));
+ kmem_cache_free(bch2_inode_cache, to_bch_ei(inode));
}
-static void bch_destroy_inode(struct inode *inode)
+static void bch2_destroy_inode(struct inode *inode)
{
- call_rcu(&inode->i_rcu, bch_i_callback);
+ call_rcu(&inode->i_rcu, bch2_i_callback);
}
-static int bch_vfs_write_inode(struct inode *inode,
- struct writeback_control *wbc)
+static int bch2_vfs_write_inode(struct inode *inode,
+ struct writeback_control *wbc)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
struct bch_inode_info *ei = to_bch_ei(inode);
int ret;
mutex_lock(&ei->update_lock);
- ret = bch_write_inode(c, ei);
+ ret = bch2_write_inode(c, ei);
mutex_unlock(&ei->update_lock);
if (c->opts.journal_flush_disabled)
return ret;
if (!ret && wbc->sync_mode == WB_SYNC_ALL)
- ret = bch_journal_flush_seq(&c->journal, ei->journal_seq);
+ ret = bch2_journal_flush_seq(&c->journal, ei->journal_seq);
return ret;
}
-static void bch_evict_inode(struct inode *inode)
+static void bch2_evict_inode(struct inode *inode)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
truncate_inode_pages_final(&inode->i_data);
- if (!bch_journal_error(&c->journal) && !is_bad_inode(inode)) {
+ if (!bch2_journal_error(&c->journal) && !is_bad_inode(inode)) {
struct bch_inode_info *ei = to_bch_ei(inode);
/* XXX - we want to check this stuff iff there weren't IO errors: */
@@ -1143,12 +1143,12 @@ static void bch_evict_inode(struct inode *inode)
clear_inode(inode);
if (!inode->i_nlink && !is_bad_inode(inode)) {
- bch_inode_rm(c, inode->i_ino);
+ bch2_inode_rm(c, inode->i_ino);
atomic_long_dec(&c->nr_inodes);
}
}
-static int bch_statfs(struct dentry *dentry, struct kstatfs *buf)
+static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct bch_fs *c = sb->s_fs_info;
@@ -1157,7 +1157,7 @@ static int bch_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = BCACHE_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = c->capacity >> PAGE_SECTOR_SHIFT;
- buf->f_bfree = (c->capacity - bch_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT;
+ buf->f_bfree = (c->capacity - bch2_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT;
buf->f_bavail = buf->f_bfree;
buf->f_files = atomic_long_read(&c->nr_inodes);
buf->f_ffree = U64_MAX;
@@ -1171,20 +1171,20 @@ static int bch_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
-static int bch_sync_fs(struct super_block *sb, int wait)
+static int bch2_sync_fs(struct super_block *sb, int wait)
{
struct bch_fs *c = sb->s_fs_info;
if (!wait) {
- bch_journal_flush_async(&c->journal, NULL);
+ bch2_journal_flush_async(&c->journal, NULL);
return 0;
}
- return bch_journal_flush(&c->journal);
+ return bch2_journal_flush(&c->journal);
}
-static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name,
- struct bch_opts opts)
+static struct bch_fs *bch2_open_as_blockdevs(const char *_dev_name,
+ struct bch_opts opts)
{
size_t nr_devs = 0, i = 0;
char *dev_name, *s, **devs;
@@ -1207,7 +1207,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name,
(s = strchr(s, ':')) && (*s++ = '\0'))
devs[i++] = s;
- err = bch_fs_open(devs, nr_devs, opts, &c);
+ err = bch2_fs_open(devs, nr_devs, opts, &c);
if (err) {
/*
* Already open?
@@ -1222,7 +1222,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name,
if (IS_ERR(bdev))
goto err;
- c2 = bch_bdev_to_fs(bdev);
+ c2 = bch2_bdev_to_fs(bdev);
bdput(bdev);
if (!c)
@@ -1240,7 +1240,7 @@ static struct bch_fs *bch_open_as_blockdevs(const char *_dev_name,
mutex_lock(&c->state_lock);
- if (!bch_fs_running(c)) {
+ if (!bch2_fs_running(c)) {
mutex_unlock(&c->state_lock);
closure_put(&c->cl);
err = "incomplete filesystem";
@@ -1261,15 +1261,15 @@ err:
return c;
}
-static int bch_remount(struct super_block *sb, int *flags, char *data)
+static int bch2_remount(struct super_block *sb, int *flags, char *data)
{
struct bch_fs *c = sb->s_fs_info;
- struct bch_opts opts = bch_opts_empty();
+ struct bch_opts opts = bch2_opts_empty();
int ret;
opts.read_only = (*flags & MS_RDONLY) != 0;
- ret = bch_parse_mount_opts(&opts, data);
+ ret = bch2_parse_mount_opts(&opts, data);
if (ret)
return ret;
@@ -1278,11 +1278,11 @@ static int bch_remount(struct super_block *sb, int *flags, char *data)
const char *err = NULL;
if (opts.read_only) {
- bch_fs_read_only(c);
+ bch2_fs_read_only(c);
sb->s_flags |= MS_RDONLY;
} else {
- err = bch_fs_read_write(c);
+ err = bch2_fs_read_write(c);
if (err) {
bch_err(c, "error going rw: %s", err);
return -EINVAL;
@@ -1301,54 +1301,54 @@ static int bch_remount(struct super_block *sb, int *flags, char *data)
}
static const struct super_operations bch_super_operations = {
- .alloc_inode = bch_alloc_inode,
- .destroy_inode = bch_destroy_inode,
- .write_inode = bch_vfs_write_inode,
- .evict_inode = bch_evict_inode,
- .sync_fs = bch_sync_fs,
- .statfs = bch_statfs,
+ .alloc_inode = bch2_alloc_inode,
+ .destroy_inode = bch2_destroy_inode,
+ .write_inode = bch2_vfs_write_inode,
+ .evict_inode = bch2_evict_inode,
+ .sync_fs = bch2_sync_fs,
+ .statfs = bch2_statfs,
.show_options = generic_show_options,
- .remount_fs = bch_remount,
+ .remount_fs = bch2_remount,
#if 0
- .put_super = bch_put_super,
- .freeze_fs = bch_freeze,
- .unfreeze_fs = bch_unfreeze,
+ .put_super = bch2_put_super,
+ .freeze_fs = bch2_freeze,
+ .unfreeze_fs = bch2_unfreeze,
#endif
};
-static int bch_test_super(struct super_block *s, void *data)
+static int bch2_test_super(struct super_block *s, void *data)
{
return s->s_fs_info == data;
}
-static int bch_set_super(struct super_block *s, void *data)
+static int bch2_set_super(struct super_block *s, void *data)
{
s->s_fs_info = data;
return 0;
}
-static struct dentry *bch_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static struct dentry *bch2_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
struct bch_fs *c;
struct bch_dev *ca;
struct super_block *sb;
struct inode *inode;
- struct bch_opts opts = bch_opts_empty();
+ struct bch_opts opts = bch2_opts_empty();
unsigned i;
int ret;
opts.read_only = (flags & MS_RDONLY) != 0;
- ret = bch_parse_mount_opts(&opts, data);
+ ret = bch2_parse_mount_opts(&opts, data);
if (ret)
return ERR_PTR(ret);
- c = bch_open_as_blockdevs(dev_name, opts);
+ c = bch2_open_as_blockdevs(dev_name, opts);
if (!c)
return ERR_PTR(-ENOENT);
- sb = sget(fs_type, bch_test_super, bch_set_super, flags|MS_NOSEC, c);
+ sb = sget(fs_type, bch2_test_super, bch2_set_super, flags|MS_NOSEC, c);
if (IS_ERR(sb)) {
closure_put(&c->cl);
return ERR_CAST(sb);
@@ -1371,7 +1371,7 @@ static struct dentry *bch_mount(struct file_system_type *fs_type,
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_op = &bch_super_operations;
- sb->s_xattr = bch_xattr_handlers;
+ sb->s_xattr = bch2_xattr_handlers;
sb->s_magic = BCACHE_STATFS_MAGIC;
sb->s_time_gran = c->sb.time_precision;
c->vfs_sb = sb;
@@ -1393,7 +1393,7 @@ static struct dentry *bch_mount(struct file_system_type *fs_type,
else
sb->s_flags |= opts.posix_acl ? MS_POSIXACL : 0;
- inode = bch_vfs_inode_get(sb, BCACHE_ROOT_INO);
+ inode = bch2_vfs_inode_get(sb, BCACHE_ROOT_INO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto err_put_super;
@@ -1414,60 +1414,60 @@ err_put_super:
return ERR_PTR(ret);
}
-static void bch_kill_sb(struct super_block *sb)
+static void bch2_kill_sb(struct super_block *sb)
{
struct bch_fs *c = sb->s_fs_info;
generic_shutdown_super(sb);
if (test_bit(BCH_FS_BDEV_MOUNTED, &c->flags))
- bch_fs_stop(c);
+ bch2_fs_stop(c);
else
closure_put(&c->cl);
}
static struct file_system_type bcache_fs_type = {
.owner = THIS_MODULE,
- .name = "bcache",
- .mount = bch_mount,
- .kill_sb = bch_kill_sb,
+ .name = "bcachefs",
+ .mount = bch2_mount,
+ .kill_sb = bch2_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
-MODULE_ALIAS_FS("bcache");
+MODULE_ALIAS_FS("bcachefs");
-void bch_vfs_exit(void)
+void bch2_vfs_exit(void)
{
unregister_filesystem(&bcache_fs_type);
- if (bch_dio_write_bioset)
- bioset_free(bch_dio_write_bioset);
- if (bch_dio_read_bioset)
- bioset_free(bch_dio_read_bioset);
- if (bch_writepage_bioset)
- bioset_free(bch_writepage_bioset);
- if (bch_inode_cache)
- kmem_cache_destroy(bch_inode_cache);
+ if (bch2_dio_write_bioset)
+ bioset_free(bch2_dio_write_bioset);
+ if (bch2_dio_read_bioset)
+ bioset_free(bch2_dio_read_bioset);
+ if (bch2_writepage_bioset)
+ bioset_free(bch2_writepage_bioset);
+ if (bch2_inode_cache)
+ kmem_cache_destroy(bch2_inode_cache);
}
-int __init bch_vfs_init(void)
+int __init bch2_vfs_init(void)
{
int ret = -ENOMEM;
- bch_inode_cache = KMEM_CACHE(bch_inode_info, 0);
- if (!bch_inode_cache)
+ bch2_inode_cache = KMEM_CACHE(bch_inode_info, 0);
+ if (!bch2_inode_cache)
goto err;
- bch_writepage_bioset =
+ bch2_writepage_bioset =
bioset_create(4, offsetof(struct bch_writepage_io, bio.bio));
- if (!bch_writepage_bioset)
+ if (!bch2_writepage_bioset)
goto err;
- bch_dio_read_bioset = bioset_create(4, offsetof(struct dio_read, rbio.bio));
- if (!bch_dio_read_bioset)
+ bch2_dio_read_bioset = bioset_create(4, offsetof(struct dio_read, rbio.bio));
+ if (!bch2_dio_read_bioset)
goto err;
- bch_dio_write_bioset = bioset_create(4, offsetof(struct dio_write, bio.bio));
- if (!bch_dio_write_bioset)
+ bch2_dio_write_bioset = bioset_create(4, offsetof(struct dio_write, bio.bio));
+ if (!bch2_dio_write_bioset)
goto err;
ret = register_filesystem(&bcache_fs_type);
@@ -1476,6 +1476,6 @@ int __init bch_vfs_init(void)
return 0;
err:
- bch_vfs_exit();
+ bch2_vfs_exit();
return ret;
}
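
The fs.c hunks above end by registering the filesystem type under its new "bcachefs" name, and bch2_open_as_blockdevs() splits the device string on ':' before handing it to bch2_fs_open(). As a rough sketch of what that looks like from userspace, the C program below issues a single mount(2) call with a colon-separated device list; the device paths and the "/mnt" target are made-up examples and no mount options are passed.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * "bcachefs" matches bcache_fs_type.name above; the colon-separated
	 * source string is what bch2_open_as_blockdevs() parses. The device
	 * nodes and "/mnt" target are example values only.
	 */
	if (mount("/dev/sda:/dev/sdb", "/mnt", "bcachefs", 0, NULL) == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}
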
diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h
index 1c0a2b15882a..f7cad296388c 100644
--- a/fs/bcachefs/fs.h
+++ b/fs/bcachefs/fs.h
@@ -47,18 +47,18 @@ struct bch_inode_unpacked;
typedef int (*inode_set_fn)(struct bch_inode_info *,
struct bch_inode_unpacked *, void *);
-int __must_check __bch_write_inode(struct bch_fs *, struct bch_inode_info *,
- inode_set_fn, void *);
-int __must_check bch_write_inode(struct bch_fs *,
- struct bch_inode_info *);
+int __must_check __bch2_write_inode(struct bch_fs *, struct bch_inode_info *,
+ inode_set_fn, void *);
+int __must_check bch2_write_inode(struct bch_fs *,
+ struct bch_inode_info *);
-void bch_vfs_exit(void);
-int bch_vfs_init(void);
+void bch2_vfs_exit(void);
+int bch2_vfs_init(void);
#else
-static inline void bch_vfs_exit(void) {}
-static inline int bch_vfs_init(void) { return 0; }
+static inline void bch2_vfs_exit(void) {}
+static inline int bch2_vfs_init(void) { return 0; }
#endif
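
fs.h pairs the renamed declarations with inline no-op stubs in the #else branch, so code shared between configurations can call bch2_vfs_init()/bch2_vfs_exit() unconditionally. Below is a minimal, self-contained sketch of that config-gated stub idiom; the CONFIG_EXAMPLE_FS symbol and the example_* names are hypothetical stand-ins for the real Kconfig option guarding this header.

#include <stdio.h>

#define CONFIG_EXAMPLE_FS 1	/* hypothetical stand-in for the real Kconfig symbol */

#ifdef CONFIG_EXAMPLE_FS
static int example_vfs_init(void) { puts("register filesystem"); return 0; }
static void example_vfs_exit(void) { puts("unregister filesystem"); }
#else
/* No-op stubs, mirroring the #else branch above: */
static inline int example_vfs_init(void) { return 0; }
static inline void example_vfs_exit(void) {}
#endif

int main(void)
{
	/* Callers need no #ifdefs of their own: */
	if (example_vfs_init())
		return 1;
	example_vfs_exit();
	return 0;
}
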
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index c65cece8ed4b..7a8467c4580e 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
@@ -103,8 +103,8 @@ static int inode_decode_field(const u8 *in, const u8 *end,
return bytes;
}
-void bch_inode_pack(struct bkey_inode_buf *packed,
- const struct bch_inode_unpacked *inode)
+void bch2_inode_pack(struct bkey_inode_buf *packed,
+ const struct bch_inode_unpacked *inode)
{
u8 *out = packed->inode.v.fields;
u8 *end = (void *) &packed[1];
@@ -145,7 +145,7 @@ void bch_inode_pack(struct bkey_inode_buf *packed,
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
struct bch_inode_unpacked unpacked;
- int ret = bch_inode_unpack(inode_i_to_s_c(&packed->inode),
+ int ret = bch2_inode_unpack(inode_i_to_s_c(&packed->inode),
&unpacked);
BUG_ON(ret);
BUG_ON(unpacked.inum != inode->inum);
@@ -158,8 +158,8 @@ void bch_inode_pack(struct bkey_inode_buf *packed,
}
}
-int bch_inode_unpack(struct bkey_s_c_inode inode,
- struct bch_inode_unpacked *unpacked)
+int bch2_inode_unpack(struct bkey_s_c_inode inode,
+ struct bch_inode_unpacked *unpacked)
{
const u8 *in = inode.v->fields;
const u8 *end = (void *) inode.v + bkey_val_bytes(inode.k);
@@ -198,8 +198,8 @@ int bch_inode_unpack(struct bkey_s_c_inode inode,
return 0;
}
-static const char *bch_inode_invalid(const struct bch_fs *c,
- struct bkey_s_c k)
+static const char *bch2_inode_invalid(const struct bch_fs *c,
+ struct bkey_s_c k)
{
if (k.k->p.offset)
return "nonzero offset";
@@ -218,7 +218,7 @@ static const char *bch_inode_invalid(const struct bch_fs *c,
if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR)
return "invalid str hash type";
- if (bch_inode_unpack(inode, &unpacked))
+ if (bch2_inode_unpack(inode, &unpacked))
return "invalid variable length fields";
return NULL;
@@ -236,8 +236,8 @@ static const char *bch_inode_invalid(const struct bch_fs *c,
}
}
-static void bch_inode_to_text(struct bch_fs *c, char *buf,
- size_t size, struct bkey_s_c k)
+static void bch2_inode_to_text(struct bch_fs *c, char *buf,
+ size_t size, struct bkey_s_c k)
{
struct bkey_s_c_inode inode;
struct bch_inode_unpacked unpacked;
@@ -245,7 +245,7 @@ static void bch_inode_to_text(struct bch_fs *c, char *buf,
switch (k.k->type) {
case BCH_INODE_FS:
inode = bkey_s_c_to_inode(k);
- if (bch_inode_unpack(inode, &unpacked)) {
+ if (bch2_inode_unpack(inode, &unpacked)) {
scnprintf(buf, size, "(unpack error)");
break;
}
@@ -255,15 +255,15 @@ static void bch_inode_to_text(struct bch_fs *c, char *buf,
}
}
-const struct bkey_ops bch_bkey_inode_ops = {
- .key_invalid = bch_inode_invalid,
- .val_to_text = bch_inode_to_text,
+const struct bkey_ops bch2_bkey_inode_ops = {
+ .key_invalid = bch2_inode_invalid,
+ .val_to_text = bch2_inode_to_text,
};
-void bch_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
- uid_t uid, gid_t gid, umode_t mode, dev_t rdev)
+void bch2_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
+ uid_t uid, gid_t gid, umode_t mode, dev_t rdev)
{
- s64 now = timespec_to_bch_time(c, CURRENT_TIME);
+ s64 now = timespec_to_bch2_time(c, CURRENT_TIME);
memset(inode_u, 0, sizeof(*inode_u));
@@ -281,8 +281,8 @@ void bch_inode_init(struct bch_fs *c, struct bch_inode_unpacked *inode_u,
inode_u->i_otime = now;
}
-int bch_inode_create(struct bch_fs *c, struct bkey_i *inode,
- u64 min, u64 max, u64 *hint)
+int bch2_inode_create(struct bch_fs *c, struct bkey_i *inode,
+ u64 min, u64 max, u64 *hint)
{
struct btree_iter iter;
bool searched_from_start = false;
@@ -300,14 +300,14 @@ int bch_inode_create(struct bch_fs *c, struct bkey_i *inode,
if (*hint == min)
searched_from_start = true;
again:
- bch_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(*hint, 0));
+ bch2_btree_iter_init_intent(&iter, c, BTREE_ID_INODES, POS(*hint, 0));
while (1) {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
ret = btree_iter_err(k);
if (ret) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
@@ -317,14 +317,14 @@ again:
pr_debug("inserting inode %llu (size %u)",
inode->k.p.inode, inode->k.u64s);
- ret = bch_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&iter, inode));
if (ret == -EINTR)
continue;
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
if (!ret)
*hint = k.k->p.inode + 1;
@@ -333,10 +333,10 @@ again:
if (iter.pos.inode == max)
break;
/* slot used */
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
}
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
if (!searched_from_start) {
/* Retry from start */
@@ -348,23 +348,23 @@ again:
return -ENOSPC;
}
-int bch_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size,
- struct extent_insert_hook *hook, u64 *journal_seq)
+int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size,
+ struct extent_insert_hook *hook, u64 *journal_seq)
{
- return bch_discard(c, POS(inode_nr, new_size), POS(inode_nr + 1, 0),
+ return bch2_discard(c, POS(inode_nr, new_size), POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, hook, journal_seq);
}
-int bch_inode_rm(struct bch_fs *c, u64 inode_nr)
+int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
{
struct bkey_i delete;
int ret;
- ret = bch_inode_truncate(c, inode_nr, 0, NULL, NULL);
+ ret = bch2_inode_truncate(c, inode_nr, 0, NULL, NULL);
if (ret < 0)
return ret;
- ret = bch_btree_delete_range(c, BTREE_ID_XATTRS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
@@ -379,7 +379,7 @@ int bch_inode_rm(struct bch_fs *c, u64 inode_nr)
* XXX: ideally the dirent code would delete whiteouts when they're no
* longer needed
*/
- ret = bch_btree_delete_range(c, BTREE_ID_DIRENTS,
+ ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(inode_nr, 0),
POS(inode_nr + 1, 0),
ZERO_VERSION, NULL, NULL, NULL);
@@ -389,12 +389,12 @@ int bch_inode_rm(struct bch_fs *c, u64 inode_nr)
bkey_init(&delete.k);
delete.k.p.inode = inode_nr;
- return bch_btree_insert(c, BTREE_ID_INODES, &delete, NULL,
+ return bch2_btree_insert(c, BTREE_ID_INODES, &delete, NULL,
NULL, NULL, BTREE_INSERT_NOFAIL);
}
-int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
- struct bch_inode_unpacked *inode)
+int bch2_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
+ struct bch_inode_unpacked *inode)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -404,7 +404,7 @@ int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
POS(inode_nr, 0), k) {
switch (k.k->type) {
case BCH_INODE_FS:
- ret = bch_inode_unpack(bkey_s_c_to_inode(k), inode);
+ ret = bch2_inode_unpack(bkey_s_c_to_inode(k), inode);
break;
default:
/* hole, not found */
@@ -415,11 +415,11 @@ int bch_inode_find_by_inum(struct bch_fs *c, u64 inode_nr,
}
- return bch_btree_iter_unlock(&iter) ?: ret;
+ return bch2_btree_iter_unlock(&iter) ?: ret;
}
-int bch_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid,
- struct bkey_i_inode_blockdev *ret)
+int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid,
+ struct bkey_i_inode_blockdev *ret)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -439,13 +439,13 @@ int bch_cached_dev_inode_find_by_uuid(struct bch_fs *c, uuid_le *uuid,
if (CACHED_DEV(inode.v) &&
!memcmp(uuid, &inode.v->i_uuid, 16)) {
bkey_reassemble(&ret->k_i, k);
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return 0;
}
}
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return -ENOENT;
}
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index 41e344d5da16..277d4e42aea0 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -3,7 +3,7 @@
#include <linux/math64.h>
-extern const struct bkey_ops bch_bkey_inode_ops;
+extern const struct bkey_ops bch2_bkey_inode_ops;
struct bch_inode_unpacked {
u64 inum;
@@ -24,27 +24,27 @@ struct bkey_inode_buf {
#undef BCH_INODE_FIELD
} __packed;
-void bch_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
-int bch_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *);
+void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
+int bch2_inode_unpack(struct bkey_s_c_inode, struct bch_inode_unpacked *);
-void bch_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
+void bch2_inode_init(struct bch_fs *, struct bch_inode_unpacked *,
uid_t, gid_t, umode_t, dev_t);
-int bch_inode_create(struct bch_fs *, struct bkey_i *, u64, u64, u64 *);
-int bch_inode_truncate(struct bch_fs *, u64, u64,
+int bch2_inode_create(struct bch_fs *, struct bkey_i *, u64, u64, u64 *);
+int bch2_inode_truncate(struct bch_fs *, u64, u64,
struct extent_insert_hook *, u64 *);
-int bch_inode_rm(struct bch_fs *, u64);
+int bch2_inode_rm(struct bch_fs *, u64);
-int bch_inode_find_by_inum(struct bch_fs *, u64,
+int bch2_inode_find_by_inum(struct bch_fs *, u64,
struct bch_inode_unpacked *);
-int bch_cached_dev_inode_find_by_uuid(struct bch_fs *, uuid_le *,
+int bch2_cached_dev_inode_find_by_uuid(struct bch_fs *, uuid_le *,
struct bkey_i_inode_blockdev *);
-static inline struct timespec bch_time_to_timespec(struct bch_fs *c, u64 time)
+static inline struct timespec bch2_time_to_timespec(struct bch_fs *c, u64 time)
{
return ns_to_timespec(time * c->sb.time_precision + c->sb.time_base_lo);
}
-static inline u64 timespec_to_bch_time(struct bch_fs *c, struct timespec ts)
+static inline u64 timespec_to_bch2_time(struct bch_fs *c, struct timespec ts)
{
s64 ns = timespec_to_ns(&ts) - c->sb.time_base_lo;
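
inode.h stores timestamps as counts of sb.time_precision nanoseconds offset by sb.time_base_lo, as the bch2_time_to_timespec() formula above shows; the rest of timespec_to_bch2_time() falls outside the hunk, but it presumably divides by that same precision after subtracting the base. The standalone sketch below rounds trips a timestamp through that encoding, using made-up precision and base values and an assumed truncating division.

#include <stdint.h>
#include <stdio.h>

/* Example values; the real ones come from the superblock (c->sb). */
static const uint64_t time_precision = 1000;	/* nanoseconds per stored unit */
static const uint64_t time_base_lo = 0;		/* base offset in nanoseconds */

/* Decode: copies the formula visible in bch2_time_to_timespec() above. */
static int64_t bch_time_to_ns(uint64_t t)
{
	return t * time_precision + time_base_lo;
}

/* Encode: assumed inverse, with truncating division. */
static uint64_t ns_to_bch_time(int64_t ns)
{
	return (ns - time_base_lo) / time_precision;
}

int main(void)
{
	int64_t ns = 1489799488123456789LL;
	uint64_t t = ns_to_bch_time(ns);

	printf("%lld ns -> %llu units -> %lld ns\n",
	       (long long) ns, (unsigned long long) t,
	       (long long) bch_time_to_ns(t));
	return 0;
}
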
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index bdf93c4f3632..212a5a6533fd 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -5,7 +5,7 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "bset.h"
#include "btree_update.h"
@@ -36,7 +36,7 @@ static inline void __bio_inc_remaining(struct bio *bio)
/* Allocate, free from mempool: */
-void bch_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
+void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
struct bio_vec *bv;
unsigned i;
@@ -47,7 +47,7 @@ void bch_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
bio->bi_vcnt = 0;
}
-static void bch_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
+static void bch2_bio_alloc_page_pool(struct bch_fs *c, struct bio *bio,
bool *using_mempool)
{
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt++];
@@ -69,7 +69,7 @@ pool_alloc:
bv->bv_offset = 0;
}
-void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
+void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
size_t bytes)
{
bool using_mempool = false;
@@ -77,7 +77,7 @@ void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
bio->bi_iter.bi_size = bytes;
while (bio->bi_vcnt < DIV_ROUND_UP(bytes, PAGE_SIZE))
- bch_bio_alloc_page_pool(c, bio, &using_mempool);
+ bch2_bio_alloc_page_pool(c, bio, &using_mempool);
if (using_mempool)
mutex_unlock(&c->bio_bounce_pages_lock);
@@ -85,7 +85,7 @@ void bch_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
/* Bios with headers */
-static void bch_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio,
+static void bch2_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio,
struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
wbio->ca = ca;
@@ -99,8 +99,8 @@ static void bch_submit_wbio(struct bch_fs *c, struct bch_write_bio *wbio,
generic_make_request(&wbio->bio);
}
-void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
- const struct bkey_i *k)
+void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
+ const struct bkey_i *k)
{
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
const struct bch_extent_ptr *ptr;
@@ -115,7 +115,7 @@ void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
extent_for_each_ptr(e, ptr) {
ca = c->devs[ptr->dev];
if (!percpu_ref_tryget(&ca->io_ref)) {
- bch_submit_wbio(c, wbio, NULL, ptr);
+ bch2_submit_wbio(c, wbio, NULL, ptr);
break;
}
@@ -139,7 +139,7 @@ void bch_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
if (!journal_flushes_device(ca))
n->bio.bi_opf |= REQ_FUA;
- bch_submit_wbio(c, n, ca, ptr);
+ bch2_submit_wbio(c, n, ca, ptr);
}
}
@@ -154,20 +154,20 @@ static struct workqueue_struct *index_update_wq(struct bch_write_op *op)
: op->c->wq;
}
-static void __bch_write(struct closure *);
+static void __bch2_write(struct closure *);
-static void bch_write_done(struct closure *cl)
+static void bch2_write_done(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
BUG_ON(!(op->flags & BCH_WRITE_DONE));
if (!op->error && (op->flags & BCH_WRITE_FLUSH))
- op->error = bch_journal_error(&op->c->journal);
+ op->error = bch2_journal_error(&op->c->journal);
- bch_disk_reservation_put(op->c, &op->res);
+ bch2_disk_reservation_put(op->c, &op->res);
percpu_ref_put(&op->c->writes);
- bch_keylist_free(&op->insert_keys, op->inline_keys);
+ bch2_keylist_free(&op->insert_keys, op->inline_keys);
closure_return(cl);
}
@@ -182,19 +182,19 @@ static u64 keylist_sectors(struct keylist *keys)
return ret;
}
-static int bch_write_index_default(struct bch_write_op *op)
+static int bch2_write_index_default(struct bch_write_op *op)
{
struct keylist *keys = &op->insert_keys;
struct btree_iter iter;
int ret;
- bch_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch_keylist_front(keys)->k));
+ bch2_btree_iter_init_intent(&iter, op->c, BTREE_ID_EXTENTS,
+ bkey_start_pos(&bch2_keylist_front(keys)->k));
- ret = bch_btree_insert_list_at(&iter, keys, &op->res,
+ ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
NULL, op_journal_seq(op),
BTREE_INSERT_NOFAIL);
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
@@ -202,7 +202,7 @@ static int bch_write_index_default(struct bch_write_op *op)
/**
- * bch_write_index - after a write, update index to point to new data
+ * bch2_write_index - after a write, update index to point to new data
*/
-static void bch_write_index(struct closure *cl)
+static void bch2_write_index(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bch_fs *c = op->c;
@@ -211,7 +211,7 @@ static void bch_write_index(struct closure *cl)
op->flags |= BCH_WRITE_LOOPED;
- if (!bch_keylist_empty(keys)) {
+ if (!bch2_keylist_empty(keys)) {
u64 sectors_start = keylist_sectors(keys);
int ret = op->index_update_fn(op);
@@ -227,22 +227,22 @@ static void bch_write_index(struct closure *cl)
for (i = 0; i < ARRAY_SIZE(op->open_buckets); i++)
if (op->open_buckets[i]) {
- bch_open_bucket_put(c,
- c->open_buckets +
- op->open_buckets[i]);
+ bch2_open_bucket_put(c,
+ c->open_buckets +
+ op->open_buckets[i]);
op->open_buckets[i] = 0;
}
if (!(op->flags & BCH_WRITE_DONE))
- continue_at(cl, __bch_write, op->io_wq);
+ continue_at(cl, __bch2_write, op->io_wq);
if (!op->error && (op->flags & BCH_WRITE_FLUSH)) {
- bch_journal_flush_seq_async(&c->journal,
- *op_journal_seq(op),
- cl);
- continue_at(cl, bch_write_done, index_update_wq(op));
+ bch2_journal_flush_seq_async(&c->journal,
+ *op_journal_seq(op),
+ cl);
+ continue_at(cl, bch2_write_done, index_update_wq(op));
} else {
- continue_at_nobarrier(cl, bch_write_done, NULL);
+ continue_at_nobarrier(cl, bch2_write_done, NULL);
}
}
@@ -252,7 +252,7 @@ static void bch_write_index(struct closure *cl)
* Used to implement discard, and to handle when writethrough write hits
* a write error on the cache device.
*/
-static void bch_write_discard(struct closure *cl)
+static void bch2_write_discard(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bio *bio = &op->bio->bio;
@@ -260,20 +260,20 @@ static void bch_write_discard(struct closure *cl)
end.offset += bio_sectors(bio);
- op->error = bch_discard(op->c, op->pos, end, op->version,
+ op->error = bch2_discard(op->c, op->pos, end, op->version,
&op->res, NULL, NULL);
}
/*
* Convert extents to be inserted to discards after an error:
*/
-static void bch_write_io_error(struct closure *cl)
+static void bch2_write_io_error(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
- struct bkey_i *src = bch_keylist_front(&op->insert_keys);
- struct bkey_i *dst = bch_keylist_front(&op->insert_keys);
+ struct bkey_i *src = bch2_keylist_front(&op->insert_keys);
+ struct bkey_i *dst = bch2_keylist_front(&op->insert_keys);
/*
* Our data write just errored, which means we've got a bunch
@@ -301,17 +301,17 @@ static void bch_write_io_error(struct closure *cl)
op->flags |= BCH_WRITE_DISCARD;
} else {
/* TODO: We could try to recover from this. */
- while (!bch_keylist_empty(&op->insert_keys))
- bch_keylist_pop_front(&op->insert_keys);
+ while (!bch2_keylist_empty(&op->insert_keys))
+ bch2_keylist_pop_front(&op->insert_keys);
op->error = -EIO;
op->flags |= BCH_WRITE_DONE;
}
- bch_write_index(cl);
+ bch2_write_index(cl);
}
-static void bch_write_endio(struct bio *bio)
+static void bch2_write_endio(struct bio *bio)
{
struct closure *cl = bio->bi_private;
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
@@ -320,9 +320,9 @@ static void bch_write_endio(struct bio *bio)
struct bio *orig = wbio->orig;
struct bch_dev *ca = wbio->ca;
- if (bch_dev_nonfatal_io_err_on(bio->bi_error, ca,
+ if (bch2_dev_nonfatal_io_err_on(bio->bi_error, ca,
"data write")) {
- set_closure_fn(cl, bch_write_io_error, index_update_wq(op));
+ set_closure_fn(cl, bch2_write_io_error, index_update_wq(op));
}
if (ca)
@@ -332,7 +332,7 @@ static void bch_write_endio(struct bio *bio)
orig->bi_error = bio->bi_error;
if (wbio->bounce)
- bch_bio_free_pages_pool(c, bio);
+ bch2_bio_free_pages_pool(c, bio);
if (wbio->put_bio)
bio_put(bio);
@@ -374,19 +374,19 @@ static void init_append_extent(struct bch_write_op *op,
e->k.version = op->version;
bkey_extent_set_cached(&e->k, op->flags & BCH_WRITE_CACHED);
- bch_extent_crc_append(e, compressed_size,
+ bch2_extent_crc_append(e, compressed_size,
uncompressed_size,
compression_type,
nonce, csum, csum_type);
- bch_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
+ bch2_alloc_sectors_append_ptrs(op->c, e, op->nr_replicas,
ob, compressed_size);
bkey_extent_set_cached(&e->k, (op->flags & BCH_WRITE_CACHED));
- bch_keylist_push(&op->insert_keys);
+ bch2_keylist_push(&op->insert_keys);
}
-static int bch_write_extent(struct bch_write_op *op,
+static int bch2_write_extent(struct bch_write_op *op,
struct open_bucket *ob,
struct bio *orig)
{
@@ -409,7 +409,7 @@ static int bch_write_extent(struct bch_write_op *op,
crc_compressed_size(NULL, &op->crc) > ob->sectors_free)) {
int ret;
- ret = bch_bio_uncompress_inplace(c, orig, op->size, op->crc);
+ ret = bch2_bio_uncompress_inplace(c, orig, op->size, op->crc);
if (ret)
return ret;
@@ -437,7 +437,7 @@ static int bch_write_extent(struct bch_write_op *op,
/* all units here in bytes */
unsigned total_output = 0, output_available =
min(ob->sectors_free << 9, orig->bi_iter.bi_size);
- unsigned crc_nonce = bch_csum_type_is_encryption(csum_type)
+ unsigned crc_nonce = bch2_csum_type_is_encryption(csum_type)
? op->nonce : 0;
struct bch_csum csum;
struct nonce nonce;
@@ -449,7 +449,7 @@ static int bch_write_extent(struct bch_write_op *op,
* XXX: can't use mempool for more than
* BCH_COMPRESSED_EXTENT_MAX worth of pages
*/
- bch_bio_alloc_pages_pool(c, bio, output_available);
+ bch2_bio_alloc_pages_pool(c, bio, output_available);
/* copy WRITE_SYNC flag */
bio->bi_opf = orig->bi_opf;
@@ -462,7 +462,7 @@ static int bch_write_extent(struct bch_write_op *op,
unsigned fragment_compression_type = compression_type;
size_t dst_len, src_len;
- bch_bio_compress(c, bio, &dst_len,
+ bch2_bio_compress(c, bio, &dst_len,
orig, &src_len,
&fragment_compression_type);
@@ -477,9 +477,9 @@ static int bch_write_extent(struct bch_write_op *op,
src_len >> 9,
compression_type),
- bch_encrypt_bio(c, csum_type, nonce, bio);
+ bch2_encrypt_bio(c, csum_type, nonce, bio);
- csum = bch_checksum_bio(c, csum_type, nonce, bio);
+ csum = bch2_checksum_bio(c, csum_type, nonce, bio);
swap(bio->bi_iter.bi_size, dst_len);
init_append_extent(op,
@@ -492,7 +492,7 @@ static int bch_write_extent(struct bch_write_op *op,
bio_advance(orig, src_len);
} while (bio->bi_iter.bi_size &&
orig->bi_iter.bi_size &&
- !bch_keylist_realloc(&op->insert_keys,
+ !bch2_keylist_realloc(&op->insert_keys,
op->inline_keys,
ARRAY_SIZE(op->inline_keys),
BKEY_EXTENT_U64s_MAX));
@@ -527,7 +527,7 @@ static int bch_write_extent(struct bch_write_op *op,
ret = bio != orig;
}
- bio->bi_end_io = bch_write_endio;
+ bio->bi_end_io = bch2_write_endio;
bio->bi_private = &op->cl;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -537,13 +537,13 @@ static int bch_write_extent(struct bch_write_op *op,
key_to_write = (void *) (op->insert_keys.keys_p + key_to_write_offset);
- bch_check_mark_super(c, key_to_write, false);
+ bch2_check_mark_super(c, key_to_write, false);
- bch_submit_wbio_replicas(to_wbio(bio), c, key_to_write);
+ bch2_submit_wbio_replicas(to_wbio(bio), c, key_to_write);
return ret;
}
-static void __bch_write(struct closure *cl)
+static void __bch2_write(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bch_fs *c = op->c;
@@ -556,9 +556,9 @@ static void __bch_write(struct closure *cl)
if (op->flags & BCH_WRITE_DISCARD) {
op->flags |= BCH_WRITE_DONE;
- bch_write_discard(cl);
+ bch2_write_discard(cl);
bio_put(bio);
- continue_at(cl, bch_write_done, index_update_wq(op));
+ continue_at(cl, bch2_write_done, index_update_wq(op));
}
/*
@@ -572,16 +572,16 @@ static void __bch_write(struct closure *cl)
EBUG_ON(!bio_sectors(bio));
if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
- continue_at(cl, bch_write_index, index_update_wq(op));
+ continue_at(cl, bch2_write_index, index_update_wq(op));
/* for the device pointers and 1 for the chksum */
- if (bch_keylist_realloc(&op->insert_keys,
+ if (bch2_keylist_realloc(&op->insert_keys,
op->inline_keys,
ARRAY_SIZE(op->inline_keys),
BKEY_EXTENT_U64s_MAX))
- continue_at(cl, bch_write_index, index_update_wq(op));
+ continue_at(cl, bch2_write_index, index_update_wq(op));
- b = bch_alloc_sectors_start(c, op->wp,
+ b = bch2_alloc_sectors_start(c, op->wp,
op->nr_replicas,
c->opts.data_replicas_required,
op->alloc_reserve,
@@ -599,17 +599,17 @@ static void __bch_write(struct closure *cl)
* before allocating another open bucket. We only hit
* this case if open_bucket_nr > 1.
*/
- if (!bch_keylist_empty(&op->insert_keys))
- continue_at(cl, bch_write_index,
+ if (!bch2_keylist_empty(&op->insert_keys))
+ continue_at(cl, bch2_write_index,
index_update_wq(op));
/*
* If we've looped, we're running out of a workqueue -
- * not the bch_write() caller's context - and we don't
+ * not the bch2_write() caller's context - and we don't
* want to block the workqueue:
*/
if (op->flags & BCH_WRITE_LOOPED)
- continue_at(cl, __bch_write, op->io_wq);
+ continue_at(cl, __bch2_write, op->io_wq);
/*
* Otherwise, we do want to block the caller on alloc
@@ -627,16 +627,16 @@ static void __bch_write(struct closure *cl)
b - c->open_buckets > U8_MAX);
op->open_buckets[open_bucket_nr++] = b - c->open_buckets;
- ret = bch_write_extent(op, b, bio);
+ ret = bch2_write_extent(op, b, bio);
- bch_alloc_sectors_done(c, op->wp, b);
+ bch2_alloc_sectors_done(c, op->wp, b);
if (ret < 0)
goto err;
} while (ret);
op->flags |= BCH_WRITE_DONE;
- continue_at(cl, bch_write_index, index_update_wq(op));
+ continue_at(cl, bch2_write_index, index_update_wq(op));
err:
if (op->flags & BCH_WRITE_DISCARD_ON_ERROR) {
/*
@@ -646,7 +646,7 @@ err:
* reclaiming it.
*/
- bch_write_discard(cl);
+ bch2_write_discard(cl);
} else {
/*
* Right now we can only error here if we went RO - the
@@ -666,12 +666,12 @@ err:
* written (especially for a cmpxchg operation that's moving data
* around)
*/
- continue_at(cl, !bch_keylist_empty(&op->insert_keys)
- ? bch_write_index
- : bch_write_done, index_update_wq(op));
+ continue_at(cl, !bch2_keylist_empty(&op->insert_keys)
+ ? bch2_write_index
+ : bch2_write_done, index_update_wq(op));
}
-void bch_wake_delayed_writes(unsigned long data)
+void bch2_wake_delayed_writes(unsigned long data)
{
struct bch_fs *c = (void *) data;
struct bch_write_op *op;
@@ -714,7 +714,7 @@ void bch_wake_delayed_writes(unsigned long data)
* If op->discard is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
-void bch_write(struct closure *cl)
+void bch2_write(struct closure *cl)
{
struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
struct bio *bio = &op->bio->bio;
@@ -725,19 +725,19 @@ void bch_write(struct closure *cl)
!percpu_ref_tryget(&c->writes)) {
__bcache_io_error(c, "read only");
op->error = -EROFS;
- bch_disk_reservation_put(c, &op->res);
+ bch2_disk_reservation_put(c, &op->res);
closure_return(cl);
}
if (bversion_zero(op->version) &&
- bch_csum_type_is_encryption(op->csum_type))
+ bch2_csum_type_is_encryption(op->csum_type))
op->version.lo =
atomic64_inc_return(&c->key_version) + 1;
if (!(op->flags & BCH_WRITE_DISCARD))
- bch_increment_clock(c, bio_sectors(bio), WRITE);
+ bch2_increment_clock(c, bio_sectors(bio), WRITE);
- /* Don't call bch_next_delay() if rate is >= 1 GB/sec */
+ /* Don't call bch2_next_delay() if rate is >= 1 GB/sec */
if (c->foreground_write_ratelimit_enabled &&
c->foreground_write_pd.rate.rate < (1 << 30) &&
@@ -746,13 +746,13 @@ void bch_write(struct closure *cl)
u64 delay;
spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
- bch_ratelimit_increment(&c->foreground_write_pd.rate,
+ bch2_ratelimit_increment(&c->foreground_write_pd.rate,
bio->bi_iter.bi_size);
- delay = bch_ratelimit_delay(&c->foreground_write_pd.rate);
+ delay = bch2_ratelimit_delay(&c->foreground_write_pd.rate);
if (delay >= HZ / 100) {
- trace_bcache_write_throttle(c, inode, bio, delay);
+ trace_write_throttle(c, inode, bio, delay);
closure_get(&op->cl); /* list takes a ref */
@@ -771,16 +771,16 @@ void bch_write(struct closure *cl)
spin_unlock_irqrestore(&c->foreground_write_pd_lock,
flags);
- continue_at(cl, __bch_write, index_update_wq(op));
+ continue_at(cl, __bch2_write, index_update_wq(op));
}
spin_unlock_irqrestore(&c->foreground_write_pd_lock, flags);
}
- continue_at_nobarrier(cl, __bch_write, NULL);
+ continue_at_nobarrier(cl, __bch2_write, NULL);
}
-void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c,
+void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
struct bch_write_bio *bio, struct disk_reservation res,
struct write_point *wp, struct bpos pos,
u64 *journal_seq, unsigned flags)
@@ -793,7 +793,7 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->written = 0;
op->error = 0;
op->flags = flags;
- op->csum_type = bch_data_checksum_type(c);
+ op->csum_type = bch2_data_checksum_type(c);
op->compression_type = c->opts.compression;
op->nr_replicas = res.nr_replicas;
op->alloc_reserve = RESERVE_NONE;
@@ -810,11 +810,11 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->journal_seq = 0;
}
- op->index_update_fn = bch_write_index_default;
+ op->index_update_fn = bch2_write_index_default;
- bch_keylist_init(&op->insert_keys,
- op->inline_keys,
- ARRAY_SIZE(op->inline_keys));
+ bch2_keylist_init(&op->insert_keys,
+ op->inline_keys,
+ ARRAY_SIZE(op->inline_keys));
if (version_stress_test(c))
get_random_bytes(&op->version, sizeof(op->version));
@@ -837,13 +837,13 @@ void bch_write_op_init(struct bch_write_op *op, struct bch_fs *c,
* XXX: this needs to be refactored with inode_truncate, or more
* appropriately inode_truncate should call this
*/
-int bch_discard(struct bch_fs *c, struct bpos start,
- struct bpos end, struct bversion version,
- struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
- u64 *journal_seq)
+int bch2_discard(struct bch_fs *c, struct bpos start,
+ struct bpos end, struct bversion version,
+ struct disk_reservation *disk_res,
+ struct extent_insert_hook *hook,
+ u64 *journal_seq)
{
- return bch_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
+ return bch2_btree_delete_range(c, BTREE_ID_EXTENTS, start, end, version,
disk_res, hook, journal_seq);
}
@@ -861,7 +861,7 @@ static int bio_checksum_uncompress(struct bch_fs *c,
struct bch_read_bio *rbio)
{
struct bio *src = &rbio->bio;
- struct bio *dst = &bch_rbio_parent(rbio)->bio;
+ struct bio *dst = &bch2_rbio_parent(rbio)->bio;
struct bvec_iter dst_iter = rbio->parent_iter;
struct nonce nonce = extent_nonce(rbio->version,
rbio->crc.nonce,
@@ -884,8 +884,9 @@ static int bio_checksum_uncompress(struct bch_fs *c,
src->bi_iter = rbio->parent_iter;
}
- csum = bch_checksum_bio(c, rbio->crc.csum_type, nonce, src);
- if (bch_dev_nonfatal_io_err_on(bch_crc_cmp(rbio->crc.csum, csum), rbio->ca,
+ csum = bch2_checksum_bio(c, rbio->crc.csum_type, nonce, src);
+ if (bch2_dev_nonfatal_io_err_on(bch2_crc_cmp(rbio->crc.csum, csum),
+ rbio->ca,
"data checksum error, inode %llu offset %llu: expected %0llx%0llx got %0llx%0llx (type %u)",
rbio->inode, (u64) rbio->parent_iter.bi_sector << 9,
rbio->crc.csum.hi, rbio->crc.csum.lo, csum.hi, csum.lo,
@@ -898,8 +899,8 @@ static int bio_checksum_uncompress(struct bch_fs *c,
*/
if (rbio->crc.compression_type != BCH_COMPRESSION_NONE) {
if (!ret) {
- bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
- ret = bch_bio_uncompress(c, src, dst,
+ bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
+ ret = bch2_bio_uncompress(c, src, dst,
dst_iter, rbio->crc);
if (ret)
__bcache_io_error(c, "decompression error");
@@ -913,19 +914,19 @@ static int bio_checksum_uncompress(struct bch_fs *c,
nonce = nonce_add(nonce, rbio->crc.offset << 9);
- bch_encrypt_bio(c, rbio->crc.csum_type,
+ bch2_encrypt_bio(c, rbio->crc.csum_type,
nonce, src);
bio_copy_data_iter(dst, dst_iter,
src, src->bi_iter);
} else {
- bch_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
+ bch2_encrypt_bio(c, rbio->crc.csum_type, nonce, src);
}
return ret;
}
-static void bch_rbio_free(struct bch_read_bio *rbio)
+static void bch2_rbio_free(struct bch_read_bio *rbio)
{
struct bch_fs *c = rbio->c;
struct bio *bio = &rbio->bio;
@@ -936,14 +937,14 @@ static void bch_rbio_free(struct bch_read_bio *rbio)
if (rbio->promote)
kfree(rbio->promote);
if (rbio->bounce)
- bch_bio_free_pages_pool(c, bio);
+ bch2_bio_free_pages_pool(c, bio);
bio_put(bio);
}
-static void bch_rbio_done(struct bch_read_bio *rbio)
+static void bch2_rbio_done(struct bch_read_bio *rbio)
{
- struct bio *orig = &bch_rbio_parent(rbio)->bio;
+ struct bio *orig = &bch2_rbio_parent(rbio)->bio;
percpu_ref_put(&rbio->ca->io_ref);
rbio->ca = NULL;
@@ -953,7 +954,7 @@ static void bch_rbio_done(struct bch_read_bio *rbio)
orig->bi_error = rbio->bio.bi_error;
bio_endio(orig);
- bch_rbio_free(rbio);
+ bch2_rbio_free(rbio);
} else {
if (rbio->promote)
kfree(rbio->promote);
@@ -963,13 +964,13 @@ static void bch_rbio_done(struct bch_read_bio *rbio)
}
}
-static void bch_rbio_error(struct bch_read_bio *rbio, int error)
+static void bch2_rbio_error(struct bch_read_bio *rbio, int error)
{
- bch_rbio_parent(rbio)->bio.bi_error = error;
- bch_rbio_done(rbio);
+ bch2_rbio_parent(rbio)->bio.bi_error = error;
+ bch2_rbio_done(rbio);
}
-static void bch_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio)
+static void bch2_rbio_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
unsigned long flags;
@@ -987,12 +988,12 @@ static void cache_promote_done(struct closure *cl)
struct cache_promote_op *op =
container_of(cl, struct cache_promote_op, cl);
- bch_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
+ bch2_bio_free_pages_pool(op->write.op.c, &op->write.wbio.bio);
kfree(op);
}
/* Inner part that may run in process context */
-static void __bch_read_endio(struct work_struct *work)
+static void __bch2_read_endio(struct work_struct *work)
{
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
@@ -1008,9 +1009,9 @@ static void __bch_read_endio(struct work_struct *work)
*/
if (!rbio->bounce && (rbio->flags & BCH_READ_USER_MAPPED)) {
rbio->flags |= BCH_READ_FORCE_BOUNCE;
- bch_rbio_retry(c, rbio);
+ bch2_rbio_retry(c, rbio);
} else {
- bch_rbio_error(rbio, -EIO);
+ bch2_rbio_error(rbio, -EIO);
}
return;
}
@@ -1021,31 +1022,31 @@ static void __bch_read_endio(struct work_struct *work)
BUG_ON(!rbio->split || !rbio->bounce);
- trace_bcache_promote(&rbio->bio);
+ trace_promote(&rbio->bio);
/* we now own pages: */
swap(promote->write.wbio.bio.bi_vcnt, rbio->bio.bi_vcnt);
rbio->promote = NULL;
- bch_rbio_done(rbio);
+ bch2_rbio_done(rbio);
closure_init(cl, &c->cl);
- closure_call(&promote->write.op.cl, bch_write, c->wq, cl);
+ closure_call(&promote->write.op.cl, bch2_write, c->wq, cl);
closure_return_with_destructor(cl, cache_promote_done);
} else {
- bch_rbio_done(rbio);
+ bch2_rbio_done(rbio);
}
}
-static void bch_read_endio(struct bio *bio)
+static void bch2_read_endio(struct bio *bio)
{
struct bch_read_bio *rbio =
container_of(bio, struct bch_read_bio, bio);
struct bch_fs *c = rbio->c;
- if (bch_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) {
+ if (bch2_dev_nonfatal_io_err_on(bio->bi_error, rbio->ca, "data read")) {
/* XXX: retry IO errors when we have another replica */
- bch_rbio_error(rbio, bio->bi_error);
+ bch2_rbio_error(rbio, bio->bi_error);
return;
}
@@ -1055,19 +1056,19 @@ static void bch_read_endio(struct bio *bio)
atomic_long_inc(&c->cache_read_races);
if (rbio->flags & BCH_READ_RETRY_IF_STALE)
- bch_rbio_retry(c, rbio);
+ bch2_rbio_retry(c, rbio);
else
- bch_rbio_error(rbio, -EINTR);
+ bch2_rbio_error(rbio, -EINTR);
return;
}
if (rbio->crc.compression_type ||
- bch_csum_type_is_encryption(rbio->crc.csum_type))
+ bch2_csum_type_is_encryption(rbio->crc.csum_type))
queue_work(system_unbound_wq, &rbio->work);
else if (rbio->crc.csum_type)
queue_work(system_highpri_wq, &rbio->work);
else
- __bch_read_endio(&rbio->work);
+ __bch2_read_endio(&rbio->work);
}
static bool should_promote(struct bch_fs *c,
@@ -1083,7 +1084,7 @@ static bool should_promote(struct bch_fs *c,
c->fastest_tier < c->tiers + pick->ca->mi.tier;
}
-void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
+void bch2_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
struct bvec_iter iter, struct bkey_s_c k,
struct extent_pick_ptr *pick, unsigned flags)
{
@@ -1092,7 +1093,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
bool bounce = false, split, read_full = false;
- bch_increment_clock(c, bio_sectors(&orig->bio), READ);
+ bch2_increment_clock(c, bio_sectors(&orig->bio), READ);
EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
k.k->p.offset < bvec_iter_end_sector(iter));
@@ -1106,7 +1107,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
if (should_promote(c, pick, flags)) {
/*
* biovec needs to be big enough to hold decompressed data, if
- * the bch_write_extent() has to decompress/recompress it:
+ * the bch2_write_extent() has to decompress/recompress it:
*/
unsigned sectors =
max_t(unsigned, k.k->size,
@@ -1133,7 +1134,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
if (pick->crc.compression_type != BCH_COMPRESSION_NONE ||
(pick->crc.csum_type != BCH_CSUM_NONE &&
(bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc) ||
- (bch_csum_type_is_encryption(pick->crc.csum_type) &&
+ (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
(flags & BCH_READ_USER_MAPPED)) ||
(flags & BCH_READ_FORCE_BOUNCE)))) {
read_full = true;
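[Editor's note, not part of the patch] The compound condition above is easier to follow factored out. The following is only a restatement of the read_full decision, answering "must the whole extent be read (and bounced) rather than read directly into the caller's pages?":

/* Illustrative restatement of the read_full condition above: */
static bool example_must_read_full(struct extent_pick_ptr *pick,
				   struct bvec_iter iter, unsigned flags)
{
	if (pick->crc.compression_type != BCH_COMPRESSION_NONE)
		return true;	/* must decompress the whole extent */

	if (pick->crc.csum_type == BCH_CSUM_NONE)
		return false;	/* no checksum, a partial read is fine */

	/* the checksum covers the full extent, so partial reads read it all: */
	if (bvec_iter_sectors(iter) != crc_uncompressed_size(NULL, &pick->crc))
		return true;

	/* encrypted reads into user-mapped memory go through a bounce buffer: */
	if (bch2_csum_type_is_encryption(pick->crc.csum_type) &&
	    (flags & BCH_READ_USER_MAPPED))
		return true;

	return flags & BCH_READ_FORCE_BOUNCE;
}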
@@ -1150,7 +1151,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
&c->bio_read_split),
struct bch_read_bio, bio);
- bch_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
+ bch2_bio_alloc_pages_pool(c, &rbio->bio, sectors << 9);
split = true;
} else if (!(flags & BCH_READ_MAY_REUSE_BIO) ||
!(flags & BCH_READ_IS_LAST)) {
@@ -1200,12 +1201,12 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
rbio->version = k.k->version;
rbio->promote = promote_op;
rbio->inode = k.k->p.inode;
- INIT_WORK(&rbio->work, __bch_read_endio);
+ INIT_WORK(&rbio->work, __bch2_read_endio);
rbio->bio.bi_bdev = pick->ca->disk_sb.bdev;
rbio->bio.bi_opf = orig->bio.bi_opf;
rbio->bio.bi_iter.bi_sector = pick->ptr.offset;
- rbio->bio.bi_end_io = bch_read_endio;
+ rbio->bio.bi_end_io = bch2_read_endio;
if (promote_op) {
struct bio *promote_bio = &promote_op->write.wbio.bio;
@@ -1214,7 +1215,7 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
memcpy(promote_bio->bi_io_vec, rbio->bio.bi_io_vec,
sizeof(struct bio_vec) * rbio->bio.bi_vcnt);
- bch_migrate_write_init(c, &promote_op->write,
+ bch2_migrate_write_init(c, &promote_op->write,
&c->promote_write_point,
k, NULL,
BCH_WRITE_ALLOC_NOWAIT|
@@ -1254,15 +1255,15 @@ void bch_read_extent_iter(struct bch_fs *c, struct bch_read_bio *orig,
rbio->submit_time_us = local_clock_us();
if (bounce)
- trace_bcache_read_bounce(&rbio->bio);
+ trace_read_bounce(&rbio->bio);
if (!(flags & BCH_READ_IS_LAST))
- trace_bcache_read_split(&rbio->bio);
+ trace_read_split(&rbio->bio);
generic_make_request(&rbio->bio);
}
-static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
+static void bch2_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
struct bvec_iter bvec_iter, u64 inode,
unsigned flags)
{
@@ -1284,9 +1285,9 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
*/
bkey_reassemble(&tmp.k, k);
k = bkey_i_to_s_c(&tmp.k);
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
- bch_extent_pick_ptr(c, k, &pick);
+ bch2_extent_pick_ptr(c, k, &pick);
if (IS_ERR(pick.ca)) {
bcache_io_error(c, bio, "no device to read from");
bio_endio(bio);
@@ -1307,7 +1308,7 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
c->prio_clock[READ].hand;
- bch_read_extent_iter(c, rbio, bvec_iter,
+ bch2_read_extent_iter(c, rbio, bvec_iter,
k, &pick, flags);
flags &= ~BCH_READ_MAY_REUSE_BIO;
@@ -1329,15 +1330,15 @@ static void bch_read_iter(struct bch_fs *c, struct bch_read_bio *rbio,
* If we get here, it better have been because there was an error
* reading a btree node
*/
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
BUG_ON(!ret);
bcache_io_error(c, bio, "btree IO error %i", ret);
bio_endio(bio);
}
-void bch_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
+void bch2_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
{
- bch_read_iter(c, bio, bio->bio.bi_iter, inode,
+ bch2_read_iter(c, bio, bio->bio.bi_iter, inode,
BCH_READ_RETRY_IF_STALE|
BCH_READ_PROMOTE|
BCH_READ_MAY_REUSE_BIO|
@@ -1345,26 +1346,26 @@ void bch_read(struct bch_fs *c, struct bch_read_bio *bio, u64 inode)
}
/**
- * bch_read_retry - re-submit a bio originally from bch_read()
+ * bch_read_retry - re-submit a bio originally from bch2_read()
+ * bch2_read_retry - re-submit a bio originally from bch2_read()
*/
-static void bch_read_retry(struct bch_fs *c, struct bch_read_bio *rbio)
+static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio)
{
- struct bch_read_bio *parent = bch_rbio_parent(rbio);
+ struct bch_read_bio *parent = bch2_rbio_parent(rbio);
struct bvec_iter iter = rbio->parent_iter;
unsigned flags = rbio->flags;
u64 inode = rbio->inode;
- trace_bcache_read_retry(&rbio->bio);
+ trace_read_retry(&rbio->bio);
if (rbio->split)
- bch_rbio_free(rbio);
+ bch2_rbio_free(rbio);
else
rbio->bio.bi_end_io = rbio->orig_bi_end_io;
- bch_read_iter(c, parent, iter, inode, flags);
+ bch2_read_iter(c, parent, iter, inode, flags);
}
-void bch_read_retry_work(struct work_struct *work)
+void bch2_read_retry_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs,
read_retry_work);
@@ -1381,6 +1382,6 @@ void bch_read_retry_work(struct work_struct *work)
break;
rbio = container_of(bio, struct bch_read_bio, bio);
- bch_read_retry(c, rbio);
+ bch2_read_retry(c, rbio);
}
}
diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h
index bc7c9cf81494..253316a4be99 100644
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -9,8 +9,8 @@
#define to_rbio(_bio) \
container_of((_bio), struct bch_read_bio, bio)
-void bch_bio_free_pages_pool(struct bch_fs *, struct bio *);
-void bch_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
+void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
+void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
enum bch_write_flags {
BCH_WRITE_ALLOC_NOWAIT = (1 << 0),
@@ -39,27 +39,27 @@ static inline struct write_point *foreground_write_point(struct bch_fs *c,
hash_long(v, ilog2(ARRAY_SIZE(c->write_points)));
}
-void bch_write_op_init(struct bch_write_op *, struct bch_fs *,
- struct bch_write_bio *,
- struct disk_reservation, struct write_point *,
- struct bpos, u64 *, unsigned);
-void bch_write(struct closure *);
+void bch2_write_op_init(struct bch_write_op *, struct bch_fs *,
+ struct bch_write_bio *,
+ struct disk_reservation, struct write_point *,
+ struct bpos, u64 *, unsigned);
+void bch2_write(struct closure *);
struct cache_promote_op;
struct extent_pick_ptr;
-void bch_read_extent_iter(struct bch_fs *, struct bch_read_bio *,
- struct bvec_iter, struct bkey_s_c k,
- struct extent_pick_ptr *, unsigned);
+void bch2_read_extent_iter(struct bch_fs *, struct bch_read_bio *,
+ struct bvec_iter, struct bkey_s_c k,
+ struct extent_pick_ptr *, unsigned);
-static inline void bch_read_extent(struct bch_fs *c,
- struct bch_read_bio *orig,
- struct bkey_s_c k,
- struct extent_pick_ptr *pick,
- unsigned flags)
+static inline void bch2_read_extent(struct bch_fs *c,
+ struct bch_read_bio *orig,
+ struct bkey_s_c k,
+ struct extent_pick_ptr *pick,
+ unsigned flags)
{
- bch_read_extent_iter(c, orig, orig->bio.bi_iter,
+ bch2_read_extent_iter(c, orig, orig->bio.bi_iter,
k, pick, flags);
}
@@ -72,16 +72,16 @@ enum bch_read_flags {
BCH_READ_USER_MAPPED = 1 << 5,
};
-void bch_read(struct bch_fs *, struct bch_read_bio *, u64);
+void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
-void bch_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
- const struct bkey_i *);
+void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
+ const struct bkey_i *);
-int bch_discard(struct bch_fs *, struct bpos, struct bpos,
- struct bversion, struct disk_reservation *,
- struct extent_insert_hook *, u64 *);
+int bch2_discard(struct bch_fs *, struct bpos, struct bpos,
+ struct bversion, struct disk_reservation *,
+ struct extent_insert_hook *, u64 *);
-void bch_read_retry_work(struct work_struct *);
-void bch_wake_delayed_writes(unsigned long data);
+void bch2_read_retry_work(struct work_struct *);
+void bch2_wake_delayed_writes(unsigned long data);
#endif /* _BCACHE_IO_H */
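[Editor's note, not part of the patch] A usage sketch of the read-side interface above, mirroring the pattern in bch2_read_iter(); error reporting is elided and the IS_ERR_OR_NULL check on the pick is an assumption about bch2_extent_pick_ptr()'s failure modes:

/* Illustrative only: read one extent for an internal (non user-mapped) consumer. */
static void example_read_one_extent(struct bch_fs *c,
				    struct bch_read_bio *rbio,
				    struct bkey_s_c k)
{
	struct extent_pick_ptr pick;

	bch2_extent_pick_ptr(c, k, &pick);
	if (IS_ERR_OR_NULL(pick.ca)) {
		/* no usable replica: real callers report an IO error here */
		bio_endio(&rbio->bio);
		return;
	}

	bch2_read_extent(c, rbio, k, &pick,
			 BCH_READ_RETRY_IF_STALE|BCH_READ_IS_LAST);
}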
diff --git a/fs/bcachefs/io_types.h b/fs/bcachefs/io_types.h
index ca1b0192fad0..07ea67c69aff 100644
--- a/fs/bcachefs/io_types.h
+++ b/fs/bcachefs/io_types.h
@@ -57,7 +57,7 @@ struct bch_read_bio {
};
static inline struct bch_read_bio *
-bch_rbio_parent(struct bch_read_bio *rbio)
+bch2_rbio_parent(struct bch_read_bio *rbio)
{
return rbio->split ? rbio->parent : rbio;
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 136b8c87ea3a..60c5c9b0309b 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1,10 +1,10 @@
/*
- * bcache journalling code, for btree insertions
+ * bcachefs journalling code, for btree insertions
*
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "bkey_methods.h"
#include "buckets.h"
@@ -75,19 +75,19 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
for_each_jset_entry_type(entry, jset, JOURNAL_ENTRY_BTREE_KEYS) \
vstruct_for_each_safe(entry, k, _n)
-static inline void bch_journal_add_entry(struct journal_buf *buf,
+static inline void bch2_journal_add_entry(struct journal_buf *buf,
const void *data, size_t u64s,
unsigned type, enum btree_id id,
unsigned level)
{
struct jset *jset = buf->data;
- bch_journal_add_entry_at(buf, data, u64s, type, id, level,
+ bch2_journal_add_entry_at(buf, data, u64s, type, id, level,
le32_to_cpu(jset->u64s));
le32_add_cpu(&jset->u64s, jset_u64s(u64s));
}
-static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type,
+static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
enum btree_id id)
{
struct jset_entry *entry;
@@ -99,12 +99,12 @@ static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type,
return NULL;
}
-struct bkey_i *bch_journal_find_btree_root(struct bch_fs *c, struct jset *j,
+struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
enum btree_id id, unsigned *level)
{
struct bkey_i *k;
struct jset_entry *entry =
- bch_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);
+ bch2_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);
if (!entry)
return NULL;
@@ -115,15 +115,15 @@ struct bkey_i *bch_journal_find_btree_root(struct bch_fs *c, struct jset *j,
return k;
}
-static void bch_journal_add_btree_root(struct journal_buf *buf,
+static void bch2_journal_add_btree_root(struct journal_buf *buf,
enum btree_id id, struct bkey_i *k,
unsigned level)
{
- bch_journal_add_entry(buf, k, k->k.u64s,
+ bch2_journal_add_entry(buf, k, k->k.u64s,
JOURNAL_ENTRY_BTREE_ROOT, id, level);
}
-static inline void bch_journal_add_prios(struct journal *j,
+static inline void bch2_journal_add_prios(struct journal *j,
struct journal_buf *buf)
{
/*
@@ -133,7 +133,7 @@ static inline void bch_journal_add_prios(struct journal *j,
if (!buf->nr_prio_buckets)
return;
- bch_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets,
+ bch2_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets,
JOURNAL_ENTRY_PRIO_PTRS, 0, 0);
}
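[Editor's note, not part of the patch] A brief note on the space accounting behind these helpers, derived from bch2_journal_add_entry_at() and jset_u64s() in journal.h below:

/*
 * Illustrative note only: every entry appended to a journal buffer is a
 * struct jset_entry header followed by its payload, so the cost charged
 * against the jset is
 *
 *	jset_u64s(n) = n + sizeof(struct jset_entry) / sizeof(u64)
 *
 * e.g. journalling a bkey with k->k.u64s == 3 consumes jset_u64s(3) u64s,
 * and even an empty padding entry (as written by bch2_journal_res_put()
 * for unused reservation space) still consumes jset_u64s(0).
 */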
@@ -163,18 +163,18 @@ static void journal_seq_blacklist_flush(struct journal *j,
n = bl->entries[i];
mutex_unlock(&j->blacklist_lock);
- bch_btree_iter_init(&iter, c, n.btree_id, n.pos);
+ bch2_btree_iter_init(&iter, c, n.btree_id, n.pos);
iter.is_extents = false;
redo_peek:
- b = bch_btree_iter_peek_node(&iter);
+ b = bch2_btree_iter_peek_node(&iter);
/* The node might have already been rewritten: */
if (b->data->keys.seq == n.seq &&
!bkey_cmp(b->key.k.p, n.pos)) {
- ret = bch_btree_node_rewrite(&iter, b, &cl);
+ ret = bch2_btree_node_rewrite(&iter, b, &cl);
if (ret) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
closure_sync(&cl);
if (ret == -EAGAIN ||
@@ -187,7 +187,7 @@ redo_peek:
}
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
}
closure_sync(&cl);
@@ -226,7 +226,7 @@ redo_wait:
mutex_lock(&j->blacklist_lock);
- bch_journal_pin_drop(j, &bl->pin);
+ bch2_journal_pin_drop(j, &bl->pin);
list_del(&bl->list);
kfree(bl->entries);
kfree(bl);
@@ -249,7 +249,7 @@ journal_seq_blacklist_find(struct journal *j, u64 seq)
}
static struct journal_seq_blacklist *
-bch_journal_seq_blacklisted_new(struct journal *j, u64 seq)
+bch2_journal_seq_blacklisted_new(struct journal *j, u64 seq)
{
struct journal_seq_blacklist *bl;
@@ -270,7 +270,7 @@ bch_journal_seq_blacklisted_new(struct journal *j, u64 seq)
* as blacklisted so that on future restarts the corresponding data will still
* be ignored:
*/
-int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
+int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
{
struct journal *j = &c->journal;
struct journal_seq_blacklist *bl = NULL;
@@ -301,7 +301,7 @@ int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
* Decrease this back to j->seq + 2 when we next rev the on disk format:
* increasing it temporarily to work around bug in old kernels
*/
- bch_fs_inconsistent_on(seq > journal_seq + 4, c,
+ bch2_fs_inconsistent_on(seq > journal_seq + 4, c,
"bset journal seq too far in the future: %llu > %llu",
seq, journal_seq);
@@ -309,14 +309,14 @@ int bch_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq);
/*
- * When we start the journal, bch_journal_start() will skip over @seq:
+ * When we start the journal, bch2_journal_start() will skip over @seq:
*/
mutex_lock(&j->blacklist_lock);
for (i = journal_seq + 1; i <= seq; i++) {
bl = journal_seq_blacklist_find(j, i) ?:
- bch_journal_seq_blacklisted_new(j, i);
+ bch2_journal_seq_blacklisted_new(j, i);
if (!bl) {
ret = -ENOMEM;
@@ -357,7 +357,7 @@ out:
/*
* Journal replay/recovery:
*
- * This code is all driven from bch_fs_start(); we first read the journal
+ * This code is all driven from bch2_fs_start(); we first read the journal
* entries, do some other stuff, then we mark all the keys in the journal
* entries (same as garbage collection would), then we replay them - reinserting
* them into the cache in precisely the same order as they appear in the
@@ -505,11 +505,11 @@ static int journal_validate_key(struct bch_fs *c, struct jset *j,
}
if (JSET_BIG_ENDIAN(j) != CPU_BIG_ENDIAN)
- bch_bkey_swab(key_type, NULL, bkey_to_packed(k));
+ bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));
- invalid = bkey_invalid(c, key_type, bkey_i_to_s_c(k));
+ invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
if (invalid) {
- bch_bkey_val_to_text(c, key_type, buf, sizeof(buf),
+ bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
bkey_i_to_s_c(k));
mustfix_fsck_err(c, "invalid %s in journal: %s", type, buf);
@@ -555,20 +555,20 @@ static int journal_entry_validate(struct bch_fs *c,
if (bytes > sectors_read << 9)
return JOURNAL_ENTRY_REREAD;
- if (fsck_err_on(!bch_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c,
+ if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c,
"journal entry with unknown csum type %llu sector %lluu",
JSET_CSUM_TYPE(j), sector))
return JOURNAL_ENTRY_BAD;
csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
- if (mustfix_fsck_err_on(bch_crc_cmp(csum, j->csum), c,
+ if (mustfix_fsck_err_on(bch2_crc_cmp(csum, j->csum), c,
"journal checksum bad, sector %llu", sector)) {
/* XXX: retry IO, when we start retrying checksum errors */
/* XXX: note we might have missing journal entries */
return JOURNAL_ENTRY_BAD;
}
- bch_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
+ bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
j->encrypted_start,
vstruct_end(j) - (void *) j->encrypted_start);
@@ -686,14 +686,14 @@ reread: sectors_read = min_t(unsigned,
bio->bi_iter.bi_sector = offset;
bio->bi_iter.bi_size = sectors_read << 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bch_bio_map(bio, buf->data);
+ bch2_bio_map(bio, buf->data);
ret = submit_bio_wait(bio);
- if (bch_dev_fatal_io_err_on(ret, ca,
+ if (bch2_dev_fatal_io_err_on(ret, ca,
"journal read from sector %llu",
offset) ||
- bch_meta_read_fault("journal"))
+ bch2_meta_read_fault("journal"))
return -EIO;
j = buf->data;
@@ -761,7 +761,7 @@ next_block:
return 0;
}
-static void bch_journal_read_device(struct closure *cl)
+static void bch2_journal_read_device(struct closure *cl)
{
#define read_bucket(b) \
({ \
@@ -907,7 +907,7 @@ err:
#undef read_bucket
}
-void bch_journal_entries_free(struct list_head *list)
+void bch2_journal_entries_free(struct list_head *list)
{
while (!list_empty(list)) {
@@ -933,7 +933,7 @@ static int journal_seq_blacklist_read(struct journal *j,
bch_verbose(c, "blacklisting existing journal seq %llu", seq);
- bl = bch_journal_seq_blacklisted_new(j, seq);
+ bl = bch2_journal_seq_blacklisted_new(j, seq);
if (!bl)
return -ENOMEM;
@@ -958,7 +958,7 @@ static inline bool journal_has_keys(struct list_head *list)
return false;
}
-int bch_journal_read(struct bch_fs *c, struct list_head *list)
+int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
struct jset_entry *prio_ptrs;
struct journal_list jlist;
@@ -978,7 +978,7 @@ int bch_journal_read(struct bch_fs *c, struct list_head *list)
for_each_readable_member(ca, c, iter) {
percpu_ref_get(&ca->io_ref);
closure_call(&ca->journal.read,
- bch_journal_read_device,
+ bch2_journal_read_device,
system_unbound_wq,
&jlist.cl);
}
@@ -1066,7 +1066,7 @@ int bch_journal_read(struct bch_fs *c, struct list_head *list)
cur_seq = le64_to_cpu(i->j.seq) + 1;
}
- prio_ptrs = bch_journal_find_entry(j, JOURNAL_ENTRY_PRIO_PTRS, 0);
+ prio_ptrs = bch2_journal_find_entry(j, JOURNAL_ENTRY_PRIO_PTRS, 0);
if (prio_ptrs) {
memcpy_u64s(c->journal.prio_buckets,
prio_ptrs->_data,
@@ -1077,7 +1077,7 @@ fsck_err:
return ret;
}
-void bch_journal_mark(struct bch_fs *c, struct list_head *list)
+void bch2_journal_mark(struct bch_fs *c, struct list_head *list)
{
struct bkey_i *k, *n;
struct jset_entry *j;
@@ -1089,7 +1089,7 @@ void bch_journal_mark(struct bch_fs *c, struct list_head *list)
struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
if (btree_type_has_ptrs(type))
- bch_btree_mark_key_initial(c, type, k_s_c);
+ bch2_btree_mark_key_initial(c, type, k_s_c);
}
}
@@ -1098,13 +1098,13 @@ static bool journal_entry_is_open(struct journal *j)
return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}
-void bch_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
+void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
if (!need_write_just_set &&
test_bit(JOURNAL_NEED_WRITE, &j->flags))
- __bch_time_stats_update(j->delay_time,
+ __bch2_time_stats_update(j->delay_time,
j->need_write_time);
#if 0
closure_call(&j->io, journal_write, NULL, &c->cl);
@@ -1116,7 +1116,7 @@ void bch_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
#endif
}
-static void __bch_journal_next_entry(struct journal *j)
+static void __bch2_journal_next_entry(struct journal *j)
{
struct journal_entry_pin_list pin_list, *p;
struct journal_buf *buf;
@@ -1210,24 +1210,24 @@ static enum {
BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
atomic_dec_bug(&fifo_peek_back(&j->pin).count);
- __bch_journal_next_entry(j);
+ __bch2_journal_next_entry(j);
cancel_delayed_work(&j->write_work);
spin_unlock(&j->lock);
if (c->bucket_journal_seq > 1 << 14) {
c->bucket_journal_seq = 0;
- bch_bucket_seq_cleanup(c);
+ bch2_bucket_seq_cleanup(c);
}
/* ugh - might be called from __journal_res_get() under wait_event() */
__set_current_state(TASK_RUNNING);
- bch_journal_buf_put(j, old.idx, need_write_just_set);
+ bch2_journal_buf_put(j, old.idx, need_write_just_set);
return JOURNAL_UNLOCKED;
}
-void bch_journal_halt(struct journal *j)
+void bch2_journal_halt(struct journal *j)
{
union journal_res_state old, new;
u64 v = atomic64_read(&j->reservations.counter);
@@ -1301,7 +1301,7 @@ static int journal_entry_sectors(struct journal *j)
* for the previous entry we have to make sure we have space for
* it too:
*/
- if (bch_extent_has_device(e.c, ca->dev_idx)) {
+ if (bch2_extent_has_device(e.c, ca->dev_idx)) {
if (j->prev_buf_sectors > ca->journal.sectors_free)
buckets_required++;
@@ -1391,7 +1391,7 @@ static int journal_entry_open(struct journal *j)
wake_up(&j->wait);
if (j->res_get_blocked_start) {
- __bch_time_stats_update(j->blocked_time,
+ __bch2_time_stats_update(j->blocked_time,
j->res_get_blocked_start);
j->res_get_blocked_start = 0;
}
@@ -1404,7 +1404,7 @@ static int journal_entry_open(struct journal *j)
return ret;
}
-void bch_journal_start(struct bch_fs *c)
+void bch2_journal_start(struct bch_fs *c)
{
struct journal *j = &c->journal;
struct journal_seq_blacklist *bl;
@@ -1433,7 +1433,7 @@ void bch_journal_start(struct bch_fs *c)
* closes an open journal entry - the very first journal entry gets
* initialized here:
*/
- __bch_journal_next_entry(j);
+ __bch2_journal_next_entry(j);
/*
* Adding entries to the next journal entry before allocating space on
@@ -1442,7 +1442,7 @@ void bch_journal_start(struct bch_fs *c)
*/
list_for_each_entry(bl, &j->seq_blacklist, list)
if (!bl->written) {
- bch_journal_add_entry(journal_cur_buf(j), &bl->seq, 1,
+ bch2_journal_add_entry(journal_cur_buf(j), &bl->seq, 1,
JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED,
0, 0);
@@ -1458,7 +1458,7 @@ void bch_journal_start(struct bch_fs *c)
queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
}
-int bch_journal_replay(struct bch_fs *c, struct list_head *list)
+int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
struct journal *j = &c->journal;
@@ -1480,13 +1480,13 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list)
* We might cause compressed extents to be split, so we
* need to pass in a disk_reservation:
*/
- BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));
+ BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
- ret = bch_btree_insert(c, entry->btree_id, k,
+ ret = bch2_btree_insert(c, entry->btree_id, k,
&disk_res, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_REPLAY);
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
if (ret)
goto err;
@@ -1502,7 +1502,7 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list)
}
if (keys) {
- bch_btree_flush(c);
+ bch2_btree_flush(c);
/*
* Write a new journal entry _before_ we start journalling new data -
@@ -1510,7 +1510,7 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list)
* arbitrarily far in the future vs. the most recently written journal
* entry on disk, if we crash before writing the next journal entry:
*/
- ret = bch_journal_meta(&c->journal);
+ ret = bch2_journal_meta(&c->journal);
if (ret)
goto err;
}
@@ -1518,12 +1518,12 @@ int bch_journal_replay(struct bch_fs *c, struct list_head *list)
bch_info(c, "journal replay done, %i keys in %i entries, seq %llu",
keys, entries, (u64) atomic64_read(&j->seq));
- bch_journal_set_replay_done(&c->journal);
+ bch2_journal_set_replay_done(&c->journal);
err:
if (ret)
bch_err(c, "journal replay error: %d", ret);
- bch_journal_entries_free(list);
+ bch2_journal_entries_free(list);
return ret;
}
@@ -1533,7 +1533,7 @@ err:
* Allocate more journal space at runtime - not currently making use of it, but
* the code works:
*/
-static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
+static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
unsigned nr)
{
struct journal *j = &c->journal;
@@ -1557,7 +1557,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
* reservation to ensure we'll actually be able to allocate:
*/
- if (bch_disk_reservation_get(c, &disk_res,
+ if (bch2_disk_reservation_get(c, &disk_res,
(nr - ja->nr) << ca->bucket_bits, 0))
return -ENOSPC;
@@ -1569,7 +1569,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
if (!new_buckets || !new_bucket_seq)
goto err;
- journal_buckets = bch_sb_resize_journal(&ca->disk_sb,
+ journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
nr + sizeof(*journal_buckets) / sizeof(u64));
if (!journal_buckets)
goto err;
@@ -1582,7 +1582,7 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
while (ja->nr < nr) {
/* must happen under journal lock, to avoid racing with gc: */
- u64 b = bch_bucket_alloc(ca, RESERVE_NONE);
+ u64 b = bch2_bucket_alloc(ca, RESERVE_NONE);
if (!b) {
if (!closure_wait(&c->freelist_wait, &cl)) {
spin_unlock(&j->lock);
@@ -1592,9 +1592,9 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
continue;
}
- bch_mark_metadata_bucket(ca, &ca->buckets[b],
+ bch2_mark_metadata_bucket(ca, &ca->buckets[b],
BUCKET_JOURNAL, false);
- bch_mark_alloc_bucket(ca, &ca->buckets[b], false);
+ bch2_mark_alloc_bucket(ca, &ca->buckets[b], false);
memmove(ja->buckets + ja->last_idx + 1,
ja->buckets + ja->last_idx,
@@ -1619,9 +1619,9 @@ static int bch_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
}
spin_unlock(&j->lock);
- BUG_ON(bch_validate_journal_layout(ca->disk_sb.sb, ca->mi));
+ BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi));
- bch_write_super(c);
+ bch2_write_super(c);
ret = 0;
err:
@@ -1629,20 +1629,20 @@ err:
kfree(new_bucket_seq);
kfree(new_buckets);
- bch_disk_reservation_put(c, &disk_res);
+ bch2_disk_reservation_put(c, &disk_res);
return ret;
}
#endif
-int bch_dev_journal_alloc(struct bch_dev *ca)
+int bch2_dev_journal_alloc(struct bch_dev *ca)
{
struct journal_device *ja = &ca->journal;
struct bch_sb_field_journal *journal_buckets;
unsigned i, nr;
u64 b, *p;
- if (dynamic_fault("bcache:add:journal_alloc"))
+ if (dynamic_fault("bcachefs:add:journal_alloc"))
return -ENOMEM;
/*
@@ -1668,7 +1668,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca)
ja->buckets = p;
- journal_buckets = bch_sb_resize_journal(&ca->disk_sb,
+ journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
nr + sizeof(*journal_buckets) / sizeof(u64));
if (!journal_buckets)
return -ENOMEM;
@@ -1678,7 +1678,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca)
if (!is_available_bucket(ca->buckets[b].mark))
continue;
- bch_mark_metadata_bucket(ca, &ca->buckets[b],
+ bch2_mark_metadata_bucket(ca, &ca->buckets[b],
BUCKET_JOURNAL, true);
ja->buckets[i] = b;
journal_buckets->buckets[i] = cpu_to_le64(b);
@@ -1688,7 +1688,7 @@ int bch_dev_journal_alloc(struct bch_dev *ca)
if (i < nr)
return -ENOSPC;
- BUG_ON(bch_validate_journal_layout(ca->disk_sb.sb, ca->mi));
+ BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi));
ja->nr = nr;
@@ -1757,7 +1757,7 @@ static void journal_pin_add_entry(struct journal *j,
spin_unlock_irq(&j->pin_lock);
}
-void bch_journal_pin_add(struct journal *j,
+void bch2_journal_pin_add(struct journal *j,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
@@ -1780,7 +1780,7 @@ static inline bool __journal_pin_drop(struct journal *j,
return atomic_dec_and_test(&pin_list->count);
}
-void bch_journal_pin_drop(struct journal *j,
+void bch2_journal_pin_drop(struct journal *j,
struct journal_entry_pin *pin)
{
unsigned long flags;
@@ -1803,7 +1803,7 @@ void bch_journal_pin_drop(struct journal *j,
wake_up(&j->wait);
}
-void bch_journal_pin_add_if_older(struct journal *j,
+void bch2_journal_pin_add_if_older(struct journal *j,
struct journal_entry_pin *src_pin,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
@@ -1844,7 +1844,7 @@ journal_get_next_pin(struct journal *j, u64 seq_to_flush)
ret = list_first_entry_or_null(&pin_list->list,
struct journal_entry_pin, list);
if (ret) {
- /* must be list_del_init(), see bch_journal_pin_drop() */
+ /* must be list_del_init(), see bch2_journal_pin_drop() */
list_del_init(&ret->list);
break;
}
@@ -1867,14 +1867,14 @@ static bool journal_has_pins(struct journal *j)
return ret;
}
-void bch_journal_flush_pins(struct journal *j)
+void bch2_journal_flush_pins(struct journal *j)
{
struct journal_entry_pin *pin;
while ((pin = journal_get_next_pin(j, U64_MAX)))
pin->flush(j, pin);
- wait_event(j->wait, !journal_has_pins(j) || bch_journal_error(j));
+ wait_event(j->wait, !journal_has_pins(j) || bch2_journal_error(j));
}
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
@@ -2032,12 +2032,12 @@ static int journal_write_alloc(struct journal *j, unsigned sectors)
if (ca->mi.state != BCH_MEMBER_STATE_RW ||
ca->journal.sectors_free <= sectors)
- __bch_extent_drop_ptr(e, ptr);
+ __bch2_extent_drop_ptr(e, ptr);
else
ca->journal.sectors_free -= sectors;
}
- replicas = bch_extent_nr_ptrs(e.c);
+ replicas = bch2_extent_nr_ptrs(e.c);
spin_lock(&j->devs.lock);
@@ -2067,7 +2067,7 @@ static int journal_write_alloc(struct journal *j, unsigned sectors)
* Check that we can use this device, and aren't already using
* it:
*/
- if (bch_extent_has_device(e.c, ca->dev_idx) ||
+ if (bch2_extent_has_device(e.c, ca->dev_idx) ||
!journal_dev_buckets_available(j, ca) ||
sectors > ca->mi.bucket_size)
continue;
@@ -2145,9 +2145,9 @@ static void journal_write_endio(struct bio *bio)
struct bch_dev *ca = bio->bi_private;
struct journal *j = &ca->fs->journal;
- if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
- bch_meta_write_fault("journal"))
- bch_journal_halt(j);
+ if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
+ bch2_meta_write_fault("journal"))
+ bch2_journal_halt(j);
closure_put(&j->io);
percpu_ref_put(&ca->io_ref);
@@ -2160,7 +2160,7 @@ static void journal_write_done(struct closure *cl)
j->last_seq_ondisk = le64_to_cpu(w->data->last_seq);
- __bch_time_stats_update(j->write_time, j->write_start_time);
+ __bch2_time_stats_update(j->write_time, j->write_start_time);
BUG_ON(!j->reservations.prev_buf_unwritten);
atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
@@ -2204,14 +2204,14 @@ static void journal_write(struct closure *cl)
j->write_start_time = local_clock();
- bch_journal_add_prios(j, w);
+ bch2_journal_add_prios(j, w);
mutex_lock(&c->btree_root_lock);
for (i = 0; i < BTREE_ID_NR; i++) {
struct btree_root *r = &c->btree_roots[i];
if (r->alive)
- bch_journal_add_btree_root(w, i, &r->key, r->level);
+ bch2_journal_add_btree_root(w, i, &r->key, r->level);
}
mutex_unlock(&c->btree_root_lock);
@@ -2223,9 +2223,9 @@ static void journal_write(struct closure *cl)
jset->version = cpu_to_le32(BCACHE_JSET_VERSION);
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
- SET_JSET_CSUM_TYPE(jset, bch_meta_checksum_type(c));
+ SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
- bch_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
+ bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
jset->encrypted_start,
vstruct_end(jset) - (void *) jset->encrypted_start);
@@ -2239,13 +2239,13 @@ static void journal_write(struct closure *cl)
memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
if (journal_write_alloc(j, sectors)) {
- bch_journal_halt(j);
+ bch2_journal_halt(j);
bch_err(c, "Unable to allocate journal write");
- bch_fatal_error(c);
+ bch2_fatal_error(c);
closure_return_with_destructor(cl, journal_write_done);
}
- bch_check_mark_super(c, &j->key, true);
+ bch2_check_mark_super(c, &j->key, true);
/*
* XXX: we really should just disable the entire journal in nochanges
@@ -2273,9 +2273,9 @@ static void journal_write(struct closure *cl)
bio->bi_private = ca;
bio_set_op_attrs(bio, REQ_OP_WRITE,
REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
- bch_bio_map(bio, jset);
+ bch2_bio_map(bio, jset);
- trace_bcache_journal_write(bio);
+ trace_journal_write(bio);
closure_bio_submit(bio, cl);
ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
@@ -2283,7 +2283,7 @@ static void journal_write(struct closure *cl)
for_each_rw_member(ca, c, i)
if (journal_flushes_device(ca) &&
- !bch_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) {
+ !bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) {
percpu_ref_get(&ca->io_ref);
bio = ca->journal.bio;
@@ -2318,7 +2318,7 @@ static void journal_write_work(struct work_struct *work)
* hasn't yet been flushed, return the journal sequence number that needs to be
* flushed:
*/
-u64 bch_inode_journal_seq(struct journal *j, u64 inode)
+u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
u64 seq = 0;
@@ -2370,7 +2370,7 @@ retry:
case JOURNAL_ENTRY_INUSE:
/* haven't finished writing out the previous one: */
spin_unlock(&j->lock);
- trace_bcache_journal_entry_full(c);
+ trace_journal_entry_full(c);
goto blocked;
case JOURNAL_ENTRY_CLOSED:
break;
@@ -2395,7 +2395,7 @@ retry:
*/
journal_reclaim_work(&j->reclaim_work.work);
- trace_bcache_journal_full(c);
+ trace_journal_full(c);
blocked:
if (!j->res_get_blocked_start)
j->res_get_blocked_start = local_clock() ?: 1;
@@ -2403,16 +2403,16 @@ blocked:
}
/*
- * Essentially the entry function to the journaling code. When bcache is doing
+ * Essentially the entry function to the journaling code. When bcachefs is doing
* a btree insert, it calls this function to get the current journal write.
* Journal write is the structure used to set up journal writes. The calling
- * function will then add its keys to the structure, queuing them for the
- * next write.
+ * function will then add its keys to the structure, queuing them for the next
+ * write.
*
* To ensure forward progress, the current task must not be holding any
* btree node write locks.
*/
-int bch_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
+int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
unsigned u64s_min, unsigned u64s_max)
{
int ret;
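[Editor's note, not part of the patch] To make the calling convention described above concrete, a minimal sketch of a journalling caller using the reservation helpers declared in journal.h (reserve space for the keys, copy them into the journal buffer, release the reservation):

/* Illustrative only -- not part of this patch. */
static int example_journal_one_key(struct journal *j, enum btree_id id,
				   const struct bkey_i *k)
{
	struct journal_res res;
	unsigned u64s = jset_u64s(k->k.u64s);
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, u64s, u64s);
	if (ret)
		return ret;

	bch2_journal_add_keys(j, &res, id, k);
	bch2_journal_res_put(j, &res);
	return 0;
}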
@@ -2423,13 +2423,13 @@ int bch_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
return ret < 0 ? ret : 0;
}
-void bch_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
+void bch2_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
{
spin_lock(&j->lock);
BUG_ON(seq > atomic64_read(&j->seq));
- if (bch_journal_error(j)) {
+ if (bch2_journal_error(j)) {
spin_unlock(&j->lock);
return;
}
@@ -2446,20 +2446,20 @@ void bch_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
/* check if raced with write completion (or failure) */
if (!j->reservations.prev_buf_unwritten ||
- bch_journal_error(j))
+ bch2_journal_error(j))
closure_wake_up(&journal_prev_buf(j)->wait);
}
spin_unlock(&j->lock);
}
-void bch_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
+void bch2_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
{
spin_lock(&j->lock);
BUG_ON(seq > atomic64_read(&j->seq));
- if (bch_journal_error(j)) {
+ if (bch2_journal_error(j)) {
spin_unlock(&j->lock);
return;
}
@@ -2502,41 +2502,41 @@ void bch_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *par
/* check if raced with write completion (or failure) */
if (!j->reservations.prev_buf_unwritten ||
- bch_journal_error(j))
+ bch2_journal_error(j))
closure_wake_up(&journal_prev_buf(j)->wait);
}
spin_unlock(&j->lock);
}
-int bch_journal_flush_seq(struct journal *j, u64 seq)
+int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
struct closure cl;
u64 start_time = local_clock();
closure_init_stack(&cl);
- bch_journal_flush_seq_async(j, seq, &cl);
+ bch2_journal_flush_seq_async(j, seq, &cl);
closure_sync(&cl);
- bch_time_stats_update(j->flush_seq_time, start_time);
+ bch2_time_stats_update(j->flush_seq_time, start_time);
- return bch_journal_error(j);
+ return bch2_journal_error(j);
}
-void bch_journal_meta_async(struct journal *j, struct closure *parent)
+void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
struct journal_res res;
unsigned u64s = jset_u64s(0);
memset(&res, 0, sizeof(res));
- bch_journal_res_get(j, &res, u64s, u64s);
- bch_journal_res_put(j, &res);
+ bch2_journal_res_get(j, &res, u64s, u64s);
+ bch2_journal_res_put(j, &res);
- bch_journal_flush_seq_async(j, res.seq, parent);
+ bch2_journal_flush_seq_async(j, res.seq, parent);
}
-int bch_journal_meta(struct journal *j)
+int bch2_journal_meta(struct journal *j)
{
struct journal_res res;
unsigned u64s = jset_u64s(0);
@@ -2544,16 +2544,16 @@ int bch_journal_meta(struct journal *j)
memset(&res, 0, sizeof(res));
- ret = bch_journal_res_get(j, &res, u64s, u64s);
+ ret = bch2_journal_res_get(j, &res, u64s, u64s);
if (ret)
return ret;
- bch_journal_res_put(j, &res);
+ bch2_journal_res_put(j, &res);
- return bch_journal_flush_seq(j, res.seq);
+ return bch2_journal_flush_seq(j, res.seq);
}
-void bch_journal_flush_async(struct journal *j, struct closure *parent)
+void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
u64 seq, journal_seq;
@@ -2570,10 +2570,10 @@ void bch_journal_flush_async(struct journal *j, struct closure *parent)
}
spin_unlock(&j->lock);
- bch_journal_flush_seq_async(j, seq, parent);
+ bch2_journal_flush_seq_async(j, seq, parent);
}
-int bch_journal_flush(struct journal *j)
+int bch2_journal_flush(struct journal *j)
{
u64 seq, journal_seq;
@@ -2590,10 +2590,10 @@ int bch_journal_flush(struct journal *j)
}
spin_unlock(&j->lock);
- return bch_journal_flush_seq(j, seq);
+ return bch2_journal_flush_seq(j, seq);
}
-ssize_t bch_journal_print_debug(struct journal *j, char *buf)
+ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
union journal_res_state *s = &j->reservations;
struct bch_dev *ca;
@@ -2648,13 +2648,13 @@ ssize_t bch_journal_print_debug(struct journal *j, char *buf)
return ret;
}
-static bool bch_journal_writing_to_device(struct bch_dev *ca)
+static bool bch2_journal_writing_to_device(struct bch_dev *ca)
{
struct journal *j = &ca->fs->journal;
bool ret;
spin_lock(&j->lock);
- ret = bch_extent_has_device(bkey_i_to_s_c_extent(&j->key),
+ ret = bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key),
ca->dev_idx);
spin_unlock(&j->lock);
@@ -2673,7 +2673,7 @@ static bool bch_journal_writing_to_device(struct bch_dev *ca)
* writeable and pick a new set of devices to write to.
*/
-int bch_journal_move(struct bch_dev *ca)
+int bch2_journal_move(struct bch_dev *ca)
{
u64 last_flushed_seq;
struct journal_device *ja = &ca->journal;
@@ -2682,7 +2682,7 @@ int bch_journal_move(struct bch_dev *ca)
unsigned i;
int ret = 0; /* Success */
- if (bch_journal_writing_to_device(ca)) {
+ if (bch2_journal_writing_to_device(ca)) {
/*
* bch2_journal_meta will write a record and we'll wait
* for the write to complete.
@@ -2690,8 +2690,8 @@ int bch_journal_move(struct bch_dev *ca)
* will call journal_next_bucket which notices that the
* device is no longer writeable, and picks a new one.
*/
- bch_journal_meta(j);
- BUG_ON(bch_journal_writing_to_device(ca));
+ bch2_journal_meta(j);
+ BUG_ON(bch2_journal_writing_to_device(ca));
}
/*
@@ -2703,14 +2703,14 @@ int bch_journal_move(struct bch_dev *ca)
/*
* XXX: switch to normal journal reclaim machinery
*/
- bch_btree_flush(c);
+ bch2_btree_flush(c);
/*
* Force a meta-data journal entry to be written so that
* we have newer journal entries in devices other than ca,
* and wait for the meta data write to complete.
*/
- bch_journal_meta(j);
+ bch2_journal_meta(j);
/*
* Verify that we no longer need any of the journal entries in
@@ -2726,7 +2726,7 @@ int bch_journal_move(struct bch_dev *ca)
return ret;
}
-void bch_fs_journal_stop(struct journal *j)
+void bch2_fs_journal_stop(struct journal *j)
{
if (!test_bit(JOURNAL_STARTED, &j->flags))
return;
@@ -2736,15 +2736,15 @@ void bch_fs_journal_stop(struct journal *j)
* journal entries, then force a brand new empty journal entry to be
* written:
*/
- bch_journal_flush_pins(j);
- bch_journal_flush_async(j, NULL);
- bch_journal_meta(j);
+ bch2_journal_flush_pins(j);
+ bch2_journal_flush_async(j, NULL);
+ bch2_journal_meta(j);
cancel_delayed_work_sync(&j->write_work);
cancel_delayed_work_sync(&j->reclaim_work);
}
-void bch_dev_journal_exit(struct bch_dev *ca)
+void bch2_dev_journal_exit(struct bch_dev *ca)
{
kfree(ca->journal.bio);
kfree(ca->journal.buckets);
@@ -2755,18 +2755,18 @@ void bch_dev_journal_exit(struct bch_dev *ca)
ca->journal.bucket_seq = NULL;
}
-int bch_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
+int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
struct journal_device *ja = &ca->journal;
struct bch_sb_field_journal *journal_buckets =
- bch_sb_get_journal(sb);
+ bch2_sb_get_journal(sb);
unsigned i, journal_entry_pages;
journal_entry_pages =
DIV_ROUND_UP(1U << BCH_SB_JOURNAL_ENTRY_SIZE(sb),
PAGE_SECTORS);
- ja->nr = bch_nr_journal_buckets(journal_buckets);
+ ja->nr = bch2_nr_journal_buckets(journal_buckets);
ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!ja->bucket_seq)
@@ -2786,7 +2786,7 @@ int bch_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
return 0;
}
-void bch_fs_journal_exit(struct journal *j)
+void bch2_fs_journal_exit(struct journal *j)
{
unsigned order = get_order(j->entry_size_max);
@@ -2795,7 +2795,7 @@ void bch_fs_journal_exit(struct journal *j)
free_fifo(&j->pin);
}
-int bch_fs_journal_init(struct journal *j, unsigned entry_size_max)
+int bch2_fs_journal_init(struct journal *j, unsigned entry_size_max)
{
static struct lock_class_key res_key;
unsigned order = get_order(entry_size_max);
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index c83f81046f47..f5fc465a752a 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -27,7 +27,7 @@
* possible, if the write for the previous journal entry was still in flight).
*
* Synchronous updates are specified by passing a closure (@flush_cl) to
- * bch_btree_insert() or bch_btree_insert_node(), which then pass that parameter
+ * bch2_btree_insert() or bch2_btree_insert_node(), which then passes that parameter
* down to the journalling code. That closure will wait on the journal
* write to complete (via closure_wait()).
*
@@ -128,25 +128,25 @@ static inline bool journal_pin_active(struct journal_entry_pin *pin)
return pin->pin_list != NULL;
}
-void bch_journal_pin_add(struct journal *, struct journal_entry_pin *,
+void bch2_journal_pin_add(struct journal *, struct journal_entry_pin *,
journal_pin_flush_fn);
-void bch_journal_pin_drop(struct journal *, struct journal_entry_pin *);
-void bch_journal_pin_add_if_older(struct journal *,
+void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
+void bch2_journal_pin_add_if_older(struct journal *,
struct journal_entry_pin *,
struct journal_entry_pin *,
journal_pin_flush_fn);
-void bch_journal_flush_pins(struct journal *);
+void bch2_journal_flush_pins(struct journal *);
struct closure;
struct bch_fs;
struct keylist;
-struct bkey_i *bch_journal_find_btree_root(struct bch_fs *, struct jset *,
+struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *, struct jset *,
enum btree_id, unsigned *);
-int bch_journal_seq_should_ignore(struct bch_fs *, u64, struct btree *);
+int bch2_journal_seq_should_ignore(struct bch_fs *, u64, struct btree *);
-u64 bch_inode_journal_seq(struct journal *, u64);
+u64 bch2_inode_journal_seq(struct journal *, u64);
static inline int journal_state_count(union journal_res_state s, int idx)
{
@@ -159,7 +159,7 @@ static inline void journal_state_inc(union journal_res_state *s)
s->buf1_count += s->idx == 1;
}
-static inline void bch_journal_set_has_inode(struct journal_buf *buf, u64 inum)
+static inline void bch2_journal_set_has_inode(struct journal_buf *buf, u64 inum)
{
set_bit(hash_64(inum, ilog2(sizeof(buf->has_inode) * 8)), buf->has_inode);
}
@@ -173,7 +173,7 @@ static inline unsigned jset_u64s(unsigned u64s)
return u64s + sizeof(struct jset_entry) / sizeof(u64);
}
-static inline void bch_journal_add_entry_at(struct journal_buf *buf,
+static inline void bch2_journal_add_entry_at(struct journal_buf *buf,
const void *data, size_t u64s,
unsigned type, enum btree_id id,
unsigned level, unsigned offset)
@@ -189,7 +189,7 @@ static inline void bch_journal_add_entry_at(struct journal_buf *buf,
memcpy_u64s(entry->_data, data, u64s);
}
-static inline void bch_journal_add_keys(struct journal *j, struct journal_res *res,
+static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
enum btree_id id, const struct bkey_i *k)
{
struct journal_buf *buf = &j->buf[res->idx];
@@ -198,9 +198,9 @@ static inline void bch_journal_add_keys(struct journal *j, struct journal_res *r
EBUG_ON(!res->ref);
BUG_ON(actual > res->u64s);
- bch_journal_set_has_inode(buf, k->k.p.inode);
+ bch2_journal_set_has_inode(buf, k->k.p.inode);
- bch_journal_add_entry_at(buf, k, k->k.u64s,
+ bch2_journal_add_entry_at(buf, k, k->k.u64s,
JOURNAL_ENTRY_BTREE_KEYS, id,
0, res->offset);
@@ -208,9 +208,9 @@ static inline void bch_journal_add_keys(struct journal *j, struct journal_res *r
res->u64s -= actual;
}
-void bch_journal_buf_put_slowpath(struct journal *, bool);
+void bch2_journal_buf_put_slowpath(struct journal *, bool);
-static inline void bch_journal_buf_put(struct journal *j, unsigned idx,
+static inline void bch2_journal_buf_put(struct journal *j, unsigned idx,
bool need_write_just_set)
{
union journal_res_state s;
@@ -229,14 +229,14 @@ static inline void bch_journal_buf_put(struct journal *j, unsigned idx,
if (s.idx != idx &&
!journal_state_count(s, idx) &&
s.cur_entry_offset != JOURNAL_ENTRY_ERROR_VAL)
- bch_journal_buf_put_slowpath(j, need_write_just_set);
+ bch2_journal_buf_put_slowpath(j, need_write_just_set);
}
/*
* This function releases the journal write structure so other threads can
* then proceed to add their keys as well.
*/
-static inline void bch_journal_res_put(struct journal *j,
+static inline void bch2_journal_res_put(struct journal *j,
struct journal_res *res)
{
if (!res->ref)
@@ -245,19 +245,19 @@ static inline void bch_journal_res_put(struct journal *j,
lock_release(&j->res_map, 0, _RET_IP_);
while (res->u64s) {
- bch_journal_add_entry_at(&j->buf[res->idx], NULL, 0,
+ bch2_journal_add_entry_at(&j->buf[res->idx], NULL, 0,
JOURNAL_ENTRY_BTREE_KEYS,
0, 0, res->offset);
res->offset += jset_u64s(0);
res->u64s -= jset_u64s(0);
}
- bch_journal_buf_put(j, res->idx, false);
+ bch2_journal_buf_put(j, res->idx, false);
res->ref = 0;
}
-int bch_journal_res_get_slowpath(struct journal *, struct journal_res *,
+int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
unsigned, unsigned);
static inline int journal_res_get_fast(struct journal *j,
@@ -293,7 +293,7 @@ static inline int journal_res_get_fast(struct journal *j,
return 1;
}
-static inline int bch_journal_res_get(struct journal *j, struct journal_res *res,
+static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res,
unsigned u64s_min, unsigned u64s_max)
{
int ret;
@@ -304,7 +304,7 @@ static inline int bch_journal_res_get(struct journal *j, struct journal_res *res
if (journal_res_get_fast(j, res, u64s_min, u64s_max))
goto out;
- ret = bch_journal_res_get_slowpath(j, res, u64s_min, u64s_max);
+ ret = bch2_journal_res_get_slowpath(j, res, u64s_min, u64s_max);
if (ret)
return ret;
out:
@@ -313,18 +313,18 @@ out:
return 0;
}
-void bch_journal_wait_on_seq(struct journal *, u64, struct closure *);
-void bch_journal_flush_seq_async(struct journal *, u64, struct closure *);
-void bch_journal_flush_async(struct journal *, struct closure *);
-void bch_journal_meta_async(struct journal *, struct closure *);
+void bch2_journal_wait_on_seq(struct journal *, u64, struct closure *);
+void bch2_journal_flush_seq_async(struct journal *, u64, struct closure *);
+void bch2_journal_flush_async(struct journal *, struct closure *);
+void bch2_journal_meta_async(struct journal *, struct closure *);
-int bch_journal_flush_seq(struct journal *, u64);
-int bch_journal_flush(struct journal *);
-int bch_journal_meta(struct journal *);
+int bch2_journal_flush_seq(struct journal *, u64);
+int bch2_journal_flush(struct journal *);
+int bch2_journal_meta(struct journal *);
-void bch_journal_halt(struct journal *);
+void bch2_journal_halt(struct journal *);
-static inline int bch_journal_error(struct journal *j)
+static inline int bch2_journal_error(struct journal *j)
{
return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL
? -EIO : 0;
@@ -335,13 +335,13 @@ static inline bool journal_flushes_device(struct bch_dev *ca)
return true;
}
-void bch_journal_start(struct bch_fs *);
-void bch_journal_mark(struct bch_fs *, struct list_head *);
-void bch_journal_entries_free(struct list_head *);
-int bch_journal_read(struct bch_fs *, struct list_head *);
-int bch_journal_replay(struct bch_fs *, struct list_head *);
+void bch2_journal_start(struct bch_fs *);
+void bch2_journal_mark(struct bch_fs *, struct list_head *);
+void bch2_journal_entries_free(struct list_head *);
+int bch2_journal_read(struct bch_fs *, struct list_head *);
+int bch2_journal_replay(struct bch_fs *, struct list_head *);
-static inline void bch_journal_set_replay_done(struct journal *j)
+static inline void bch2_journal_set_replay_done(struct journal *j)
{
spin_lock(&j->lock);
BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
@@ -351,23 +351,23 @@ static inline void bch_journal_set_replay_done(struct journal *j)
spin_unlock(&j->lock);
}
-ssize_t bch_journal_print_debug(struct journal *, char *);
+ssize_t bch2_journal_print_debug(struct journal *, char *);
-int bch_dev_journal_alloc(struct bch_dev *);
+int bch2_dev_journal_alloc(struct bch_dev *);
-static inline unsigned bch_nr_journal_buckets(struct bch_sb_field_journal *j)
+static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j)
{
return j
? (__le64 *) vstruct_end(&j->field) - j->buckets
: 0;
}
-int bch_journal_move(struct bch_dev *);
+int bch2_journal_move(struct bch_dev *);
-void bch_fs_journal_stop(struct journal *);
-void bch_dev_journal_exit(struct bch_dev *);
-int bch_dev_journal_init(struct bch_dev *, struct bch_sb *);
-void bch_fs_journal_exit(struct journal *);
-int bch_fs_journal_init(struct journal *, unsigned);
+void bch2_fs_journal_stop(struct journal *);
+void bch2_dev_journal_exit(struct bch_dev *);
+int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *);
+void bch2_fs_journal_exit(struct journal *);
+int bch2_fs_journal_init(struct journal *, unsigned);
#endif /* _BCACHE_JOURNAL_H */
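[Editor's note, not part of the patch] Pulling the interface above together, a rough sketch of the startup ordering described in journal.c (read the entries, mark their keys, start the journal, replay); error handling and the intervening recovery steps are elided, and the exact ordering in bch2_fs_start() may differ:

/* Illustrative only -- not part of this patch. */
static int example_journal_bringup(struct bch_fs *c)
{
	LIST_HEAD(journal_entries);
	int ret;

	/* read all journal entries from every readable device: */
	ret = bch2_journal_read(c, &journal_entries);
	if (ret)
		return ret;

	/* mark keys referenced by the journal (as initial gc would): */
	bch2_journal_mark(c, &journal_entries);

	/* open the first new journal entry (skipping blacklisted seqs): */
	bch2_journal_start(c);

	/* re-insert the journalled keys; this also frees the list: */
	ret = bch2_journal_replay(c, &journal_entries);

	/* shutdown is the reverse: bch2_fs_journal_stop(), bch2_fs_journal_exit() */
	return ret;
}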
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
index adf5eebae0bb..51dd7edc2900 100644
--- a/fs/bcachefs/keylist.c
+++ b/fs/bcachefs/keylist.c
@@ -1,8 +1,8 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "keylist.h"
-int bch_keylist_realloc(struct keylist *l, u64 *inline_u64s,
+int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
size_t nr_inline_u64s, size_t new_u64s)
{
size_t oldsize = bch_keylist_u64s(l);
@@ -29,7 +29,7 @@ int bch_keylist_realloc(struct keylist *l, u64 *inline_u64s,
return 0;
}
-void bch_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
+void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
{
struct bkey_i *where;
@@ -45,9 +45,9 @@ void bch_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
bkey_copy(where, insert);
}
-void bch_keylist_pop_front(struct keylist *l)
+void bch2_keylist_pop_front(struct keylist *l)
{
- l->top_p -= bch_keylist_front(l)->k.u64s;
+ l->top_p -= bch2_keylist_front(l)->k.u64s;
memmove_u64s_down(l->keys,
bkey_next(l->keys),
diff --git a/fs/bcachefs/keylist.h b/fs/bcachefs/keylist.h
index 1166f9415f1f..66628058e141 100644
--- a/fs/bcachefs/keylist.h
+++ b/fs/bcachefs/keylist.h
@@ -3,35 +3,35 @@
#include "keylist_types.h"
-int bch_keylist_realloc(struct keylist *, u64 *, size_t, size_t);
-void bch_keylist_add_in_order(struct keylist *, struct bkey_i *);
-void bch_keylist_pop_front(struct keylist *);
+int bch2_keylist_realloc(struct keylist *, u64 *, size_t, size_t);
+void bch2_keylist_add_in_order(struct keylist *, struct bkey_i *);
+void bch2_keylist_pop_front(struct keylist *);
-static inline void bch_keylist_init(struct keylist *l, u64 *inline_keys,
+static inline void bch2_keylist_init(struct keylist *l, u64 *inline_keys,
size_t nr_inline_u64s)
{
l->top_p = l->keys_p = inline_keys;
}
-static inline void bch_keylist_free(struct keylist *l, u64 *inline_keys)
+static inline void bch2_keylist_free(struct keylist *l, u64 *inline_keys)
{
if (l->keys_p != inline_keys)
kfree(l->keys_p);
memset(l, 0, sizeof(*l));
}
-static inline void bch_keylist_push(struct keylist *l)
+static inline void bch2_keylist_push(struct keylist *l)
{
l->top = bkey_next(l->top);
}
-static inline void bch_keylist_add(struct keylist *l, const struct bkey_i *k)
+static inline void bch2_keylist_add(struct keylist *l, const struct bkey_i *k)
{
bkey_copy(l->top, k);
- bch_keylist_push(l);
+ bch2_keylist_push(l);
}
-static inline bool bch_keylist_empty(struct keylist *l)
+static inline bool bch2_keylist_empty(struct keylist *l)
{
return l->top == l->keys;
}
@@ -41,12 +41,12 @@ static inline size_t bch_keylist_u64s(struct keylist *l)
return l->top_p - l->keys_p;
}
-static inline size_t bch_keylist_bytes(struct keylist *l)
+static inline size_t bch2_keylist_bytes(struct keylist *l)
{
return bch_keylist_u64s(l) * sizeof(u64);
}
-static inline struct bkey_i *bch_keylist_front(struct keylist *l)
+static inline struct bkey_i *bch2_keylist_front(struct keylist *l)
{
return l->keys;
}
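The keylist.h hunks above cover the whole of the small keylist API, so the post-rename usage pattern is visible in one place. A minimal sketch with a hypothetical inline buffer size; not part of the patch:

	/* Sketch only: typical lifecycle of a keylist using the helpers above. */
	static void example_use_keylist(struct bkey_i *k)
	{
		struct keylist keys;
		u64 inline_u64s[64];		/* hypothetical inline sizing */

		bch2_keylist_init(&keys, inline_u64s, ARRAY_SIZE(inline_u64s));
		bch2_keylist_add(&keys, k);	/* bkey_copy() into the top, then push */

		while (!bch2_keylist_empty(&keys)) {
			struct bkey_i *front = bch2_keylist_front(&keys);

			/* hand 'front' to the insert/write path here */
			(void) front;

			bch2_keylist_pop_front(&keys);
		}

		/* only kfree()s if the list was realloc'd off the inline buffer */
		bch2_keylist_free(&keys, inline_u64s);
	}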
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 5bd93be2fddf..f79b624d367b 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -2,7 +2,7 @@
* Code for moving data off a device.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_update.h"
#include "buckets.h"
#include "extents.h"
@@ -22,7 +22,7 @@ static int issue_migration_move(struct bch_dev *ca,
const struct bch_extent_ptr *ptr;
int ret;
- if (bch_disk_reservation_get(c, &res, k.k->size, 0))
+ if (bch2_disk_reservation_get(c, &res, k.k->size, 0))
return -ENOSPC;
extent_for_each_ptr(bkey_s_c_to_extent(k), ptr)
@@ -33,9 +33,9 @@ static int issue_migration_move(struct bch_dev *ca,
found:
/* XXX: we need to be doing something with the disk reservation */
- ret = bch_data_move(c, ctxt, &c->migration_write_point, k, ptr);
+ ret = bch2_data_move(c, ctxt, &c->migration_write_point, k, ptr);
if (ret)
- bch_disk_reservation_put(c, &res);
+ bch2_disk_reservation_put(c, &res);
return ret;
}
@@ -55,7 +55,7 @@ found:
* land in the same device even if there are others available.
*/
-int bch_move_data_off_device(struct bch_dev *ca)
+int bch2_move_data_off_device(struct bch_dev *ca)
{
struct moving_context ctxt;
struct bch_fs *c = ca->fs;
@@ -69,7 +69,7 @@ int bch_move_data_off_device(struct bch_dev *ca)
if (!ca->mi.has_data)
return 0;
- bch_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE);
+ bch2_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE);
ctxt.avoid = ca;
/*
@@ -97,25 +97,25 @@ int bch_move_data_off_device(struct bch_dev *ca)
atomic_set(&ctxt.error_count, 0);
atomic_set(&ctxt.error_flags, 0);
- bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
- while (!bch_move_ctxt_wait(&ctxt) &&
- (k = bch_btree_iter_peek(&iter)).k &&
+ while (!bch2_move_ctxt_wait(&ctxt) &&
+ (k = bch2_btree_iter_peek(&iter)).k &&
!(ret = btree_iter_err(k))) {
if (!bkey_extent_is_data(k.k) ||
- !bch_extent_has_device(bkey_s_c_to_extent(k),
+ !bch2_extent_has_device(bkey_s_c_to_extent(k),
ca->dev_idx))
goto next;
ret = issue_migration_move(ca, &ctxt, k);
if (ret == -ENOMEM) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
/*
* memory allocation failure, wait for some IO
* to finish
*/
- bch_move_ctxt_wait_for_io(&ctxt);
+ bch2_move_ctxt_wait_for_io(&ctxt);
continue;
}
if (ret == -ENOSPC)
@@ -124,12 +124,12 @@ int bch_move_data_off_device(struct bch_dev *ca)
seen_key_count++;
next:
- bch_btree_iter_advance_pos(&iter);
- bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_cond_resched(&iter);
}
- bch_btree_iter_unlock(&iter);
- bch_move_ctxt_exit(&ctxt);
+ bch2_btree_iter_unlock(&iter);
+ bch2_move_ctxt_exit(&ctxt);
if (ret)
return ret;
@@ -142,10 +142,10 @@ next:
}
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
SET_BCH_MEMBER_HAS_DATA(&mi->members[ca->dev_idx], false);
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
return 0;
@@ -155,7 +155,7 @@ next:
* This walks the btree, and for any node on the relevant device it moves the
* node elsewhere.
*/
-static int bch_move_btree_off(struct bch_dev *ca, enum btree_id id)
+static int bch2_move_btree_off(struct bch_dev *ca, enum btree_id id)
{
struct bch_fs *c = ca->fs;
struct btree_iter iter;
@@ -170,29 +170,29 @@ static int bch_move_btree_off(struct bch_dev *ca, enum btree_id id)
for_each_btree_node(&iter, c, id, POS_MIN, 0, b) {
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
retry:
- if (!bch_extent_has_device(e, ca->dev_idx))
+ if (!bch2_extent_has_device(e, ca->dev_idx))
continue;
- ret = bch_btree_node_rewrite(&iter, b, &cl);
+ ret = bch2_btree_node_rewrite(&iter, b, &cl);
if (ret == -EINTR || ret == -ENOSPC) {
/*
* Drop locks to upgrade locks or wait on
* reserve: after retaking, recheck in case we
* raced.
*/
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
closure_sync(&cl);
- b = bch_btree_iter_peek_node(&iter);
+ b = bch2_btree_iter_peek_node(&iter);
goto retry;
}
if (ret) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
- bch_btree_iter_set_locks_want(&iter, 0);
+ bch2_btree_iter_set_locks_want(&iter, 0);
}
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
if (ret)
return ret; /* btree IO error */
@@ -200,9 +200,9 @@ retry:
for_each_btree_node(&iter, c, id, POS_MIN, 0, b) {
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
- BUG_ON(bch_extent_has_device(e, ca->dev_idx));
+ BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
}
return 0;
@@ -252,7 +252,7 @@ retry:
* is written.
*/
-int bch_move_metadata_off_device(struct bch_dev *ca)
+int bch2_move_metadata_off_device(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct bch_sb_field_members *mi;
@@ -267,7 +267,7 @@ int bch_move_metadata_off_device(struct bch_dev *ca)
/* 1st, Move the btree nodes off the device */
for (i = 0; i < BTREE_ID_NR; i++) {
- ret = bch_move_btree_off(ca, i);
+ ret = bch2_move_btree_off(ca, i);
if (ret)
return ret;
}
@@ -276,15 +276,15 @@ int bch_move_metadata_off_device(struct bch_dev *ca)
/* 2nd. Move the journal off the device */
- ret = bch_journal_move(ca);
+ ret = bch2_journal_move(ca);
if (ret)
return ret;
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
SET_BCH_MEMBER_HAS_METADATA(&mi->members[ca->dev_idx], false);
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
return 0;
@@ -295,7 +295,7 @@ int bch_move_metadata_off_device(struct bch_dev *ca)
* migrate the data off the device.
*/
-static int bch_flag_key_bad(struct btree_iter *iter,
+static int bch2_flag_key_bad(struct btree_iter *iter,
struct bch_dev *ca,
struct bkey_s_c_extent orig)
{
@@ -309,16 +309,16 @@ static int bch_flag_key_bad(struct btree_iter *iter,
extent_for_each_ptr_backwards(e, ptr)
if (ptr->dev == ca->dev_idx)
- bch_extent_drop_ptr(e, ptr);
+ bch2_extent_drop_ptr(e, ptr);
/*
- * If the new extent no longer has any pointers, bch_extent_normalize()
+ * If the new extent no longer has any pointers, bch2_extent_normalize()
* will do the appropriate thing with it (turning it into a
* KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
*/
- bch_extent_normalize(c, e.s);
+ bch2_extent_normalize(c, e.s);
- return bch_btree_insert_at(c, NULL, NULL, NULL,
+ return bch2_btree_insert_at(c, NULL, NULL, NULL,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(iter, &tmp.key));
}
@@ -334,25 +334,25 @@ static int bch_flag_key_bad(struct btree_iter *iter,
* that we've already tried to move the data MAX_DATA_OFF_ITER times and
* are not likely to succeed if we try again.
*/
-int bch_flag_data_bad(struct bch_dev *ca)
+int bch2_flag_data_bad(struct bch_dev *ca)
{
int ret = 0;
struct bkey_s_c k;
struct bkey_s_c_extent e;
struct btree_iter iter;
- bch_btree_iter_init(&iter, ca->fs, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, ca->fs, BTREE_ID_EXTENTS, POS_MIN);
- while ((k = bch_btree_iter_peek(&iter)).k &&
+ while ((k = bch2_btree_iter_peek(&iter)).k &&
!(ret = btree_iter_err(k))) {
if (!bkey_extent_is_data(k.k))
goto advance;
e = bkey_s_c_to_extent(k);
- if (!bch_extent_has_device(e, ca->dev_idx))
+ if (!bch2_extent_has_device(e, ca->dev_idx))
goto advance;
- ret = bch_flag_key_bad(&iter, ca, e);
+ ret = bch2_flag_key_bad(&iter, ca, e);
/*
* don't want to leave ret == -EINTR, since if we raced and
@@ -386,10 +386,10 @@ int bch_flag_data_bad(struct bch_dev *ca)
*/
continue;
advance:
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
diff --git a/fs/bcachefs/migrate.h b/fs/bcachefs/migrate.h
index c6a056cbd250..81776bdcfe23 100644
--- a/fs/bcachefs/migrate.h
+++ b/fs/bcachefs/migrate.h
@@ -1,8 +1,8 @@
#ifndef _BCACHE_MIGRATE_H
#define _BCACHE_MIGRATE_H
-int bch_move_data_off_device(struct bch_dev *);
-int bch_move_metadata_off_device(struct bch_dev *);
-int bch_flag_data_bad(struct bch_dev *);
+int bch2_move_data_off_device(struct bch_dev *);
+int bch2_move_metadata_off_device(struct bch_dev *);
+int bch2_flag_data_bad(struct bch_dev *);
#endif /* _BCACHE_MIGRATE_H */
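migrate.h now exports the three bch2_*() entry points used when evacuating a device. One plausible calling order, inferred from the comments in migrate.c above (data extents first, then btree nodes and journal, with bch2_flag_data_bad() reserved for extents that repeatedly fail to move); a sketch only, not part of the patch:

	/* Sketch only: plausible ordering of the migrate.h entry points. */
	static int example_evacuate_device(struct bch_dev *ca)
	{
		int ret;

		ret = bch2_move_data_off_device(ca);	/* user data extents */
		if (ret)
			return ret;

		ret = bch2_move_metadata_off_device(ca); /* btree nodes, then journal */
		if (ret)
			return ret;

		/*
		 * bch2_flag_data_bad(ca) would only be used on a failure path,
		 * after repeated migration attempts, to drop this device's
		 * pointers and let bch2_extent_normalize() turn unreachable
		 * extents into error/discarded keys.
		 */
		return 0;
	}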
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index a9a9d3197b6d..f718f42ad454 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
@@ -29,7 +29,7 @@ static struct bch_extent_ptr *bkey_find_ptr(struct bch_fs *c,
return NULL;
}
-static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m,
+static struct bch_extent_ptr *bch2_migrate_matching_ptr(struct migrate_write *m,
struct bkey_s_extent e)
{
const struct bch_extent_ptr *ptr;
@@ -45,7 +45,7 @@ static struct bch_extent_ptr *bch_migrate_matching_ptr(struct migrate_write *m,
return ret;
}
-static int bch_migrate_index_update(struct bch_write_op *op)
+static int bch2_migrate_index_update(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct migrate_write *m =
@@ -54,19 +54,19 @@ static int bch_migrate_index_update(struct bch_write_op *op)
struct btree_iter iter;
int ret = 0;
- bch_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS,
- bkey_start_pos(&bch_keylist_front(keys)->k));
+ bch2_btree_iter_init_intent(&iter, c, BTREE_ID_EXTENTS,
+ bkey_start_pos(&bch2_keylist_front(keys)->k));
while (1) {
struct bkey_s_extent insert =
- bkey_i_to_s_extent(bch_keylist_front(keys));
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(&iter);
+ bkey_i_to_s_extent(bch2_keylist_front(keys));
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(&iter);
struct bch_extent_ptr *ptr;
struct bkey_s_extent e;
BKEY_PADDED(k) new;
if (!k.k) {
- ret = bch_btree_iter_unlock(&iter);
+ ret = bch2_btree_iter_unlock(&iter);
break;
}
@@ -74,19 +74,19 @@ static int bch_migrate_index_update(struct bch_write_op *op)
goto nomatch;
bkey_reassemble(&new.k, k);
- bch_cut_front(iter.pos, &new.k);
- bch_cut_back(insert.k->p, &new.k.k);
+ bch2_cut_front(iter.pos, &new.k);
+ bch2_cut_back(insert.k->p, &new.k.k);
e = bkey_i_to_s_extent(&new.k);
/* hack - promotes can race: */
if (m->promote)
extent_for_each_ptr(insert, ptr)
- if (bch_extent_has_device(e.c, ptr->dev))
+ if (bch2_extent_has_device(e.c, ptr->dev))
goto nomatch;
- ptr = bch_migrate_matching_ptr(m, e);
+ ptr = bch2_migrate_matching_ptr(m, e);
if (ptr) {
- int nr_new_dirty = bch_extent_nr_dirty_ptrs(insert.s_c);
+ int nr_new_dirty = bch2_extent_nr_dirty_ptrs(insert.s_c);
unsigned insert_flags =
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL;
@@ -97,7 +97,7 @@ static int bch_migrate_index_update(struct bch_write_op *op)
if (m->move) {
nr_new_dirty -= !ptr->cached;
- __bch_extent_drop_ptr(e, ptr);
+ __bch2_extent_drop_ptr(e, ptr);
}
BUG_ON(nr_new_dirty < 0);
@@ -107,12 +107,12 @@ static int bch_migrate_index_update(struct bch_write_op *op)
bkey_val_u64s(insert.k));
e.k->u64s += bkey_val_u64s(insert.k);
- bch_extent_narrow_crcs(e);
- bch_extent_drop_redundant_crcs(e);
- bch_extent_normalize(c, e.s);
- bch_extent_mark_replicas_cached(c, e, nr_new_dirty);
+ bch2_extent_narrow_crcs(e);
+ bch2_extent_drop_redundant_crcs(e);
+ bch2_extent_normalize(c, e.s);
+ bch2_extent_mark_replicas_cached(c, e, nr_new_dirty);
- ret = bch_btree_insert_at(c, &op->res,
+ ret = bch2_btree_insert_at(c, &op->res,
NULL, op_journal_seq(op),
insert_flags,
BTREE_INSERT_ENTRY(&iter, &new.k));
@@ -120,23 +120,23 @@ static int bch_migrate_index_update(struct bch_write_op *op)
break;
} else {
nomatch:
- bch_btree_iter_advance_pos(&iter);
+ bch2_btree_iter_advance_pos(&iter);
}
- while (bkey_cmp(iter.pos, bch_keylist_front(keys)->k.p) >= 0) {
- bch_keylist_pop_front(keys);
- if (bch_keylist_empty(keys))
+ while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
+ bch2_keylist_pop_front(keys);
+ if (bch2_keylist_empty(keys))
goto out;
}
- bch_cut_front(iter.pos, bch_keylist_front(keys));
+ bch2_cut_front(iter.pos, bch2_keylist_front(keys));
}
out:
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
-void bch_migrate_write_init(struct bch_fs *c,
+void bch2_migrate_write_init(struct bch_fs *c,
struct migrate_write *m,
struct write_point *wp,
struct bkey_s_c k,
@@ -154,7 +154,7 @@ void bch_migrate_write_init(struct bch_fs *c,
(move_ptr && move_ptr->cached))
flags |= BCH_WRITE_CACHED;
- bch_write_op_init(&m->op, c, &m->wbio,
+ bch2_write_op_init(&m->op, c, &m->wbio,
(struct disk_reservation) { 0 },
wp,
bkey_start_pos(k.k),
@@ -165,7 +165,7 @@ void bch_migrate_write_init(struct bch_fs *c,
m->op.nonce = extent_current_nonce(bkey_s_c_to_extent(k));
m->op.nr_replicas = 1;
- m->op.index_update_fn = bch_migrate_index_update;
+ m->op.index_update_fn = bch2_migrate_index_update;
}
static void migrate_bio_init(struct moving_io *io, struct bio *bio,
@@ -178,7 +178,7 @@ static void migrate_bio_init(struct moving_io *io, struct bio *bio,
bio->bi_max_vecs = DIV_ROUND_UP(sectors, PAGE_SECTORS);
bio->bi_private = &io->cl;
bio->bi_io_vec = io->bi_inline_vecs;
- bch_bio_map(bio, NULL);
+ bch2_bio_map(bio, NULL);
}
static void moving_io_destructor(struct closure *cl)
@@ -189,7 +189,7 @@ static void moving_io_destructor(struct closure *cl)
int i;
//if (io->replace.failures)
- // trace_bcache_copy_collision(q, &io->key.k);
+ // trace_copy_collision(q, &io->key.k);
atomic_sub(io->write.key.k.size, &ctxt->sectors_in_flight);
wake_up(&ctxt->wait);
@@ -225,7 +225,7 @@ static void write_moving(struct moving_io *io)
if (op->error) {
closure_return_with_destructor(&io->cl, moving_io_destructor);
} else {
- closure_call(&op->cl, bch_write, NULL, &io->cl);
+ closure_call(&op->cl, bch2_write, NULL, &io->cl);
closure_return_with_destructor(&io->cl, moving_io_after_write);
}
}
@@ -244,7 +244,7 @@ static void read_moving_endio(struct bio *bio)
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->ctxt;
- trace_bcache_move_read_done(&io->write.key.k);
+ trace_move_read_done(&io->write.key.k);
if (bio->bi_error) {
io->write.op.error = bio->bi_error;
@@ -258,13 +258,13 @@ static void read_moving_endio(struct bio *bio)
closure_put(&ctxt->cl);
}
-static void __bch_data_move(struct closure *cl)
+static void __bch2_data_move(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bch_fs *c = io->write.op.c;
struct extent_pick_ptr pick;
- bch_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key),
+ bch2_extent_pick_ptr_avoiding(c, bkey_i_to_s_c(&io->write.key),
io->ctxt->avoid, &pick);
if (IS_ERR_OR_NULL(pick.ca))
closure_return_with_destructor(cl, moving_io_destructor);
@@ -279,12 +279,12 @@ static void __bch_data_move(struct closure *cl)
*/
closure_get(&io->ctxt->cl);
- bch_read_extent(c, &io->rbio,
+ bch2_read_extent(c, &io->rbio,
bkey_i_to_s_c(&io->write.key),
&pick, BCH_READ_IS_LAST);
}
-int bch_data_move(struct bch_fs *c,
+int bch2_data_move(struct bch_fs *c,
struct moving_context *ctxt,
struct write_point *wp,
struct bkey_s_c k,
@@ -311,19 +311,19 @@ int bch_data_move(struct bch_fs *c,
bio_get(&io->write.wbio.bio);
io->write.wbio.bio.bi_iter.bi_sector = bkey_start_offset(k.k);
- bch_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);
+ bch2_migrate_write_init(c, &io->write, wp, k, move_ptr, 0);
- trace_bcache_move_read(&io->write.key.k);
+ trace_move_read(&io->write.key.k);
ctxt->keys_moved++;
ctxt->sectors_moved += k.k->size;
if (ctxt->rate)
- bch_ratelimit_increment(ctxt->rate, k.k->size);
+ bch2_ratelimit_increment(ctxt->rate, k.k->size);
atomic_add(k.k->size, &ctxt->sectors_in_flight);
list_add_tail(&io->list, &ctxt->reads);
- closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl);
+ closure_call(&io->cl, __bch2_data_move, NULL, &ctxt->cl);
return 0;
}
@@ -333,7 +333,7 @@ static void do_pending_writes(struct moving_context *ctxt)
while ((io = next_pending_write(ctxt))) {
list_del(&io->list);
- trace_bcache_move_write(&io->write.key.k);
+ trace_move_write(&io->write.key.k);
write_moving(io);
}
}
@@ -348,18 +348,18 @@ do { \
next_pending_write(_ctxt) || (_cond)); \
} while (1)
-int bch_move_ctxt_wait(struct moving_context *ctxt)
+int bch2_move_ctxt_wait(struct moving_context *ctxt)
{
move_ctxt_wait_event(ctxt,
atomic_read(&ctxt->sectors_in_flight) <
ctxt->max_sectors_in_flight);
return ctxt->rate
- ? bch_ratelimit_wait_freezable_stoppable(ctxt->rate)
+ ? bch2_ratelimit_wait_freezable_stoppable(ctxt->rate)
: 0;
}
-void bch_move_ctxt_wait_for_io(struct moving_context *ctxt)
+void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
{
unsigned sectors_pending = atomic_read(&ctxt->sectors_in_flight);
@@ -368,7 +368,7 @@ void bch_move_ctxt_wait_for_io(struct moving_context *ctxt)
atomic_read(&ctxt->sectors_in_flight) != sectors_pending);
}
-void bch_move_ctxt_exit(struct moving_context *ctxt)
+void bch2_move_ctxt_exit(struct moving_context *ctxt)
{
move_ctxt_wait_event(ctxt, !atomic_read(&ctxt->sectors_in_flight));
closure_sync(&ctxt->cl);
@@ -377,7 +377,7 @@ void bch_move_ctxt_exit(struct moving_context *ctxt)
EBUG_ON(atomic_read(&ctxt->sectors_in_flight));
}
-void bch_move_ctxt_init(struct moving_context *ctxt,
+void bch2_move_ctxt_init(struct moving_context *ctxt,
struct bch_ratelimit *rate,
unsigned max_sectors_in_flight)
{
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index 317431d69db8..548f0f0a8db1 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -22,7 +22,7 @@ struct migrate_write {
struct bch_write_bio wbio;
};
-void bch_migrate_write_init(struct bch_fs *,
+void bch2_migrate_write_init(struct bch_fs *,
struct migrate_write *,
struct write_point *,
struct bkey_s_c,
@@ -71,17 +71,17 @@ struct moving_io {
struct bio_vec bi_inline_vecs[0];
};
-int bch_data_move(struct bch_fs *,
+int bch2_data_move(struct bch_fs *,
struct moving_context *,
struct write_point *,
struct bkey_s_c,
const struct bch_extent_ptr *);
-int bch_move_ctxt_wait(struct moving_context *);
-void bch_move_ctxt_wait_for_io(struct moving_context *);
+int bch2_move_ctxt_wait(struct moving_context *);
+void bch2_move_ctxt_wait_for_io(struct moving_context *);
-void bch_move_ctxt_exit(struct moving_context *);
-void bch_move_ctxt_init(struct moving_context *, struct bch_ratelimit *,
+void bch2_move_ctxt_exit(struct moving_context *);
+void bch2_move_ctxt_init(struct moving_context *, struct bch_ratelimit *,
unsigned);
#endif /* _BCACHE_MOVE_H */
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index d194af51fd09..8804dbb32a69 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -4,7 +4,7 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "btree_iter.h"
#include "buckets.h"
#include "clock.h"
@@ -27,7 +27,7 @@ static const struct bch_extent_ptr *moving_pred(struct bch_dev *ca,
const struct bch_extent_ptr *ptr;
if (bkey_extent_is_data(k.k) &&
- (ptr = bch_extent_has_device(bkey_s_c_to_extent(k),
+ (ptr = bch2_extent_has_device(bkey_s_c_to_extent(k),
ca->dev_idx)) &&
PTR_BUCKET(ca, ptr)->mark.copygc)
return ptr;
@@ -47,11 +47,11 @@ static int issue_moving_gc_move(struct bch_dev *ca,
if (!ptr) /* We raced - bucket's been reused */
return 0;
- ret = bch_data_move(c, ctxt, &ca->copygc_write_point, k, ptr);
+ ret = bch2_data_move(c, ctxt, &ca->copygc_write_point, k, ptr);
if (!ret)
- trace_bcache_gc_copy(k.k);
+ trace_gc_copy(k.k);
else
- trace_bcache_moving_gc_alloc_fail(c, k.k->size);
+ trace_moving_gc_alloc_fail(c, k.k->size);
return ret;
}
@@ -66,17 +66,17 @@ static void read_moving(struct bch_dev *ca, size_t buckets_to_move,
u64 sectors_not_moved = 0;
size_t buckets_not_moved = 0;
- bch_ratelimit_reset(&ca->moving_gc_pd.rate);
- bch_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate,
+ bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
+ bch2_move_ctxt_init(&ctxt, &ca->moving_gc_pd.rate,
SECTORS_IN_FLIGHT_PER_DEVICE);
- bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
while (1) {
if (kthread_should_stop())
goto out;
- if (bch_move_ctxt_wait(&ctxt))
+ if (bch2_move_ctxt_wait(&ctxt))
goto out;
- k = bch_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
if (btree_iter_err(k))
@@ -86,24 +86,24 @@ static void read_moving(struct bch_dev *ca, size_t buckets_to_move,
goto next;
if (issue_moving_gc_move(ca, &ctxt, k)) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
/* memory allocation failure, wait for some IO to finish */
- bch_move_ctxt_wait_for_io(&ctxt);
+ bch2_move_ctxt_wait_for_io(&ctxt);
continue;
}
next:
- bch_btree_iter_advance_pos(&iter);
- //bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_advance_pos(&iter);
+ //bch2_btree_iter_cond_resched(&iter);
/* unlock before calling moving_context_wait() */
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
cond_resched();
}
- bch_btree_iter_unlock(&iter);
- bch_move_ctxt_exit(&ctxt);
- trace_bcache_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
+ bch2_btree_iter_unlock(&iter);
+ bch2_move_ctxt_exit(&ctxt);
+ trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
buckets_to_move);
/* don't check this if we bailed out early: */
@@ -119,9 +119,9 @@ next:
buckets_not_moved, buckets_to_move);
return;
out:
- bch_btree_iter_unlock(&iter);
- bch_move_ctxt_exit(&ctxt);
- trace_bcache_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
+ bch2_btree_iter_unlock(&iter);
+ bch2_move_ctxt_exit(&ctxt);
+ trace_moving_gc_end(ca, ctxt.sectors_moved, ctxt.keys_moved,
buckets_to_move);
}
@@ -137,7 +137,7 @@ static bool have_copygc_reserve(struct bch_dev *ca)
return ret;
}
-static void bch_moving_gc(struct bch_dev *ca)
+static void bch2_moving_gc(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
struct bucket *g;
@@ -163,7 +163,7 @@ static void bch_moving_gc(struct bch_dev *ca)
reserve_sectors = COPYGC_SECTORS_PER_ITER(ca);
- trace_bcache_moving_gc_start(ca);
+ trace_moving_gc_start(ca);
/*
* Find buckets with lowest sector counts, skipping completely
@@ -223,7 +223,7 @@ static void bch_moving_gc(struct bch_dev *ca)
read_moving(ca, buckets_to_move, sectors_to_move);
}
-static int bch_moving_gc_thread(void *arg)
+static int bch2_moving_gc_thread(void *arg)
{
struct bch_dev *ca = arg;
struct bch_fs *c = ca->fs;
@@ -248,27 +248,27 @@ static int bch_moving_gc_thread(void *arg)
if (available > want) {
next = last + (available - want) *
ca->mi.bucket_size;
- bch_kthread_io_clock_wait(clock, next);
+ bch2_kthread_io_clock_wait(clock, next);
continue;
}
- bch_moving_gc(ca);
+ bch2_moving_gc(ca);
}
return 0;
}
-void bch_moving_gc_stop(struct bch_dev *ca)
+void bch2_moving_gc_stop(struct bch_dev *ca)
{
ca->moving_gc_pd.rate.rate = UINT_MAX;
- bch_ratelimit_reset(&ca->moving_gc_pd.rate);
+ bch2_ratelimit_reset(&ca->moving_gc_pd.rate);
if (ca->moving_gc_read)
kthread_stop(ca->moving_gc_read);
ca->moving_gc_read = NULL;
}
-int bch_moving_gc_start(struct bch_dev *ca)
+int bch2_moving_gc_start(struct bch_dev *ca)
{
struct task_struct *t;
@@ -277,10 +277,10 @@ int bch_moving_gc_start(struct bch_dev *ca)
if (ca->fs->opts.nochanges)
return 0;
- if (bch_fs_init_fault("moving_gc_start"))
+ if (bch2_fs_init_fault("moving_gc_start"))
return -ENOMEM;
- t = kthread_create(bch_moving_gc_thread, ca, "bch_copygc_read");
+ t = kthread_create(bch2_moving_gc_thread, ca, "bch_copygc_read");
if (IS_ERR(t))
return PTR_ERR(t);
@@ -290,8 +290,8 @@ int bch_moving_gc_start(struct bch_dev *ca)
return 0;
}
-void bch_dev_moving_gc_init(struct bch_dev *ca)
+void bch2_dev_moving_gc_init(struct bch_dev *ca)
{
- bch_pd_controller_init(&ca->moving_gc_pd);
+ bch2_pd_controller_init(&ca->moving_gc_pd);
ca->moving_gc_pd.d_term = 0;
}
diff --git a/fs/bcachefs/movinggc.h b/fs/bcachefs/movinggc.h
index 5afbf34fd367..e27ccc35618f 100644
--- a/fs/bcachefs/movinggc.h
+++ b/fs/bcachefs/movinggc.h
@@ -23,8 +23,8 @@
#define COPYGC_SECTORS_PER_ITER(ca) \
((ca)->mi.bucket_size * COPYGC_BUCKETS_PER_ITER(ca))
-void bch_moving_gc_stop(struct bch_dev *);
-int bch_moving_gc_start(struct bch_dev *);
-void bch_dev_moving_gc_init(struct bch_dev *);
+void bch2_moving_gc_stop(struct bch_dev *);
+int bch2_moving_gc_start(struct bch_dev *);
+void bch2_dev_moving_gc_init(struct bch_dev *);
#endif
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 41780d594af1..7c4cf8048156 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -4,35 +4,35 @@
#include "opts.h"
#include "util.h"
-const char * const bch_error_actions[] = {
+const char * const bch2_error_actions[] = {
"continue",
"remount-ro",
"panic",
NULL
};
-const char * const bch_csum_types[] = {
+const char * const bch2_csum_types[] = {
"none",
"crc32c",
"crc64",
NULL
};
-const char * const bch_compression_types[] = {
+const char * const bch2_compression_types[] = {
"none",
"lz4",
"gzip",
NULL
};
-const char * const bch_str_hash_types[] = {
+const char * const bch2_str_hash_types[] = {
"crc32c",
"crc64",
"siphash",
NULL
};
-const char * const bch_cache_replacement_policies[] = {
+const char * const bch2_cache_replacement_policies[] = {
"lru",
"fifo",
"random",
@@ -40,7 +40,7 @@ const char * const bch_cache_replacement_policies[] = {
};
/* Default is -1; we skip past it for struct cached_dev's cache mode */
-const char * const bch_cache_modes[] = {
+const char * const bch2_cache_modes[] = {
"default",
"writethrough",
"writeback",
@@ -49,7 +49,7 @@ const char * const bch_cache_modes[] = {
NULL
};
-const char * const bch_dev_state[] = {
+const char * const bch2_dev_state[] = {
"readwrite",
"readonly",
"failed",
@@ -57,7 +57,7 @@ const char * const bch_dev_state[] = {
NULL
};
-const struct bch_option bch_opt_table[] = {
+const struct bch_option bch2_opt_table[] = {
#define OPT_BOOL() .type = BCH_OPT_BOOL
#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, .min = _min, .max = _max
#define OPT_STR(_choices) .type = BCH_OPT_STR, .choices = _choices
@@ -72,20 +72,20 @@ const struct bch_option bch_opt_table[] = {
#undef BCH_OPT
};
-static enum bch_opt_id bch_opt_lookup(const char *name)
+static enum bch_opt_id bch2_opt_lookup(const char *name)
{
const struct bch_option *i;
- for (i = bch_opt_table;
- i < bch_opt_table + ARRAY_SIZE(bch_opt_table);
+ for (i = bch2_opt_table;
+ i < bch2_opt_table + ARRAY_SIZE(bch2_opt_table);
i++)
if (!strcmp(name, i->name))
- return i - bch_opt_table;
+ return i - bch2_opt_table;
return -1;
}
-static u64 bch_opt_get(struct bch_opts *opts, enum bch_opt_id id)
+static u64 bch2_opt_get(struct bch_opts *opts, enum bch_opt_id id)
{
switch (id) {
#define BCH_OPT(_name, ...) \
@@ -100,7 +100,7 @@ static u64 bch_opt_get(struct bch_opts *opts, enum bch_opt_id id)
}
}
-void bch_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v)
+void bch2_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v)
{
switch (id) {
#define BCH_OPT(_name, ...) \
@@ -120,9 +120,9 @@ void bch_opt_set(struct bch_opts *opts, enum bch_opt_id id, u64 v)
* Initial options from superblock - here we don't want any options undefined,
* any options the superblock doesn't specify are set to 0:
*/
-struct bch_opts bch_sb_opts(struct bch_sb *sb)
+struct bch_opts bch2_sb_opts(struct bch_sb *sb)
{
- struct bch_opts opts = bch_opts_empty();
+ struct bch_opts opts = bch2_opts_empty();
#define BCH_OPT(_name, _mode, _sb_opt, ...) \
if (_sb_opt != NO_SB_OPT) \
@@ -134,9 +134,9 @@ struct bch_opts bch_sb_opts(struct bch_sb *sb)
return opts;
}
-int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res)
+static int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res)
{
- const struct bch_option *opt = &bch_opt_table[id];
+ const struct bch_option *opt = &bch2_opt_table[id];
ssize_t ret;
switch (opt->type) {
@@ -157,7 +157,7 @@ int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res)
return -ERANGE;
break;
case BCH_OPT_STR:
- ret = bch_read_string_list(val, opt->choices);
+ ret = bch2_read_string_list(val, opt->choices);
if (ret < 0)
return ret;
@@ -168,7 +168,7 @@ int parse_one_opt(enum bch_opt_id id, const char *val, u64 *res)
return 0;
}
-int bch_parse_mount_opts(struct bch_opts *opts, char *options)
+int bch2_parse_mount_opts(struct bch_opts *opts, char *options)
{
char *opt, *name, *val;
int ret, id;
@@ -179,7 +179,7 @@ int bch_parse_mount_opts(struct bch_opts *opts, char *options)
val = opt;
if (val) {
- id = bch_opt_lookup(name);
+ id = bch2_opt_lookup(name);
if (id < 0)
return -EINVAL;
@@ -187,29 +187,29 @@ int bch_parse_mount_opts(struct bch_opts *opts, char *options)
if (ret < 0)
return ret;
} else {
- id = bch_opt_lookup(name);
+ id = bch2_opt_lookup(name);
v = 1;
if (id < 0 &&
!strncmp("no", name, 2)) {
- id = bch_opt_lookup(name + 2);
+ id = bch2_opt_lookup(name + 2);
v = 0;
}
- if (bch_opt_table[id].type != BCH_OPT_BOOL)
+ if (bch2_opt_table[id].type != BCH_OPT_BOOL)
return -EINVAL;
}
- bch_opt_set(opts, id, v);
+ bch2_opt_set(opts, id, v);
}
return 0;
}
-enum bch_opt_id bch_parse_sysfs_opt(const char *name, const char *val,
+enum bch_opt_id bch2_parse_sysfs_opt(const char *name, const char *val,
u64 *res)
{
- enum bch_opt_id id = bch_opt_lookup(name);
+ enum bch_opt_id id = bch2_opt_lookup(name);
int ret;
if (id < 0)
@@ -222,20 +222,20 @@ enum bch_opt_id bch_parse_sysfs_opt(const char *name, const char *val,
return id;
}
-ssize_t bch_opt_show(struct bch_opts *opts, const char *name,
+ssize_t bch2_opt_show(struct bch_opts *opts, const char *name,
char *buf, size_t size)
{
- enum bch_opt_id id = bch_opt_lookup(name);
+ enum bch_opt_id id = bch2_opt_lookup(name);
const struct bch_option *opt;
u64 v;
if (id < 0)
return -EINVAL;
- v = bch_opt_get(opts, id);
- opt = &bch_opt_table[id];
+ v = bch2_opt_get(opts, id);
+ opt = &bch2_opt_table[id];
return opt->type == BCH_OPT_STR
- ? bch_snprint_string_list(buf, size, opt->choices, v)
+ ? bch2_snprint_string_list(buf, size, opt->choices, v)
: snprintf(buf, size, "%lli\n", v);
}
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 7441daf5f1ce..6fa707db24d7 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -6,13 +6,13 @@
#include <linux/string.h>
#include "bcachefs_format.h"
-extern const char * const bch_error_actions[];
-extern const char * const bch_csum_types[];
-extern const char * const bch_compression_types[];
-extern const char * const bch_str_hash_types[];
-extern const char * const bch_cache_replacement_policies[];
-extern const char * const bch_cache_modes[];
-extern const char * const bch_dev_state[];
+extern const char * const bch2_error_actions[];
+extern const char * const bch2_csum_types[];
+extern const char * const bch2_compression_types[];
+extern const char * const bch2_str_hash_types[];
+extern const char * const bch2_cache_replacement_policies[];
+extern const char * const bch2_cache_modes[];
+extern const char * const bch2_dev_state[];
/*
* Mount options; we also store defaults in the superblock.
@@ -22,7 +22,7 @@ extern const char * const bch_dev_state[];
* updates the superblock.
*
* We store options as signed integers, where -1 means undefined. This means we
- * can pass the mount options to bch_fs_alloc() as a whole struct, and then only
+ * can pass the mount options to bch2_fs_alloc() as a whole struct, and then only
* apply the options from that struct that are defined.
*/
@@ -50,7 +50,7 @@ enum opt_type {
#define BCH_VISIBLE_OPTS() \
BCH_OPT(errors, 0644, BCH_SB_ERROR_ACTION, \
- s8, OPT_STR(bch_error_actions)) \
+ s8, OPT_STR(bch2_error_actions)) \
BCH_OPT(metadata_replicas, 0444, BCH_SB_META_REPLICAS_WANT,\
s8, OPT_UINT(1, BCH_REPLICAS_MAX)) \
BCH_OPT(data_replicas, 0444, BCH_SB_DATA_REPLICAS_WANT,\
@@ -60,13 +60,13 @@ enum opt_type {
BCH_OPT(data_replicas_required, 0444, BCH_SB_DATA_REPLICAS_REQ,\
s8, OPT_UINT(1, BCH_REPLICAS_MAX)) \
BCH_OPT(metadata_checksum, 0644, BCH_SB_META_CSUM_TYPE, \
- s8, OPT_STR(bch_csum_types)) \
+ s8, OPT_STR(bch2_csum_types)) \
BCH_OPT(data_checksum, 0644, BCH_SB_DATA_CSUM_TYPE, \
- s8, OPT_STR(bch_csum_types)) \
+ s8, OPT_STR(bch2_csum_types)) \
BCH_OPT(compression, 0644, BCH_SB_COMPRESSION_TYPE,\
- s8, OPT_STR(bch_compression_types)) \
+ s8, OPT_STR(bch2_compression_types)) \
BCH_OPT(str_hash, 0644, BCH_SB_STR_HASH_TYPE, \
- s8, OPT_STR(bch_str_hash_types)) \
+ s8, OPT_STR(bch2_str_hash_types)) \
BCH_OPT(inodes_32bit, 0644, BCH_SB_INODE_32BIT, \
s8, OPT_BOOL()) \
BCH_OPT(gc_reserve_percent, 0444, BCH_SB_GC_RESERVE, \
@@ -135,9 +135,9 @@ struct bch_option {
};
-extern const struct bch_option bch_opt_table[];
+extern const struct bch_option bch2_opt_table[];
-static inline struct bch_opts bch_opts_empty(void)
+static inline struct bch_opts bch2_opts_empty(void)
{
struct bch_opts ret;
@@ -145,7 +145,7 @@ static inline struct bch_opts bch_opts_empty(void)
return ret;
}
-static inline void bch_opts_apply(struct bch_opts *dst, struct bch_opts src)
+static inline void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
{
#define BCH_OPT(_name, ...) \
if (src._name >= 0) \
@@ -157,12 +157,12 @@ static inline void bch_opts_apply(struct bch_opts *dst, struct bch_opts src)
#define opt_defined(_opt) ((_opt) >= 0)
-void bch_opt_set(struct bch_opts *, enum bch_opt_id, u64);
-struct bch_opts bch_sb_opts(struct bch_sb *);
+void bch2_opt_set(struct bch_opts *, enum bch_opt_id, u64);
+struct bch_opts bch2_sb_opts(struct bch_sb *);
-int bch_parse_mount_opts(struct bch_opts *, char *);
-enum bch_opt_id bch_parse_sysfs_opt(const char *, const char *, u64 *);
+int bch2_parse_mount_opts(struct bch_opts *, char *);
+enum bch_opt_id bch2_parse_sysfs_opt(const char *, const char *, u64 *);
-ssize_t bch_opt_show(struct bch_opts *, const char *, char *, size_t);
+ssize_t bch2_opt_show(struct bch_opts *, const char *, char *, size_t);
#endif /* _BCACHE_OPTS_H */
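Taken together with opts.c above, the renamed option helpers compose into a simple mount-time flow: parse the option string into a struct bch_opts where -1 means "not set", take defaults from the superblock, then overlay the defined mount options. A sketch under those assumptions; not part of the patch:

	/* Sketch only: resolving mount options against superblock defaults. */
	static int example_resolve_opts(struct bch_sb *sb, char *options,
					struct bch_opts *out)
	{
		struct bch_opts mnt = bch2_opts_empty();	/* all fields -1: undefined */
		int ret;

		ret = bch2_parse_mount_opts(&mnt, options);	/* "foo=bar", "foo", "nofoo" */
		if (ret)
			return ret;

		*out = bch2_sb_opts(sb);	/* superblock defaults, nothing left undefined */
		bch2_opts_apply(out, mnt);	/* defined mount options win */
		return 0;
	}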
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 69be5255a5de..f70fc1a991fc 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -19,7 +19,7 @@ struct bch_hash_info {
};
static inline struct bch_hash_info
-bch_hash_info_init(struct bch_fs *c,
+bch2_hash_info_init(struct bch_fs *c,
const struct bch_inode_unpacked *bi)
{
/* XXX ick */
@@ -60,7 +60,7 @@ struct bch_str_hash_ctx {
};
};
-static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx,
+static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
const struct bch_hash_info *info)
{
switch (info->type) {
@@ -68,7 +68,7 @@ static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx,
ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
break;
case BCH_STR_HASH_CRC64:
- ctx->crc64 = bch_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
+ ctx->crc64 = bch2_crc64_update(~0, &info->crc_key, sizeof(info->crc_key));
break;
case BCH_STR_HASH_SIPHASH:
SipHash24_Init(&ctx->siphash, &info->siphash_key);
@@ -78,7 +78,7 @@ static inline void bch_str_hash_init(struct bch_str_hash_ctx *ctx,
}
}
-static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx,
+static inline void bch2_str_hash_update(struct bch_str_hash_ctx *ctx,
const struct bch_hash_info *info,
const void *data, size_t len)
{
@@ -87,7 +87,7 @@ static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx,
ctx->crc32c = crc32c(ctx->crc32c, data, len);
break;
case BCH_STR_HASH_CRC64:
- ctx->crc64 = bch_crc64_update(ctx->crc64, data, len);
+ ctx->crc64 = bch2_crc64_update(ctx->crc64, data, len);
break;
case BCH_STR_HASH_SIPHASH:
SipHash24_Update(&ctx->siphash, data, len);
@@ -97,7 +97,7 @@ static inline void bch_str_hash_update(struct bch_str_hash_ctx *ctx,
}
}
-static inline u64 bch_str_hash_end(struct bch_str_hash_ctx *ctx,
+static inline u64 bch2_str_hash_end(struct bch_str_hash_ctx *ctx,
const struct bch_hash_info *info)
{
switch (info->type) {
@@ -124,14 +124,14 @@ struct bch_hash_desc {
};
static inline struct bkey_s_c
-bch_hash_lookup_at(const struct bch_hash_desc desc,
+bch2_hash_lookup_at(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct btree_iter *iter, const void *search)
{
u64 inode = iter->pos.inode;
do {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
if (btree_iter_err(k))
return k;
@@ -146,21 +146,21 @@ bch_hash_lookup_at(const struct bch_hash_desc desc,
break;
}
- bch_btree_iter_advance_pos(iter);
+ bch2_btree_iter_advance_pos(iter);
} while (iter->pos.inode == inode);
return bkey_s_c_err(-ENOENT);
}
static inline struct bkey_s_c
-bch_hash_lookup_bkey_at(const struct bch_hash_desc desc,
+bch2_hash_lookup_bkey_at(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct btree_iter *iter, struct bkey_s_c search)
{
u64 inode = iter->pos.inode;
do {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
if (btree_iter_err(k))
return k;
@@ -175,41 +175,41 @@ bch_hash_lookup_bkey_at(const struct bch_hash_desc desc,
break;
}
- bch_btree_iter_advance_pos(iter);
+ bch2_btree_iter_advance_pos(iter);
} while (iter->pos.inode == inode);
return bkey_s_c_err(-ENOENT);
}
static inline struct bkey_s_c
-bch_hash_lookup(const struct bch_hash_desc desc,
+bch2_hash_lookup(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct bch_fs *c, u64 inode,
struct btree_iter *iter, const void *key)
{
- bch_btree_iter_init(iter, c, desc.btree_id,
+ bch2_btree_iter_init(iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)));
- return bch_hash_lookup_at(desc, info, iter, key);
+ return bch2_hash_lookup_at(desc, info, iter, key);
}
static inline struct bkey_s_c
-bch_hash_lookup_intent(const struct bch_hash_desc desc,
+bch2_hash_lookup_intent(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct bch_fs *c, u64 inode,
struct btree_iter *iter, const void *key)
{
- bch_btree_iter_init_intent(iter, c, desc.btree_id,
+ bch2_btree_iter_init_intent(iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)));
- return bch_hash_lookup_at(desc, info, iter, key);
+ return bch2_hash_lookup_at(desc, info, iter, key);
}
static inline struct bkey_s_c
-bch_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
+bch2_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
{
while (1) {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
if (btree_iter_err(k))
return k;
@@ -218,34 +218,34 @@ bch_hash_hole_at(const struct bch_hash_desc desc, struct btree_iter *iter)
return k;
/* hash collision, keep going */
- bch_btree_iter_advance_pos(iter);
+ bch2_btree_iter_advance_pos(iter);
if (iter->pos.inode != k.k->p.inode)
return bkey_s_c_err(-ENOENT);
}
}
-static inline struct bkey_s_c bch_hash_hole(const struct bch_hash_desc desc,
+static inline struct bkey_s_c bch2_hash_hole(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct bch_fs *c, u64 inode,
struct btree_iter *iter,
const void *key)
{
- bch_btree_iter_init_intent(iter, c, desc.btree_id,
+ bch2_btree_iter_init_intent(iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)));
- return bch_hash_hole_at(desc, iter);
+ return bch2_hash_hole_at(desc, iter);
}
-static inline int bch_hash_needs_whiteout(const struct bch_hash_desc desc,
+static inline int bch2_hash_needs_whiteout(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct btree_iter *iter,
struct btree_iter *start)
{
- bch_btree_iter_set_pos(iter,
+ bch2_btree_iter_set_pos(iter,
btree_type_successor(start->btree_id, start->pos));
while (1) {
- struct bkey_s_c k = bch_btree_iter_peek_with_holes(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_with_holes(iter);
int ret = btree_iter_err(k);
if (ret)
@@ -259,14 +259,14 @@ static inline int bch_hash_needs_whiteout(const struct bch_hash_desc desc,
desc.hash_bkey(info, k) <= start->pos.offset)
return true;
- bch_btree_iter_advance_pos(iter);
+ bch2_btree_iter_advance_pos(iter);
}
}
#define BCH_HASH_SET_MUST_CREATE 1
#define BCH_HASH_SET_MUST_REPLACE 2
-static inline int bch_hash_set(const struct bch_hash_desc desc,
+static inline int bch2_hash_set(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct bch_fs *c, u64 inode,
u64 *journal_seq,
@@ -276,17 +276,17 @@ static inline int bch_hash_set(const struct bch_hash_desc desc,
struct bkey_s_c k;
int ret;
- bch_btree_iter_init_intent(&hashed_slot, c, desc.btree_id,
+ bch2_btree_iter_init_intent(&hashed_slot, c, desc.btree_id,
POS(inode, desc.hash_bkey(info, bkey_i_to_s_c(insert))));
- bch_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos);
- bch_btree_iter_link(&hashed_slot, &iter);
+ bch2_btree_iter_init_intent(&iter, c, desc.btree_id, hashed_slot.pos);
+ bch2_btree_iter_link(&hashed_slot, &iter);
retry:
/*
* On hash collision, we have to keep the slot we hashed to locked while
* we do the insert - to avoid racing with another thread deleting
* whatever's in the slot we hashed to:
*/
- ret = bch_btree_iter_traverse(&hashed_slot);
+ ret = bch2_btree_iter_traverse(&hashed_slot);
if (ret)
goto err;
@@ -294,9 +294,9 @@ retry:
* On -EINTR/retry, we dropped locks - always restart from the slot we
* hashed to:
*/
- bch_btree_iter_copy(&iter, &hashed_slot);
+ bch2_btree_iter_copy(&iter, &hashed_slot);
- k = bch_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
+ k = bch2_hash_lookup_bkey_at(desc, info, &iter, bkey_i_to_s_c(insert));
ret = btree_iter_err(k);
if (ret == -ENOENT) {
@@ -311,8 +311,8 @@ retry:
* that we could have used, so restart from the
* slot we hashed to:
*/
- bch_btree_iter_copy(&iter, &hashed_slot);
- k = bch_hash_hole_at(desc, &iter);
+ bch2_btree_iter_copy(&iter, &hashed_slot);
+ k = bch2_hash_hole_at(desc, &iter);
if ((ret = btree_iter_err(k)))
goto err;
} else if (!ret) {
@@ -325,7 +325,7 @@ retry:
}
insert->k.p = iter.pos;
- ret = bch_btree_insert_at(c, NULL, NULL, journal_seq,
+ ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&iter, insert));
err:
@@ -336,12 +336,12 @@ err:
* On successful insert, we don't want to clobber ret with error from
* iter:
*/
- bch_btree_iter_unlock(&iter);
- bch_btree_iter_unlock(&hashed_slot);
+ bch2_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&hashed_slot);
return ret;
}
-static inline int bch_hash_delete(const struct bch_hash_desc desc,
+static inline int bch2_hash_delete(const struct bch_hash_desc desc,
const struct bch_hash_info *info,
struct bch_fs *c, u64 inode,
u64 *journal_seq, const void *key)
@@ -351,17 +351,17 @@ static inline int bch_hash_delete(const struct bch_hash_desc desc,
struct bkey_i delete;
int ret = -ENOENT;
- bch_btree_iter_init_intent(&iter, c, desc.btree_id,
+ bch2_btree_iter_init_intent(&iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)));
- bch_btree_iter_init(&whiteout_iter, c, desc.btree_id,
+ bch2_btree_iter_init(&whiteout_iter, c, desc.btree_id,
POS(inode, desc.hash_key(info, key)));
- bch_btree_iter_link(&iter, &whiteout_iter);
+ bch2_btree_iter_link(&iter, &whiteout_iter);
retry:
- k = bch_hash_lookup_at(desc, info, &iter, key);
+ k = bch2_hash_lookup_at(desc, info, &iter, key);
if ((ret = btree_iter_err(k)))
goto err;
- ret = bch_hash_needs_whiteout(desc, info, &whiteout_iter, &iter);
+ ret = bch2_hash_needs_whiteout(desc, info, &whiteout_iter, &iter);
if (ret < 0)
goto err;
@@ -369,7 +369,7 @@ retry:
delete.k.p = k.k->p;
delete.k.type = ret ? desc.whiteout_type : KEY_TYPE_DELETED;
- ret = bch_btree_insert_at(c, NULL, NULL, journal_seq,
+ ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_ATOMIC,
BTREE_INSERT_ENTRY(&iter, &delete));
@@ -377,8 +377,8 @@ err:
if (ret == -EINTR)
goto retry;
- bch_btree_iter_unlock(&whiteout_iter);
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&whiteout_iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
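The string-hash context renamed above is self-contained: pick up the per-inode hash type and keys with bch2_hash_info_init(), then init/update/end the context. A sketch of how a dirent-style caller might hash a name; not part of the patch:

	/* Sketch only: hashing a name with the bch2_str_hash_* helpers above. */
	static u64 example_hash_name(struct bch_fs *c,
				     const struct bch_inode_unpacked *bi,
				     const char *name, size_t len)
	{
		struct bch_hash_info info = bch2_hash_info_init(c, bi);
		struct bch_str_hash_ctx ctx;

		bch2_str_hash_init(&ctx, &info);	/* crc32c, crc64 or siphash */
		bch2_str_hash_update(&ctx, &info, name, len);
		return bch2_str_hash_end(&ctx, &info);
	}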
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 41d54d3cdb10..9f41d71d6c11 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "checksum.h"
#include "error.h"
#include "io.h"
@@ -11,12 +11,12 @@
#include <linux/backing-dev.h>
#include <linux/sort.h>
-static inline void __bch_sb_layout_size_assert(void)
+static inline void __bch2_sb_layout_size_assert(void)
{
BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}
-struct bch_sb_field *bch_sb_field_get(struct bch_sb *sb,
+struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
enum bch_sb_field_type type)
{
struct bch_sb_field *f;
@@ -29,7 +29,7 @@ struct bch_sb_field *bch_sb_field_get(struct bch_sb *sb,
return NULL;
}
-void bch_free_super(struct bcache_superblock *sb)
+void bch2_free_super(struct bcache_superblock *sb)
{
if (sb->bio)
bio_put(sb->bio);
@@ -40,7 +40,7 @@ void bch_free_super(struct bcache_superblock *sb)
memset(sb, 0, sizeof(*sb));
}
-static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order)
+static int __bch2_super_realloc(struct bcache_superblock *sb, unsigned order)
{
struct bch_sb *new_sb;
struct bio *bio;
@@ -48,7 +48,7 @@ static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order)
if (sb->page_order >= order && sb->sb)
return 0;
- if (dynamic_fault("bcache:add:super_realloc"))
+ if (dynamic_fault("bcachefs:add:super_realloc"))
return -ENOMEM;
bio = bio_kmalloc(GFP_KERNEL, 1 << order);
@@ -74,7 +74,7 @@ static int __bch_super_realloc(struct bcache_superblock *sb, unsigned order)
return 0;
}
-static int bch_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
+static int bch2_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
{
u64 new_bytes = __vstruct_bytes(struct bch_sb, u64s);
u64 max_bytes = 512 << sb->sb->layout.sb_max_size_bits;
@@ -87,10 +87,10 @@ static int bch_sb_realloc(struct bcache_superblock *sb, unsigned u64s)
return -ENOSPC;
}
- return __bch_super_realloc(sb, get_order(new_bytes));
+ return __bch2_super_realloc(sb, get_order(new_bytes));
}
-static int bch_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
+static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
{
u64 bytes = __vstruct_bytes(struct bch_sb, u64s);
struct bch_sb *sb;
@@ -113,7 +113,7 @@ static int bch_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
return 0;
}
-static struct bch_sb_field *__bch_sb_field_resize(struct bch_sb *sb,
+static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
struct bch_sb_field *f,
unsigned u64s)
{
@@ -142,27 +142,27 @@ static struct bch_sb_field *__bch_sb_field_resize(struct bch_sb *sb,
return f;
}
-struct bch_sb_field *bch_sb_field_resize(struct bcache_superblock *sb,
+struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *sb,
enum bch_sb_field_type type,
unsigned u64s)
{
- struct bch_sb_field *f = bch_sb_field_get(sb->sb, type);
+ struct bch_sb_field *f = bch2_sb_field_get(sb->sb, type);
ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
ssize_t d = -old_u64s + u64s;
- if (bch_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
+ if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d))
return NULL;
- f = __bch_sb_field_resize(sb->sb, f, u64s);
+ f = __bch2_sb_field_resize(sb->sb, f, u64s);
f->type = type;
return f;
}
-struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c,
+struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
enum bch_sb_field_type type,
unsigned u64s)
{
- struct bch_sb_field *f = bch_sb_field_get(c->disk_sb, type);
+ struct bch_sb_field *f = bch2_sb_field_get(c->disk_sb, type);
ssize_t old_u64s = f ? le32_to_cpu(f->u64s) : 0;
ssize_t d = -old_u64s + u64s;
struct bch_dev *ca;
@@ -170,7 +170,7 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c,
lockdep_assert_held(&c->sb_lock);
- if (bch_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
+ if (bch2_fs_sb_realloc(c, le32_to_cpu(c->disk_sb->u64s) + d))
return NULL;
/* XXX: we're not checking that offline device have enough space */
@@ -178,13 +178,13 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *c,
for_each_online_member(ca, c, i) {
struct bcache_superblock *sb = &ca->disk_sb;
- if (bch_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
+ if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s) + d)) {
percpu_ref_put(&ca->ref);
return NULL;
}
}
- f = __bch_sb_field_resize(c->disk_sb, f, u64s);
+ f = __bch2_sb_field_resize(c->disk_sb, f, u64s);
f->type = type;
return f;
}
@@ -195,7 +195,7 @@ static const char *validate_sb_layout(struct bch_sb_layout *layout)
unsigned i;
if (uuid_le_cmp(layout->magic, BCACHE_MAGIC))
- return "Not a bcache superblock layout";
+ return "Not a bcachefs superblock layout";
if (layout->layout_type != 0)
return "Invalid superblock layout type";
@@ -228,7 +228,7 @@ static int u64_cmp(const void *_l, const void *_r)
return l < r ? -1 : l > r ? 1 : 0;
}
-const char *bch_validate_journal_layout(struct bch_sb *sb,
+const char *bch2_validate_journal_layout(struct bch_sb *sb,
struct bch_member_cpu mi)
{
struct bch_sb_field_journal *journal;
@@ -237,11 +237,11 @@ const char *bch_validate_journal_layout(struct bch_sb *sb,
unsigned i;
u64 *b;
- journal = bch_sb_get_journal(sb);
+ journal = bch2_sb_get_journal(sb);
if (!journal)
return NULL;
- nr = bch_nr_journal_buckets(journal);
+ nr = bch2_nr_journal_buckets(journal);
if (!nr)
return NULL;
@@ -277,12 +277,12 @@ err:
return err;
}
-static const char *bch_sb_validate_members(struct bch_sb *sb)
+static const char *bch2_sb_validate_members(struct bch_sb *sb)
{
struct bch_sb_field_members *mi;
unsigned i;
- mi = bch_sb_get_members(sb);
+ mi = bch2_sb_get_members(sb);
if (!mi)
return "Invalid superblock: member info area missing";
@@ -291,7 +291,7 @@ static const char *bch_sb_validate_members(struct bch_sb *sb)
return "Invalid superblock: bad member info";
for (i = 0; i < sb->nr_devices; i++) {
- if (bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)))
+ if (bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)))
continue;
if (le16_to_cpu(mi->members[i].bucket_size) <
@@ -302,7 +302,7 @@ static const char *bch_sb_validate_members(struct bch_sb *sb)
return NULL;
}
-const char *bch_validate_cache_super(struct bcache_superblock *disk_sb)
+const char *bch2_validate_cache_super(struct bcache_superblock *disk_sb)
{
struct bch_sb *sb = disk_sb->sb;
struct bch_sb_field *f;
@@ -328,10 +328,10 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb)
block_size > PAGE_SECTORS)
return "Bad block size";
- if (bch_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
+ if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
return "Bad user UUID";
- if (bch_is_zero(sb->uuid.b, sizeof(uuid_le)))
+ if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le)))
return "Bad internal UUID";
if (!sb->nr_devices ||
@@ -404,12 +404,12 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb)
return "Invalid superblock: unknown optional field type";
}
- err = bch_sb_validate_members(sb);
+ err = bch2_sb_validate_members(sb);
if (err)
return err;
- sb_mi = bch_sb_get_members(sb);
- mi = bch_mi_to_cpu(sb_mi->members + sb->dev_idx);
+ sb_mi = bch2_sb_get_members(sb);
+ mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx);
if (mi.nbuckets > LONG_MAX)
return "Too many buckets";
@@ -426,7 +426,7 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb)
mi.bucket_size * mi.nbuckets)
return "Invalid superblock: device too small";
- err = bch_validate_journal_layout(sb, mi);
+ err = bch2_validate_journal_layout(sb, mi);
if (err)
return err;
@@ -435,7 +435,7 @@ const char *bch_validate_cache_super(struct bcache_superblock *disk_sb)
/* device open: */
-static const char *bch_blkdev_open(const char *path, fmode_t mode,
+static const char *bch2_blkdev_open(const char *path, fmode_t mode,
void *holder, struct block_device **ret)
{
struct block_device *bdev;
@@ -456,10 +456,10 @@ static const char *bch_blkdev_open(const char *path, fmode_t mode,
return NULL;
}
-static void bch_sb_update(struct bch_fs *c)
+static void bch2_sb_update(struct bch_fs *c)
{
struct bch_sb *src = c->disk_sb;
- struct bch_sb_field_members *mi = bch_sb_get_members(src);
+ struct bch_sb_field_members *mi = bch2_sb_get_members(src);
struct bch_dev *ca;
unsigned i;
@@ -480,7 +480,7 @@ static void bch_sb_update(struct bch_fs *c)
c->sb.time_precision = le32_to_cpu(src->time_precision);
for_each_member_device(ca, c, i)
- ca->mi = bch_mi_to_cpu(mi->members + i);
+ ca->mi = bch2_mi_to_cpu(mi->members + i);
}
/* doesn't copy member info */
@@ -509,45 +509,45 @@ static void __copy_super(struct bch_sb *dst, struct bch_sb *src)
if (src_f->type == BCH_SB_FIELD_journal)
continue;
- dst_f = bch_sb_field_get(dst, src_f->type);
- dst_f = __bch_sb_field_resize(dst, dst_f,
+ dst_f = bch2_sb_field_get(dst, src_f->type);
+ dst_f = __bch2_sb_field_resize(dst, dst_f,
le32_to_cpu(src_f->u64s));
memcpy(dst_f, src_f, vstruct_bytes(src_f));
}
}
-int bch_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
+int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
{
struct bch_sb_field_journal *journal_buckets =
- bch_sb_get_journal(src);
+ bch2_sb_get_journal(src);
unsigned journal_u64s = journal_buckets
? le32_to_cpu(journal_buckets->field.u64s)
: 0;
lockdep_assert_held(&c->sb_lock);
- if (bch_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
+ if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
return -ENOMEM;
__copy_super(c->disk_sb, src);
- bch_sb_update(c);
+ bch2_sb_update(c);
return 0;
}
-int bch_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
+int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
{
struct bch_sb *src = c->disk_sb, *dst = ca->disk_sb.sb;
struct bch_sb_field_journal *journal_buckets =
- bch_sb_get_journal(dst);
+ bch2_sb_get_journal(dst);
unsigned journal_u64s = journal_buckets
? le32_to_cpu(journal_buckets->field.u64s)
: 0;
unsigned u64s = le32_to_cpu(src->u64s) + journal_u64s;
int ret;
- ret = bch_sb_realloc(&ca->disk_sb, u64s);
+ ret = bch2_sb_realloc(&ca->disk_sb, u64s);
if (ret)
return ret;
@@ -569,13 +569,13 @@ reread:
sb->bio->bi_iter.bi_sector = offset;
sb->bio->bi_iter.bi_size = PAGE_SIZE << sb->page_order;
bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
- bch_bio_map(sb->bio, sb->sb);
+ bch2_bio_map(sb->bio, sb->sb);
if (submit_bio_wait(sb->bio))
return "IO error";
if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
- return "Not a bcache superblock";
+ return "Not a bcachefs superblock";
if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4)
return "Unsupported superblock version";
@@ -587,7 +587,7 @@ reread:
order = get_order(bytes);
if (order > sb->page_order) {
- if (__bch_super_realloc(sb, order))
+ if (__bch2_super_realloc(sb, order))
return "cannot allocate memory";
goto reread;
}
@@ -599,13 +599,13 @@ reread:
csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
(struct nonce) { 0 }, sb->sb);
- if (bch_crc_cmp(csum, sb->sb->csum))
+ if (bch2_crc_cmp(csum, sb->sb->csum))
return "bad checksum reading superblock";
return NULL;
}
-const char *bch_read_super(struct bcache_superblock *sb,
+const char *bch2_read_super(struct bcache_superblock *sb,
struct bch_opts opts,
const char *path)
{
@@ -623,16 +623,16 @@ const char *bch_read_super(struct bcache_superblock *sb,
if (!(opt_defined(opts.nochanges) && opts.nochanges))
sb->mode |= FMODE_WRITE;
- err = bch_blkdev_open(path, sb->mode, sb, &sb->bdev);
+ err = bch2_blkdev_open(path, sb->mode, sb, &sb->bdev);
if (err)
return err;
err = "cannot allocate memory";
- if (__bch_super_realloc(sb, 0))
+ if (__bch2_super_realloc(sb, 0))
goto err;
err = "dynamic fault";
- if (bch_fs_init_fault("read_super"))
+ if (bch2_fs_init_fault("read_super"))
goto err;
err = read_one_super(sb, offset);
@@ -659,7 +659,7 @@ const char *bch_read_super(struct bcache_superblock *sb,
* use sb buffer to read layout, since sb buffer is page aligned but
* layout won't be:
*/
- bch_bio_map(sb->bio, sb->sb);
+ bch2_bio_map(sb->bio, sb->sb);
err = "IO error";
if (submit_bio_wait(sb->bio))
@@ -695,7 +695,7 @@ got_super:
return NULL;
err:
- bch_free_super(sb);
+ bch2_free_super(sb);
return err;
}
@@ -707,7 +707,7 @@ static void write_super_endio(struct bio *bio)
/* XXX: return errors directly */
- bch_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");
+ bch2_dev_fatal_io_err_on(bio->bi_error, ca, "superblock write");
closure_put(&ca->fs->sb_write);
percpu_ref_put(&ca->io_ref);
@@ -739,13 +739,13 @@ static bool write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
bio->bi_end_io = write_super_endio;
bio->bi_private = ca;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
- bch_bio_map(bio, sb);
+ bch2_bio_map(bio, sb);
closure_bio_submit(bio, &c->sb_write);
return true;
}
-void bch_write_super(struct bch_fs *c)
+void bch2_write_super(struct bch_fs *c)
{
struct closure *cl = &c->sb_write;
struct bch_dev *ca;
@@ -759,7 +759,7 @@ void bch_write_super(struct bch_fs *c)
le64_add_cpu(&c->disk_sb->seq, 1);
for_each_online_member(ca, c, i)
- bch_sb_from_fs(c, ca);
+ bch2_sb_from_fs(c, ca);
if (c->opts.nochanges)
goto out;
@@ -775,10 +775,10 @@ void bch_write_super(struct bch_fs *c)
} while (wrote);
out:
/* Make new options visible after they're persistent: */
- bch_sb_update(c);
+ bch2_sb_update(c);
}
-void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k,
+void bch2_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k,
bool meta)
{
struct bch_member *mi;
@@ -789,12 +789,12 @@ void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k,
mutex_lock(&c->sb_lock);
/* recheck, might have raced */
- if (bch_check_super_marked(c, k, meta)) {
+ if (bch2_check_super_marked(c, k, meta)) {
mutex_unlock(&c->sb_lock);
return;
}
- mi = bch_sb_get_members(c->disk_sb)->members;
+ mi = bch2_sb_get_members(c->disk_sb)->members;
extent_for_each_ptr(e, ptr)
if (!ptr->cached) {
@@ -812,6 +812,6 @@ void bch_check_mark_super_slowpath(struct bch_fs *c, const struct bkey_i *k,
? SET_BCH_SB_META_REPLICAS_HAVE
: SET_BCH_SB_DATA_REPLICAS_HAVE)(c->disk_sb, nr_replicas);
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
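
(Illustrative sketch, not part of the patch: the hunks above show the order of checks read_one_super() makes against the on-disk superblock. A condensed version of that validation order, with I/O and the realloc-and-reread step reduced to an error return; validate_super_sketch and the sb_bytes parameter are stand-ins for the size the real code computes:)

static const char *validate_super_sketch(struct bcache_superblock *sb,
					 size_t sb_bytes)
{
	unsigned order;

	/* magic and version are checked before anything else: */
	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC))
		return "Not a bcachefs superblock";

	if (le64_to_cpu(sb->sb->version) != BCACHE_SB_VERSION_CDEV_V4)
		return "Unsupported superblock version";

	/* the real code grows the buffer with __bch2_super_realloc() and rereads: */
	order = get_order(sb_bytes);
	if (order > sb->page_order)
		return "superblock larger than buffer";

	/* checksum covers the whole variable-length structure: */
	if (bch2_crc_cmp(csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb->sb),
				      (struct nonce) { 0 }, sb->sb),
			 sb->sb->csum))
		return "bad checksum reading superblock";

	return NULL;
}
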
diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h
index 1a9bd3092e4c..8f0d82dbe260 100644
--- a/fs/bcachefs/super-io.h
+++ b/fs/bcachefs/super-io.h
@@ -6,10 +6,10 @@
#include <asm/byteorder.h>
-struct bch_sb_field *bch_sb_field_get(struct bch_sb *, enum bch_sb_field_type);
-struct bch_sb_field *bch_sb_field_resize(struct bcache_superblock *,
+struct bch_sb_field *bch2_sb_field_get(struct bch_sb *, enum bch_sb_field_type);
+struct bch_sb_field *bch2_sb_field_resize(struct bcache_superblock *,
enum bch_sb_field_type, unsigned);
-struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *,
+struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *,
enum bch_sb_field_type, unsigned);
#define field_to_type(_f, _name) \
@@ -17,23 +17,23 @@ struct bch_sb_field *bch_fs_sb_field_resize(struct bch_fs *,
#define BCH_SB_FIELD_TYPE(_name) \
static inline struct bch_sb_field_##_name * \
-bch_sb_get_##_name(struct bch_sb *sb) \
+bch2_sb_get_##_name(struct bch_sb *sb) \
{ \
- return field_to_type(bch_sb_field_get(sb, \
+ return field_to_type(bch2_sb_field_get(sb, \
BCH_SB_FIELD_##_name), _name); \
} \
\
static inline struct bch_sb_field_##_name * \
-bch_sb_resize_##_name(struct bcache_superblock *sb, unsigned u64s) \
+bch2_sb_resize_##_name(struct bcache_superblock *sb, unsigned u64s) \
{ \
- return field_to_type(bch_sb_field_resize(sb, \
+ return field_to_type(bch2_sb_field_resize(sb, \
BCH_SB_FIELD_##_name, u64s), _name); \
} \
\
static inline struct bch_sb_field_##_name * \
-bch_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s) \
+bch2_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s) \
{ \
- return field_to_type(bch_fs_sb_field_resize(c, \
+ return field_to_type(bch2_fs_sb_field_resize(c, \
BCH_SB_FIELD_##_name, u64s), _name); \
}
@@ -41,8 +41,8 @@ BCH_SB_FIELD_TYPE(journal);
BCH_SB_FIELD_TYPE(members);
BCH_SB_FIELD_TYPE(crypt);
-static inline bool bch_sb_test_feature(struct bch_sb *sb,
- enum bch_sb_features f)
+static inline bool bch2_sb_test_feature(struct bch_sb *sb,
+ enum bch_sb_features f)
{
unsigned w = f / 64;
unsigned b = f % 64;
@@ -50,10 +50,10 @@ static inline bool bch_sb_test_feature(struct bch_sb *sb,
return le64_to_cpu(sb->features[w]) & (1ULL << b);
}
-static inline void bch_sb_set_feature(struct bch_sb *sb,
- enum bch_sb_features f)
+static inline void bch2_sb_set_feature(struct bch_sb *sb,
+ enum bch_sb_features f)
{
- if (!bch_sb_test_feature(sb, f)) {
+ if (!bch2_sb_test_feature(sb, f)) {
unsigned w = f / 64;
unsigned b = f % 64;
@@ -61,7 +61,7 @@ static inline void bch_sb_set_feature(struct bch_sb *sb,
}
}
-static inline __le64 bch_sb_magic(struct bch_fs *c)
+static inline __le64 bch2_sb_magic(struct bch_fs *c)
{
__le64 ret;
memcpy(&ret, &c->sb.uuid, sizeof(ret));
@@ -70,20 +70,20 @@ static inline __le64 bch_sb_magic(struct bch_fs *c)
static inline __u64 jset_magic(struct bch_fs *c)
{
- return __le64_to_cpu(bch_sb_magic(c) ^ JSET_MAGIC);
+ return __le64_to_cpu(bch2_sb_magic(c) ^ JSET_MAGIC);
}
static inline __u64 pset_magic(struct bch_fs *c)
{
- return __le64_to_cpu(bch_sb_magic(c) ^ PSET_MAGIC);
+ return __le64_to_cpu(bch2_sb_magic(c) ^ PSET_MAGIC);
}
static inline __u64 bset_magic(struct bch_fs *c)
{
- return __le64_to_cpu(bch_sb_magic(c) ^ BSET_MAGIC);
+ return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC);
}
-static inline struct bch_member_cpu bch_mi_to_cpu(struct bch_member *mi)
+static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
return (struct bch_member_cpu) {
.nbuckets = le64_to_cpu(mi->nbuckets),
@@ -95,29 +95,29 @@ static inline struct bch_member_cpu bch_mi_to_cpu(struct bch_member *mi)
.has_data = BCH_MEMBER_HAS_DATA(mi),
.replacement = BCH_MEMBER_REPLACEMENT(mi),
.discard = BCH_MEMBER_DISCARD(mi),
- .valid = !bch_is_zero(mi->uuid.b, sizeof(uuid_le)),
+ .valid = !bch2_is_zero(mi->uuid.b, sizeof(uuid_le)),
};
}
-int bch_sb_to_fs(struct bch_fs *, struct bch_sb *);
-int bch_sb_from_fs(struct bch_fs *, struct bch_dev *);
+int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
+int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
-void bch_free_super(struct bcache_superblock *);
-int bch_super_realloc(struct bcache_superblock *, unsigned);
+void bch2_free_super(struct bcache_superblock *);
+int bch2_super_realloc(struct bcache_superblock *, unsigned);
-const char *bch_validate_journal_layout(struct bch_sb *,
- struct bch_member_cpu);
-const char *bch_validate_cache_super(struct bcache_superblock *);
+const char *bch2_validate_journal_layout(struct bch_sb *,
+ struct bch_member_cpu);
+const char *bch2_validate_cache_super(struct bcache_superblock *);
-const char *bch_read_super(struct bcache_superblock *,
+const char *bch2_read_super(struct bcache_superblock *,
struct bch_opts, const char *);
-void bch_write_super(struct bch_fs *);
+void bch2_write_super(struct bch_fs *);
-void bch_check_mark_super_slowpath(struct bch_fs *,
- const struct bkey_i *, bool);
+void bch2_check_mark_super_slowpath(struct bch_fs *,
+ const struct bkey_i *, bool);
-static inline bool bch_check_super_marked(struct bch_fs *c,
- const struct bkey_i *k, bool meta)
+static inline bool bch2_check_super_marked(struct bch_fs *c,
+ const struct bkey_i *k, bool meta)
{
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(k);
const struct bch_extent_ptr *ptr;
@@ -147,13 +147,13 @@ static inline bool bch_check_super_marked(struct bch_fs *c,
return ret;
}
-static inline void bch_check_mark_super(struct bch_fs *c,
- const struct bkey_i *k, bool meta)
+static inline void bch2_check_mark_super(struct bch_fs *c,
+ const struct bkey_i *k, bool meta)
{
- if (bch_check_super_marked(c, k, meta))
+ if (bch2_check_super_marked(c, k, meta))
return;
- bch_check_mark_super_slowpath(c, k, meta);
+ bch2_check_mark_super_slowpath(c, k, meta);
}
#endif /* _BCACHE_SUPER_IO_H */
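
(Illustrative note, not part of the patch: the BCH_SB_FIELD_TYPE() macro above generates the typed superblock-field accessors used throughout this rename, which is why only the macro body changes. For instance, BCH_SB_FIELD_TYPE(members) now expands to roughly the following, matching the bch2_sb_get_members() and bch2_fs_sb_resize_members() callers seen in super.c:)

static inline struct bch_sb_field_members *
bch2_sb_get_members(struct bch_sb *sb)
{
	return field_to_type(bch2_sb_field_get(sb, BCH_SB_FIELD_members),
			     members);
}

static inline struct bch_sb_field_members *
bch2_fs_sb_resize_members(struct bch_fs *c, unsigned u64s)
{
	return field_to_type(bch2_fs_sb_field_resize(c, BCH_SB_FIELD_members,
						     u64s), members);
}

The feature helpers are unchanged in behaviour: feature bit f lives in 64-bit word f / 64 of sb->features[], at bit position f % 64, as the w/b arithmetic in the hunk shows.
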
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index fdd731d25917..8aa5cc00b25e 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -1,12 +1,12 @@
/*
- * bcache setup/teardown code, and some metadata io - read a superblock and
+ * bcachefs setup/teardown code, and some metadata io - read a superblock and
* figure out what to do with it.
*
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "btree_cache.h"
#include "btree_gc.h"
@@ -56,18 +56,18 @@ static const uuid_le invalid_uuid = {
}
};
-static struct kset *bcache_kset;
+static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);
static DECLARE_WAIT_QUEUE_HEAD(bch_read_only_wait);
-static void bch_dev_free(struct bch_dev *);
-static int bch_dev_alloc(struct bch_fs *, unsigned);
-static int bch_dev_sysfs_online(struct bch_dev *);
-static void __bch_dev_read_only(struct bch_fs *, struct bch_dev *);
+static void bch2_dev_free(struct bch_dev *);
+static int bch2_dev_alloc(struct bch_fs *, unsigned);
+static int bch2_dev_sysfs_online(struct bch_dev *);
+static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
-struct bch_fs *bch_bdev_to_fs(struct block_device *bdev)
+struct bch_fs *bch2_bdev_to_fs(struct block_device *bdev)
{
struct bch_fs *c;
struct bch_dev *ca;
@@ -90,7 +90,7 @@ found:
return c;
}
-static struct bch_fs *__bch_uuid_to_fs(uuid_le uuid)
+static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
{
struct bch_fs *c;
@@ -103,12 +103,12 @@ static struct bch_fs *__bch_uuid_to_fs(uuid_le uuid)
return NULL;
}
-struct bch_fs *bch_uuid_to_fs(uuid_le uuid)
+struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
{
struct bch_fs *c;
mutex_lock(&bch_fs_list_lock);
- c = __bch_uuid_to_fs(uuid);
+ c = __bch2_uuid_to_fs(uuid);
if (c)
closure_get(&c->cl);
mutex_unlock(&bch_fs_list_lock);
@@ -116,7 +116,7 @@ struct bch_fs *bch_uuid_to_fs(uuid_le uuid)
return c;
}
-int bch_congested(struct bch_fs *c, int bdi_bits)
+int bch2_congested(struct bch_fs *c, int bdi_bits)
{
struct backing_dev_info *bdi;
struct bch_dev *ca;
@@ -153,11 +153,11 @@ int bch_congested(struct bch_fs *c, int bdi_bits)
return ret;
}
-static int bch_congested_fn(void *data, int bdi_bits)
+static int bch2_congested_fn(void *data, int bdi_bits)
{
struct bch_fs *c = data;
- return bch_congested(c, bdi_bits);
+ return bch2_congested(c, bdi_bits);
}
/* Filesystem RO/RW: */
@@ -177,27 +177,27 @@ static int bch_congested_fn(void *data, int bdi_bits)
* - allocator depends on the journal (when it rewrites prios and gens)
*/
-static void __bch_fs_read_only(struct bch_fs *c)
+static void __bch2_fs_read_only(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
- bch_tiering_stop(c);
+ bch2_tiering_stop(c);
for_each_member_device(ca, c, i)
- bch_moving_gc_stop(ca);
+ bch2_moving_gc_stop(ca);
- bch_gc_thread_stop(c);
+ bch2_gc_thread_stop(c);
- bch_btree_flush(c);
+ bch2_btree_flush(c);
for_each_member_device(ca, c, i)
- bch_dev_allocator_stop(ca);
+ bch2_dev_allocator_stop(ca);
- bch_fs_journal_stop(&c->journal);
+ bch2_fs_journal_stop(&c->journal);
}
-static void bch_writes_disabled(struct percpu_ref *writes)
+static void bch2_writes_disabled(struct percpu_ref *writes)
{
struct bch_fs *c = container_of(writes, struct bch_fs, writes);
@@ -205,7 +205,7 @@ static void bch_writes_disabled(struct percpu_ref *writes)
wake_up(&bch_read_only_wait);
}
-void bch_fs_read_only(struct bch_fs *c)
+void bch2_fs_read_only(struct bch_fs *c)
{
mutex_lock(&c->state_lock);
if (c->state != BCH_FS_STARTING &&
@@ -221,7 +221,7 @@ void bch_fs_read_only(struct bch_fs *c)
*
* (This is really blocking new _allocations_, writes to previously
* allocated space can still happen until stopping the allocator in
- * bch_dev_allocator_stop()).
+ * bch2_dev_allocator_stop()).
*/
percpu_ref_kill(&c->writes);
@@ -229,7 +229,7 @@ void bch_fs_read_only(struct bch_fs *c)
cancel_delayed_work(&c->pd_controllers_update);
c->foreground_write_pd.rate.rate = UINT_MAX;
- bch_wake_delayed_writes((unsigned long) c);
+ bch2_wake_delayed_writes((unsigned long) c);
/*
* If we're not doing an emergency shutdown, we want to wait on
@@ -246,18 +246,18 @@ void bch_fs_read_only(struct bch_fs *c)
test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags) ||
test_bit(BCH_FS_EMERGENCY_RO, &c->flags));
- __bch_fs_read_only(c);
+ __bch2_fs_read_only(c);
wait_event(bch_read_only_wait,
test_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags));
clear_bit(BCH_FS_WRITE_DISABLE_COMPLETE, &c->flags);
- if (!bch_journal_error(&c->journal) &&
+ if (!bch2_journal_error(&c->journal) &&
!test_bit(BCH_FS_ERROR, &c->flags)) {
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb, true);
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
@@ -266,31 +266,31 @@ out:
mutex_unlock(&c->state_lock);
}
-static void bch_fs_read_only_work(struct work_struct *work)
+static void bch2_fs_read_only_work(struct work_struct *work)
{
struct bch_fs *c =
container_of(work, struct bch_fs, read_only_work);
- bch_fs_read_only(c);
+ bch2_fs_read_only(c);
}
-static void bch_fs_read_only_async(struct bch_fs *c)
+static void bch2_fs_read_only_async(struct bch_fs *c)
{
queue_work(system_long_wq, &c->read_only_work);
}
-bool bch_fs_emergency_read_only(struct bch_fs *c)
+bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
- bch_fs_read_only_async(c);
- bch_journal_halt(&c->journal);
+ bch2_fs_read_only_async(c);
+ bch2_journal_halt(&c->journal);
wake_up(&bch_read_only_wait);
return ret;
}
-const char *bch_fs_read_write(struct bch_fs *c)
+const char *bch2_fs_read_write(struct bch_fs *c)
{
struct bch_dev *ca;
const char *err = NULL;
@@ -303,24 +303,24 @@ const char *bch_fs_read_write(struct bch_fs *c)
err = "error starting allocator thread";
for_each_rw_member(ca, c, i)
- if (bch_dev_allocator_start(ca)) {
+ if (bch2_dev_allocator_start(ca)) {
percpu_ref_put(&ca->io_ref);
goto err;
}
err = "error starting btree GC thread";
- if (bch_gc_thread_start(c))
+ if (bch2_gc_thread_start(c))
goto err;
err = "error starting moving GC thread";
for_each_rw_member(ca, c, i)
- if (bch_moving_gc_start(ca)) {
+ if (bch2_moving_gc_start(ca)) {
percpu_ref_put(&ca->io_ref);
goto err;
}
err = "error starting tiering thread";
- if (bch_tiering_start(c))
+ if (bch2_tiering_start(c))
goto err;
schedule_delayed_work(&c->pd_controllers_update, 5 * HZ);
@@ -334,20 +334,20 @@ out:
mutex_unlock(&c->state_lock);
return err;
err:
- __bch_fs_read_only(c);
+ __bch2_fs_read_only(c);
goto out;
}
/* Filesystem startup/shutdown: */
-static void bch_fs_free(struct bch_fs *c)
+static void bch2_fs_free(struct bch_fs *c)
{
- bch_fs_encryption_exit(c);
- bch_fs_btree_exit(c);
- bch_fs_journal_exit(&c->journal);
- bch_io_clock_exit(&c->io_clock[WRITE]);
- bch_io_clock_exit(&c->io_clock[READ]);
- bch_fs_compress_exit(c);
+ bch2_fs_encryption_exit(c);
+ bch2_fs_btree_exit(c);
+ bch2_fs_journal_exit(&c->journal);
+ bch2_io_clock_exit(&c->io_clock[WRITE]);
+ bch2_io_clock_exit(&c->io_clock[READ]);
+ bch2_fs_compress_exit(c);
bdi_destroy(&c->bdi);
lg_lock_free(&c->usage_lock);
free_percpu(c->usage_percpu);
@@ -372,7 +372,7 @@ static void bch_fs_free(struct bch_fs *c)
module_put(THIS_MODULE);
}
-static void bch_fs_exit(struct bch_fs *c)
+static void bch2_fs_exit(struct bch_fs *c)
{
unsigned i;
@@ -383,13 +383,13 @@ static void bch_fs_exit(struct bch_fs *c)
for (i = 0; i < c->sb.nr_devices; i++)
if (c->devs[i])
- bch_dev_free(c->devs[i]);
+ bch2_dev_free(c->devs[i]);
closure_debug_destroy(&c->cl);
kobject_put(&c->kobj);
}
-static void bch_fs_offline(struct bch_fs *c)
+static void bch2_fs_offline(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;
@@ -402,46 +402,46 @@ static void bch_fs_offline(struct bch_fs *c)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
- "bcache");
+ "bcachefs");
if (c->kobj.state_in_sysfs)
kobject_del(&c->kobj);
- bch_fs_debug_exit(c);
- bch_fs_chardev_exit(c);
+ bch2_fs_debug_exit(c);
+ bch2_fs_chardev_exit(c);
kobject_put(&c->time_stats);
kobject_put(&c->opts_dir);
kobject_put(&c->internal);
- __bch_fs_read_only(c);
+ __bch2_fs_read_only(c);
}
-void bch_fs_release(struct kobject *kobj)
+void bch2_fs_release(struct kobject *kobj)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
- bch_fs_free(c);
+ bch2_fs_free(c);
}
-void bch_fs_stop(struct bch_fs *c)
+void bch2_fs_stop(struct bch_fs *c)
{
mutex_lock(&c->state_lock);
BUG_ON(c->state == BCH_FS_STOPPING);
c->state = BCH_FS_STOPPING;
mutex_unlock(&c->state_lock);
- bch_fs_offline(c);
+ bch2_fs_offline(c);
closure_sync(&c->cl);
- bch_fs_exit(c);
+ bch2_fs_exit(c);
}
#define alloc_bucket_pages(gfp, ca) \
((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(ca))))
-static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
+static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
struct bch_sb_field_members *mi;
struct bch_fs *c;
@@ -460,7 +460,7 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
mutex_init(&c->btree_cache_lock);
mutex_init(&c->bucket_lock);
mutex_init(&c->btree_root_lock);
- INIT_WORK(&c->read_only_work, bch_fs_read_only_work);
+ INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
init_rwsem(&c->gc_lock);
@@ -469,8 +469,8 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
BCH_TIME_STATS()
#undef BCH_TIME_STAT
- bch_fs_allocator_init(c);
- bch_fs_tiering_init(c);
+ bch2_fs_allocator_init(c);
+ bch2_fs_tiering_init(c);
INIT_LIST_HEAD(&c->list);
INIT_LIST_HEAD(&c->btree_cache);
@@ -484,7 +484,7 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
mutex_init(&c->bio_bounce_pages_lock);
bio_list_init(&c->read_retry_list);
spin_lock_init(&c->read_retry_lock);
- INIT_WORK(&c->read_retry_work, bch_read_retry_work);
+ INIT_WORK(&c->read_retry_work, bch2_read_retry_work);
mutex_init(&c->zlib_workspace_lock);
seqcount_init(&c->gc_pos_lock);
@@ -510,7 +510,7 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
mutex_lock(&c->sb_lock);
- if (bch_sb_to_fs(c, sb)) {
+ if (bch2_sb_to_fs(c, sb)) {
mutex_unlock(&c->sb_lock);
goto err;
}
@@ -519,15 +519,15 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
scnprintf(c->name, sizeof(c->name), "%pU", &c->sb.user_uuid);
- bch_opts_apply(&c->opts, bch_sb_opts(sb));
- bch_opts_apply(&c->opts, opts);
+ bch2_opts_apply(&c->opts, bch2_sb_opts(sb));
+ bch2_opts_apply(&c->opts, opts);
c->opts.nochanges |= c->opts.noreplay;
c->opts.read_only |= c->opts.nochanges;
c->block_bits = ilog2(c->sb.block_size);
- if (bch_fs_init_fault("fs_alloc"))
+ if (bch2_fs_init_fault("fs_alloc"))
goto err;
iter_size = (btree_blocks(c) + 1) * 2 *
@@ -535,11 +535,11 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
journal_entry_bytes = 512U << BCH_SB_JOURNAL_ENTRY_SIZE(sb);
- if (!(c->wq = alloc_workqueue("bcache",
+ if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
- percpu_ref_init(&c->writes, bch_writes_disabled, 0, GFP_KERNEL) ||
+ percpu_ref_init(&c->writes, bch2_writes_disabled, 0, GFP_KERNEL) ||
mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
sizeof(struct btree_reserve)) ||
mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
@@ -558,24 +558,24 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
lg_lock_init(&c->usage_lock) ||
mempool_init_page_pool(&c->btree_bounce_pool, 1,
ilog2(btree_pages(c))) ||
- bdi_setup_and_register(&c->bdi, "bcache") ||
- bch_io_clock_init(&c->io_clock[READ]) ||
- bch_io_clock_init(&c->io_clock[WRITE]) ||
- bch_fs_journal_init(&c->journal, journal_entry_bytes) ||
- bch_fs_btree_init(c) ||
- bch_fs_encryption_init(c) ||
- bch_fs_compress_init(c) ||
- bch_check_set_has_compressed_data(c, c->opts.compression))
+ bdi_setup_and_register(&c->bdi, "bcachefs") ||
+ bch2_io_clock_init(&c->io_clock[READ]) ||
+ bch2_io_clock_init(&c->io_clock[WRITE]) ||
+ bch2_fs_journal_init(&c->journal, journal_entry_bytes) ||
+ bch2_fs_btree_init(c) ||
+ bch2_fs_encryption_init(c) ||
+ bch2_fs_compress_init(c) ||
+ bch2_check_set_has_compressed_data(c, c->opts.compression))
goto err;
c->bdi.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
- c->bdi.congested_fn = bch_congested_fn;
+ c->bdi.congested_fn = bch2_congested_fn;
c->bdi.congested_data = c;
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
for (i = 0; i < c->sb.nr_devices; i++)
- if (!bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)) &&
- bch_dev_alloc(c, i))
+ if (!bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le)) &&
+ bch2_dev_alloc(c, i))
goto err;
/*
@@ -584,18 +584,18 @@ static struct bch_fs *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
*/
closure_init(&c->cl, NULL);
- c->kobj.kset = bcache_kset;
- kobject_init(&c->kobj, &bch_fs_ktype);
- kobject_init(&c->internal, &bch_fs_internal_ktype);
- kobject_init(&c->opts_dir, &bch_fs_opts_dir_ktype);
- kobject_init(&c->time_stats, &bch_fs_time_stats_ktype);
+ c->kobj.kset = bcachefs_kset;
+ kobject_init(&c->kobj, &bch2_fs_ktype);
+ kobject_init(&c->internal, &bch2_fs_internal_ktype);
+ kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
+ kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
return c;
err:
- bch_fs_free(c);
+ bch2_fs_free(c);
return NULL;
}
-static const char *__bch_fs_online(struct bch_fs *c)
+static const char *__bch2_fs_online(struct bch_fs *c)
{
struct bch_dev *ca;
const char *err = NULL;
@@ -607,14 +607,14 @@ static const char *__bch_fs_online(struct bch_fs *c)
if (!list_empty(&c->list))
return NULL;
- if (__bch_uuid_to_fs(c->sb.uuid))
+ if (__bch2_uuid_to_fs(c->sb.uuid))
return "filesystem UUID already open";
- ret = bch_fs_chardev_init(c);
+ ret = bch2_fs_chardev_init(c);
if (ret)
return "error creating character device";
- bch_fs_debug_init(c);
+ bch2_fs_debug_init(c);
if (kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ||
kobject_add(&c->internal, &c->kobj, "internal") ||
@@ -626,7 +626,7 @@ static const char *__bch_fs_online(struct bch_fs *c)
err = "error creating sysfs objects";
__for_each_member_device(ca, c, i)
- if (bch_dev_sysfs_online(ca))
+ if (bch2_dev_sysfs_online(ca))
goto err;
list_add(&c->list, &bch_fs_list);
@@ -636,18 +636,18 @@ err:
return err;
}
-static const char *bch_fs_online(struct bch_fs *c)
+static const char *bch2_fs_online(struct bch_fs *c)
{
const char *err;
mutex_lock(&bch_fs_list_lock);
- err = __bch_fs_online(c);
+ err = __bch2_fs_online(c);
mutex_unlock(&bch_fs_list_lock);
return err;
}
-static const char *__bch_fs_start(struct bch_fs *c)
+static const char *__bch2_fs_start(struct bch_fs *c)
{
const char *err = "cannot allocate memory";
struct bch_sb_field_members *mi;
@@ -662,11 +662,11 @@ static const char *__bch_fs_start(struct bch_fs *c)
mutex_lock(&c->sb_lock);
for_each_online_member(ca, c, i)
- bch_sb_from_fs(c, ca);
+ bch2_sb_from_fs(c, ca);
mutex_unlock(&c->sb_lock);
if (BCH_SB_INITIALIZED(c->disk_sb)) {
- ret = bch_journal_read(c, &journal);
+ ret = bch2_journal_read(c, &journal);
if (ret)
goto err;
@@ -677,7 +677,7 @@ static const char *__bch_fs_start(struct bch_fs *c)
err = "error reading priorities";
for_each_readable_member(ca, c, i) {
- ret = bch_prio_read(ca);
+ ret = bch2_prio_read(ca);
if (ret) {
percpu_ref_put(&ca->io_ref);
goto err;
@@ -689,7 +689,7 @@ static const char *__bch_fs_start(struct bch_fs *c)
struct bkey_i *k;
err = "bad btree root";
- k = bch_journal_find_btree_root(c, j, id, &level);
+ k = bch2_journal_find_btree_root(c, j, id, &level);
if (!k && id == BTREE_ID_EXTENTS)
goto err;
if (!k) {
@@ -698,14 +698,14 @@ static const char *__bch_fs_start(struct bch_fs *c)
}
err = "error reading btree root";
- if (bch_btree_root_read(c, id, k, level))
+ if (bch2_btree_root_read(c, id, k, level))
goto err;
}
bch_verbose(c, "starting mark and sweep:");
err = "error in recovery";
- if (bch_initial_gc(c, &journal))
+ if (bch2_initial_gc(c, &journal))
goto err;
if (c->opts.noreplay)
@@ -714,15 +714,15 @@ static const char *__bch_fs_start(struct bch_fs *c)
bch_verbose(c, "mark and sweep done");
/*
- * bch_journal_start() can't happen sooner, or btree_gc_finish()
+ * bch2_journal_start() can't happen sooner, or btree_gc_finish()
* will give spurious errors about oldest_gen > bucket_gen -
* this is a hack but oh well.
*/
- bch_journal_start(c);
+ bch2_journal_start(c);
err = "error starting allocator thread";
for_each_rw_member(ca, c, i)
- if (bch_dev_allocator_start(ca)) {
+ if (bch2_dev_allocator_start(ca)) {
percpu_ref_put(&ca->io_ref);
goto err;
}
@@ -730,7 +730,7 @@ static const char *__bch_fs_start(struct bch_fs *c)
bch_verbose(c, "starting journal replay:");
err = "journal replay failed";
- ret = bch_journal_replay(c, &journal);
+ ret = bch2_journal_replay(c, &journal);
if (ret)
goto err;
@@ -741,7 +741,7 @@ static const char *__bch_fs_start(struct bch_fs *c)
bch_verbose(c, "starting fsck:");
err = "error in fsck";
- ret = bch_fsck(c, !c->opts.nofsck);
+ ret = bch2_fsck(c, !c->opts.nofsck);
if (ret)
goto err;
@@ -755,11 +755,11 @@ static const char *__bch_fs_start(struct bch_fs *c)
bch_notice(c, "initializing new filesystem");
- bch_initial_gc(c, NULL);
+ bch2_initial_gc(c, NULL);
err = "unable to allocate journal buckets";
for_each_rw_member(ca, c, i)
- if (bch_dev_journal_alloc(ca)) {
+ if (bch2_dev_journal_alloc(ca)) {
percpu_ref_put(&ca->io_ref);
goto err;
}
@@ -768,19 +768,19 @@ static const char *__bch_fs_start(struct bch_fs *c)
* journal_res_get() will crash if called before this has
* set up the journal.pin FIFO and journal.cur pointer:
*/
- bch_journal_start(c);
- bch_journal_set_replay_done(&c->journal);
+ bch2_journal_start(c);
+ bch2_journal_set_replay_done(&c->journal);
err = "error starting allocator thread";
for_each_rw_member(ca, c, i)
- if (bch_dev_allocator_start(ca)) {
+ if (bch2_dev_allocator_start(ca)) {
percpu_ref_put(&ca->io_ref);
goto err;
}
err = "cannot allocate new btree root";
for (id = 0; id < BTREE_ID_NR; id++)
- if (bch_btree_root_alloc(c, id, &cl)) {
+ if (bch2_btree_root_alloc(c, id, &cl)) {
closure_sync(&cl);
goto err;
}
@@ -788,37 +788,37 @@ static const char *__bch_fs_start(struct bch_fs *c)
/* Wait for new btree roots to be written: */
closure_sync(&cl);
- bch_inode_init(c, &inode, 0, 0,
+ bch2_inode_init(c, &inode, 0, 0,
S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0);
inode.inum = BCACHE_ROOT_INO;
- bch_inode_pack(&packed_inode, &inode);
+ bch2_inode_pack(&packed_inode, &inode);
err = "error creating root directory";
- if (bch_btree_insert(c, BTREE_ID_INODES,
+ if (bch2_btree_insert(c, BTREE_ID_INODES,
&packed_inode.inode.k_i,
NULL, NULL, NULL, 0))
goto err;
err = "error writing first journal entry";
- if (bch_journal_meta(&c->journal))
+ if (bch2_journal_meta(&c->journal))
goto err;
}
recovery_done:
err = "dynamic fault";
- if (bch_fs_init_fault("fs_start"))
+ if (bch2_fs_init_fault("fs_start"))
goto err;
if (c->opts.read_only) {
- bch_fs_read_only(c);
+ bch2_fs_read_only(c);
} else {
- err = bch_fs_read_write(c);
+ err = bch2_fs_read_write(c);
if (err)
goto err;
}
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
now = ktime_get_seconds();
for_each_member_device(ca, c, i)
@@ -828,12 +828,12 @@ recovery_done:
SET_BCH_SB_CLEAN(c->disk_sb, false);
c->disk_sb->version = BCACHE_SB_VERSION_CDEV;
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
err = NULL;
out:
- bch_journal_entries_free(&journal);
+ bch2_journal_entries_free(&journal);
return err;
err:
switch (ret) {
@@ -867,16 +867,16 @@ err:
goto out;
}
-const char *bch_fs_start(struct bch_fs *c)
+const char *bch2_fs_start(struct bch_fs *c)
{
- return __bch_fs_start(c) ?: bch_fs_online(c);
+ return __bch2_fs_start(c) ?: bch2_fs_online(c);
}
-static const char *bch_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
+static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
{
struct bch_sb_field_members *sb_mi;
- sb_mi = bch_sb_get_members(sb);
+ sb_mi = bch2_sb_get_members(sb);
if (!sb_mi)
return "Invalid superblock: member info area missing";
@@ -890,11 +890,11 @@ static const char *bch_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
return NULL;
}
-static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
+static const char *bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
{
struct bch_sb *newest =
le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
- struct bch_sb_field_members *mi = bch_sb_get_members(newest);
+ struct bch_sb_field_members *mi = bch2_sb_get_members(newest);
if (uuid_le_cmp(fs->uuid, sb->uuid))
return "device not a member of filesystem";
@@ -902,7 +902,7 @@ static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
if (sb->dev_idx >= newest->nr_devices)
return "device has invalid dev_idx";
- if (bch_is_zero(mi->members[sb->dev_idx].uuid.b, sizeof(uuid_le)))
+ if (bch2_is_zero(mi->members[sb->dev_idx].uuid.b, sizeof(uuid_le)))
return "device has been removed";
if (fs->block_size != sb->block_size)
@@ -913,14 +913,14 @@ static const char *bch_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
/* Device startup/shutdown: */
-void bch_dev_release(struct kobject *kobj)
+void bch2_dev_release(struct kobject *kobj)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
kfree(ca);
}
-static void bch_dev_free(struct bch_dev *ca)
+static void bch2_dev_free(struct bch_dev *ca)
{
unsigned i;
@@ -929,13 +929,13 @@ static void bch_dev_free(struct bch_dev *ca)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
sysfs_remove_link(&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj,
- "bcache");
+ "bcachefs");
if (ca->kobj.state_in_sysfs)
kobject_del(&ca->kobj);
- bch_free_super(&ca->disk_sb);
- bch_dev_journal_exit(ca);
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_journal_exit(ca);
free_percpu(ca->sectors_written);
bioset_exit(&ca->replica_set);
@@ -956,20 +956,20 @@ static void bch_dev_free(struct bch_dev *ca)
kobject_put(&ca->kobj);
}
-static void bch_dev_io_ref_release(struct percpu_ref *ref)
+static void bch2_dev_io_ref_release(struct percpu_ref *ref)
{
struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
complete(&ca->offline_complete);
}
-static void __bch_dev_offline(struct bch_dev *ca)
+static void __bch2_dev_offline(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
lockdep_assert_held(&c->state_lock);
- __bch_dev_read_only(ca->fs, ca);
+ __bch2_dev_read_only(ca->fs, ca);
reinit_completion(&ca->offline_complete);
percpu_ref_kill(&ca->io_ref);
@@ -979,22 +979,22 @@ static void __bch_dev_offline(struct bch_dev *ca)
struct kobject *block =
&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;
- sysfs_remove_link(block, "bcache");
+ sysfs_remove_link(block, "bcachefs");
sysfs_remove_link(&ca->kobj, "block");
}
- bch_free_super(&ca->disk_sb);
- bch_dev_journal_exit(ca);
+ bch2_free_super(&ca->disk_sb);
+ bch2_dev_journal_exit(ca);
}
-static void bch_dev_ref_release(struct percpu_ref *ref)
+static void bch2_dev_ref_release(struct percpu_ref *ref)
{
struct bch_dev *ca = container_of(ref, struct bch_dev, ref);
complete(&ca->stop_complete);
}
-static void bch_dev_stop(struct bch_dev *ca)
+static void bch2_dev_stop(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
@@ -1010,7 +1010,7 @@ static void bch_dev_stop(struct bch_dev *ca)
wait_for_completion(&ca->stop_complete);
}
-static int bch_dev_sysfs_online(struct bch_dev *ca)
+static int bch2_dev_sysfs_online(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
int ret;
@@ -1029,7 +1029,7 @@ static int bch_dev_sysfs_online(struct bch_dev *ca)
struct kobject *block =
&part_to_dev(ca->disk_sb.bdev->bd_part)->kobj;
- ret = sysfs_create_link(block, &ca->kobj, "bcache");
+ ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
if (ret)
return ret;
ret = sysfs_create_link(&ca->kobj, block, "block");
@@ -1040,7 +1040,7 @@ static int bch_dev_sysfs_online(struct bch_dev *ca)
return 0;
}
-static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx)
+static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
struct bch_member *member;
size_t reserve_none, movinggc_reserve, free_inc_reserve, total_reserve;
@@ -1048,14 +1048,14 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx)
unsigned i;
struct bch_dev *ca;
- if (bch_fs_init_fault("dev_alloc"))
+ if (bch2_fs_init_fault("dev_alloc"))
return -ENOMEM;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
if (!ca)
return -ENOMEM;
- kobject_init(&ca->kobj, &bch_dev_ktype);
+ kobject_init(&ca->kobj, &bch2_dev_ktype);
init_completion(&ca->stop_complete);
init_completion(&ca->offline_complete);
@@ -1067,16 +1067,16 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx)
spin_lock_init(&ca->freelist_lock);
spin_lock_init(&ca->prio_buckets_lock);
mutex_init(&ca->heap_lock);
- bch_dev_moving_gc_init(ca);
+ bch2_dev_moving_gc_init(ca);
- INIT_WORK(&ca->io_error_work, bch_nonfatal_io_error_work);
+ INIT_WORK(&ca->io_error_work, bch2_nonfatal_io_error_work);
- if (bch_fs_init_fault("dev_alloc"))
+ if (bch2_fs_init_fault("dev_alloc"))
goto err;
- member = bch_sb_get_members(c->disk_sb)->members + dev_idx;
+ member = bch2_sb_get_members(c->disk_sb)->members + dev_idx;
- ca->mi = bch_mi_to_cpu(member);
+ ca->mi = bch2_mi_to_cpu(member);
ca->uuid = member->uuid;
ca->bucket_bits = ilog2(ca->mi.bucket_size);
scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
@@ -1092,9 +1092,9 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx)
free_inc_reserve = movinggc_reserve / 2;
heap_size = movinggc_reserve * 8;
- if (percpu_ref_init(&ca->ref, bch_dev_ref_release,
+ if (percpu_ref_init(&ca->ref, bch2_dev_ref_release,
0, GFP_KERNEL) ||
- percpu_ref_init(&ca->io_ref, bch_dev_io_ref_release,
+ percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_release,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
!init_fifo(&ca->free[RESERVE_BTREE], BTREE_NODE_RESERVE, GFP_KERNEL) ||
@@ -1129,16 +1129,16 @@ static int bch_dev_alloc(struct bch_fs *c, unsigned dev_idx)
ca->fs = c;
rcu_assign_pointer(c->devs[ca->dev_idx], ca);
- if (bch_dev_sysfs_online(ca))
+ if (bch2_dev_sysfs_online(ca))
pr_warn("error creating sysfs objects");
return 0;
err:
- bch_dev_free(ca);
+ bch2_dev_free(ca);
return -ENOMEM;
}
-static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
+static int __bch2_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
{
struct bch_dev *ca;
int ret;
@@ -1147,7 +1147,7 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
if (le64_to_cpu(sb->sb->seq) >
le64_to_cpu(c->disk_sb->seq))
- bch_sb_to_fs(c, sb->sb);
+ bch2_sb_to_fs(c, sb->sb);
BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
!c->devs[sb->sb->dev_idx]);
@@ -1159,7 +1159,7 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
return -EINVAL;
}
- ret = bch_dev_journal_init(ca, sb->sb);
+ ret = bch2_dev_journal_init(ca, sb->sb);
if (ret)
return ret;
@@ -1182,12 +1182,12 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
bdevname(ca->disk_sb.bdev, c->name);
bdevname(ca->disk_sb.bdev, ca->name);
- if (bch_dev_sysfs_online(ca))
+ if (bch2_dev_sysfs_online(ca))
pr_warn("error creating sysfs objects");
lg_local_lock(&c->usage_lock);
if (!gc_will_visit(c, gc_phase(GC_PHASE_SB_METADATA)))
- bch_mark_dev_metadata(ca->fs, ca);
+ bch2_mark_dev_metadata(ca->fs, ca);
lg_local_unlock(&c->usage_lock);
percpu_ref_reinit(&ca->io_ref);
@@ -1196,7 +1196,7 @@ static int __bch_dev_online(struct bch_fs *c, struct bcache_superblock *sb)
/* Device management: */
-bool bch_fs_may_start(struct bch_fs *c, int flags)
+bool bch2_fs_may_start(struct bch_fs *c, int flags)
{
struct bch_sb_field_members *mi;
unsigned meta_missing = 0;
@@ -1205,11 +1205,11 @@ bool bch_fs_may_start(struct bch_fs *c, int flags)
unsigned i;
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
for (i = 0; i < c->disk_sb->nr_devices; i++)
if (!c->devs[i] &&
- !bch_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) {
+ !bch2_is_zero(mi->members[i].uuid.b, sizeof(uuid_le))) {
degraded = true;
if (BCH_MEMBER_HAS_METADATA(&mi->members[i]))
meta_missing++;
@@ -1240,8 +1240,8 @@ bool bch_fs_may_start(struct bch_fs *c, int flags)
return true;
}
-bool bch_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
+bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
{
lockdep_assert_held(&c->state_lock);
@@ -1269,74 +1269,74 @@ bool bch_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
return true;
}
-static void __bch_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
+static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
- bch_moving_gc_stop(ca);
+ bch2_moving_gc_stop(ca);
/*
* This stops new data writes (e.g. to existing open data
* buckets) and then waits for all existing writes to
* complete.
*/
- bch_dev_allocator_stop(ca);
+ bch2_dev_allocator_stop(ca);
- bch_dev_group_remove(&c->journal.devs, ca);
+ bch2_dev_group_remove(&c->journal.devs, ca);
}
-static const char *__bch_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
+static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
lockdep_assert_held(&c->state_lock);
BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);
- if (bch_dev_allocator_start(ca))
+ if (bch2_dev_allocator_start(ca))
return "error starting allocator thread";
- if (bch_moving_gc_start(ca))
+ if (bch2_moving_gc_start(ca))
return "error starting moving GC thread";
- if (bch_tiering_start(c))
+ if (bch2_tiering_start(c))
return "error starting tiering thread";
return NULL;
}
-int __bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
+int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
{
struct bch_sb_field_members *mi;
if (ca->mi.state == new_state)
return 0;
- if (!bch_dev_state_allowed(c, ca, new_state, flags))
+ if (!bch2_dev_state_allowed(c, ca, new_state, flags))
return -EINVAL;
if (new_state == BCH_MEMBER_STATE_RW) {
- if (__bch_dev_read_write(c, ca))
+ if (__bch2_dev_read_write(c, ca))
return -ENOMEM;
} else {
- __bch_dev_read_only(c, ca);
+ __bch2_dev_read_only(c, ca);
}
- bch_notice(ca, "%s", bch_dev_state[new_state]);
+ bch_notice(ca, "%s", bch2_dev_state[new_state]);
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
SET_BCH_MEMBER_STATE(&mi->members[ca->dev_idx], new_state);
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
return 0;
}
-int bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
- enum bch_member_state new_state, int flags)
+int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
+ enum bch_member_state new_state, int flags)
{
int ret;
mutex_lock(&c->state_lock);
- ret = __bch_dev_set_state(c, ca, new_state, flags);
+ ret = __bch2_dev_set_state(c, ca, new_state, flags);
mutex_unlock(&c->state_lock);
return ret;
@@ -1344,7 +1344,7 @@ int bch_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
/* Device add/removal: */
-int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
+int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
struct bch_sb_field_members *mi;
unsigned dev_idx = ca->dev_idx;
@@ -1359,7 +1359,7 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
goto err;
}
- if (!bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
bch_err(ca, "Cannot remove without losing data");
goto err;
}
@@ -1369,7 +1369,7 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
*
* flag_data_bad() does not check btree pointers
*/
- ret = bch_flag_data_bad(ca);
+ ret = bch2_flag_data_bad(ca);
if (ret) {
bch_err(ca, "Remove failed");
goto err;
@@ -1388,21 +1388,21 @@ int bch_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
c->journal.prio_buckets[dev_idx] = 0;
spin_unlock(&c->journal.lock);
- bch_journal_meta(&c->journal);
+ bch2_journal_meta(&c->journal);
- __bch_dev_offline(ca);
- bch_dev_stop(ca);
- bch_dev_free(ca);
+ __bch2_dev_offline(ca);
+ bch2_dev_stop(ca);
+ bch2_dev_free(ca);
/*
* Free this device's slot in the bch_member array - all pointers to
* this device must be gone:
*/
mutex_lock(&c->sb_lock);
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
memset(&mi->members[dev_idx].uuid, 0, sizeof(mi->members[dev_idx].uuid));
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
mutex_unlock(&c->state_lock);
@@ -1412,7 +1412,7 @@ err:
return ret;
}
-int bch_dev_add(struct bch_fs *c, const char *path)
+int bch2_dev_add(struct bch_fs *c, const char *path)
{
struct bcache_superblock sb;
const char *err;
@@ -1422,15 +1422,15 @@ int bch_dev_add(struct bch_fs *c, const char *path)
unsigned dev_idx, nr_devices, u64s;
int ret = -EINVAL;
- err = bch_read_super(&sb, bch_opts_empty(), path);
+ err = bch2_read_super(&sb, bch2_opts_empty(), path);
if (err)
return -EINVAL;
- err = bch_validate_cache_super(&sb);
+ err = bch2_validate_cache_super(&sb);
if (err)
return -EINVAL;
- err = bch_dev_may_add(sb.sb, c);
+ err = bch2_dev_may_add(sb.sb, c);
if (err)
return -EINVAL;
@@ -1441,17 +1441,17 @@ int bch_dev_add(struct bch_fs *c, const char *path)
* Preserve the old cache member information (esp. tier)
* before we start bashing the disk stuff.
*/
- dev_mi = bch_sb_get_members(sb.sb);
+ dev_mi = bch2_sb_get_members(sb.sb);
saved_mi = dev_mi->members[sb.sb->dev_idx];
saved_mi.last_mount = cpu_to_le64(ktime_get_seconds());
- if (dynamic_fault("bcache:add:no_slot"))
+ if (dynamic_fault("bcachefs:add:no_slot"))
goto no_slot;
- mi = bch_sb_get_members(c->disk_sb);
+ mi = bch2_sb_get_members(c->disk_sb);
for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
if (dev_idx >= c->sb.nr_devices ||
- bch_is_zero(mi->members[dev_idx].uuid.b,
+ bch2_is_zero(mi->members[dev_idx].uuid.b,
sizeof(uuid_le)))
goto have_slot;
no_slot:
@@ -1465,11 +1465,11 @@ have_slot:
sizeof(struct bch_member) * nr_devices) / sizeof(u64);
err = "no space in superblock for member info";
- mi = bch_fs_sb_resize_members(c, u64s);
+ mi = bch2_fs_sb_resize_members(c, u64s);
if (!mi)
goto err_unlock;
- dev_mi = bch_sb_resize_members(&sb, u64s);
+ dev_mi = bch2_sb_resize_members(&sb, u64s);
if (!dev_mi)
goto err_unlock;
@@ -1485,28 +1485,28 @@ have_slot:
c->disk_sb->nr_devices = nr_devices;
c->sb.nr_devices = nr_devices;
- if (bch_dev_alloc(c, dev_idx)) {
+ if (bch2_dev_alloc(c, dev_idx)) {
err = "cannot allocate memory";
ret = -ENOMEM;
goto err_unlock;
}
- if (__bch_dev_online(c, &sb)) {
- err = "bch_dev_online() error";
+ if (__bch2_dev_online(c, &sb)) {
+ err = "bch2_dev_online() error";
ret = -ENOMEM;
goto err_unlock;
}
- bch_write_super(c);
+ bch2_write_super(c);
mutex_unlock(&c->sb_lock);
ca = c->devs[dev_idx];
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
err = "journal alloc failed";
- if (bch_dev_journal_alloc(ca))
+ if (bch2_dev_journal_alloc(ca))
goto err;
- err = __bch_dev_read_write(c, ca);
+ err = __bch2_dev_read_write(c, ca);
if (err)
goto err;
}
@@ -1517,13 +1517,13 @@ err_unlock:
mutex_unlock(&c->sb_lock);
err:
mutex_unlock(&c->state_lock);
- bch_free_super(&sb);
+ bch2_free_super(&sb);
bch_err(c, "Unable to add device: %s", err);
return ret ?: -EINVAL;
}
-int bch_dev_online(struct bch_fs *c, const char *path)
+int bch2_dev_online(struct bch_fs *c, const char *path)
{
struct bcache_superblock sb = { 0 };
struct bch_dev *ca;
@@ -1532,19 +1532,19 @@ int bch_dev_online(struct bch_fs *c, const char *path)
mutex_lock(&c->state_lock);
- err = bch_read_super(&sb, bch_opts_empty(), path);
+ err = bch2_read_super(&sb, bch2_opts_empty(), path);
if (err)
goto err;
dev_idx = sb.sb->dev_idx;
- err = bch_dev_in_fs(c->disk_sb, sb.sb);
+ err = bch2_dev_in_fs(c->disk_sb, sb.sb);
if (err)
goto err;
mutex_lock(&c->sb_lock);
- if (__bch_dev_online(c, &sb)) {
- err = "__bch_dev_online() error";
+ if (__bch2_dev_online(c, &sb)) {
+ err = "__bch2_dev_online() error";
mutex_unlock(&c->sb_lock);
goto err;
}
@@ -1552,7 +1552,7 @@ int bch_dev_online(struct bch_fs *c, const char *path)
ca = c->devs[dev_idx];
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
- err = __bch_dev_read_write(c, ca);
+ err = __bch2_dev_read_write(c, ca);
if (err)
goto err;
}
@@ -1561,29 +1561,29 @@ int bch_dev_online(struct bch_fs *c, const char *path)
return 0;
err:
mutex_unlock(&c->state_lock);
- bch_free_super(&sb);
+ bch2_free_super(&sb);
bch_err(c, "error bringing %s online: %s", path, err);
return -EINVAL;
}
-int bch_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
+int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
mutex_lock(&c->state_lock);
- if (!bch_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
+ if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
bch_err(ca, "Cannot offline required disk");
mutex_unlock(&c->state_lock);
return -EINVAL;
}
- __bch_dev_read_only(c, ca);
- __bch_dev_offline(ca);
+ __bch2_dev_read_only(c, ca);
+ __bch2_dev_offline(ca);
mutex_unlock(&c->state_lock);
return 0;
}
-int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)
+int bch2_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)
{
int ret;
@@ -1597,13 +1597,13 @@ int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)
mutex_unlock(&c->state_lock);
- ret = bch_move_data_off_device(ca);
+ ret = bch2_move_data_off_device(ca);
if (ret) {
bch_err(ca, "Error migrating data: %i", ret);
return ret;
}
- ret = bch_move_metadata_off_device(ca);
+ ret = bch2_move_metadata_off_device(ca);
if (ret) {
bch_err(ca, "Error migrating metadata: %i", ret);
return ret;
@@ -1619,8 +1619,8 @@ int bch_dev_evacuate(struct bch_fs *c, struct bch_dev *ca)
/* Filesystem open: */
-const char *bch_fs_open(char * const *devices, unsigned nr_devices,
- struct bch_opts opts, struct bch_fs **ret)
+const char *bch2_fs_open(char * const *devices, unsigned nr_devices,
+ struct bch_opts opts, struct bch_fs **ret)
{
const char *err;
struct bch_fs *c = NULL;
@@ -1639,7 +1639,7 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices,
goto err;
for (i = 0; i < nr_devices; i++) {
- err = bch_read_super(&sb[i], opts, devices[i]);
+ err = bch2_read_super(&sb[i], opts, devices[i]);
if (err)
goto err;
@@ -1647,7 +1647,7 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices,
if (__SB_IS_BDEV(le64_to_cpu(sb[i].sb->version)))
goto err;
- err = bch_validate_cache_super(&sb[i]);
+ err = bch2_validate_cache_super(&sb[i]);
if (err)
goto err;
}
@@ -1658,36 +1658,36 @@ const char *bch_fs_open(char * const *devices, unsigned nr_devices,
best_sb = i;
for (i = 0; i < nr_devices; i++) {
- err = bch_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+ err = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
if (err)
goto err;
}
err = "cannot allocate memory";
- c = bch_fs_alloc(sb[best_sb].sb, opts);
+ c = bch2_fs_alloc(sb[best_sb].sb, opts);
if (!c)
goto err;
- err = "bch_dev_online() error";
+ err = "bch2_dev_online() error";
mutex_lock(&c->sb_lock);
for (i = 0; i < nr_devices; i++)
- if (__bch_dev_online(c, &sb[i])) {
+ if (__bch2_dev_online(c, &sb[i])) {
mutex_unlock(&c->sb_lock);
goto err;
}
mutex_unlock(&c->sb_lock);
err = "insufficient devices";
- if (!bch_fs_may_start(c, 0))
+ if (!bch2_fs_may_start(c, 0))
goto err;
if (!c->opts.nostart) {
- err = __bch_fs_start(c);
+ err = __bch2_fs_start(c);
if (err)
goto err;
}
- err = bch_fs_online(c);
+ err = bch2_fs_online(c);
if (err)
goto err;
@@ -1705,34 +1705,34 @@ out:
return err;
err:
if (c)
- bch_fs_stop(c);
+ bch2_fs_stop(c);
for (i = 0; i < nr_devices; i++)
- bch_free_super(&sb[i]);
+ bch2_free_super(&sb[i]);
goto out;
}
-static const char *__bch_fs_open_incremental(struct bcache_superblock *sb,
- struct bch_opts opts)
+static const char *__bch2_fs_open_incremental(struct bcache_superblock *sb,
+ struct bch_opts opts)
{
const char *err;
struct bch_fs *c;
bool allocated_fs = false;
- err = bch_validate_cache_super(sb);
+ err = bch2_validate_cache_super(sb);
if (err)
return err;
mutex_lock(&bch_fs_list_lock);
- c = __bch_uuid_to_fs(sb->sb->uuid);
+ c = __bch2_uuid_to_fs(sb->sb->uuid);
if (c) {
closure_get(&c->cl);
- err = bch_dev_in_fs(c->disk_sb, sb->sb);
+ err = bch2_dev_in_fs(c->disk_sb, sb->sb);
if (err)
goto err;
} else {
- c = bch_fs_alloc(sb->sb, opts);
+ c = bch2_fs_alloc(sb->sb, opts);
err = "cannot allocate memory";
if (!c)
goto err;
@@ -1740,22 +1740,22 @@ static const char *__bch_fs_open_incremental(struct bcache_superblock *sb,
allocated_fs = true;
}
- err = "bch_dev_online() error";
+ err = "bch2_dev_online() error";
mutex_lock(&c->sb_lock);
- if (__bch_dev_online(c, sb)) {
+ if (__bch2_dev_online(c, sb)) {
mutex_unlock(&c->sb_lock);
goto err;
}
mutex_unlock(&c->sb_lock);
- if (!c->opts.nostart && bch_fs_may_start(c, 0)) {
- err = __bch_fs_start(c);
+ if (!c->opts.nostart && bch2_fs_may_start(c, 0)) {
+ err = __bch2_fs_start(c);
if (err)
goto err;
}
- err = __bch_fs_online(c);
+ err = __bch2_fs_online(c);
if (err)
goto err;
@@ -1767,66 +1767,66 @@ err:
mutex_unlock(&bch_fs_list_lock);
if (allocated_fs)
- bch_fs_stop(c);
+ bch2_fs_stop(c);
else if (c)
closure_put(&c->cl);
return err;
}
-const char *bch_fs_open_incremental(const char *path)
+const char *bch2_fs_open_incremental(const char *path)
{
struct bcache_superblock sb;
- struct bch_opts opts = bch_opts_empty();
+ struct bch_opts opts = bch2_opts_empty();
const char *err;
- err = bch_read_super(&sb, opts, path);
+ err = bch2_read_super(&sb, opts, path);
if (err)
return err;
if (!__SB_IS_BDEV(le64_to_cpu(sb.sb->version)))
- err = __bch_fs_open_incremental(&sb, opts);
+ err = __bch2_fs_open_incremental(&sb, opts);
else
err = "not a bcachefs superblock";
- bch_free_super(&sb);
+ bch2_free_super(&sb);
return err;
}
/* Global interfaces/init */
-static void bcache_exit(void)
+static void bcachefs_exit(void)
{
- bch_debug_exit();
- bch_vfs_exit();
- bch_chardev_exit();
- if (bcache_kset)
- kset_unregister(bcache_kset);
+ bch2_debug_exit();
+ bch2_vfs_exit();
+ bch2_chardev_exit();
+ if (bcachefs_kset)
+ kset_unregister(bcachefs_kset);
}
-static int __init bcache_init(void)
+static int __init bcachefs_init(void)
{
- bkey_pack_test();
+ bch2_bkey_pack_test();
- if (!(bcache_kset = kset_create_and_add("bcache", NULL, fs_kobj)) ||
- bch_chardev_init() ||
- bch_vfs_init() ||
- bch_debug_init())
+ if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
+ bch2_chardev_init() ||
+ bch2_vfs_init() ||
+ bch2_debug_init())
goto err;
return 0;
err:
- bcache_exit();
+ bcachefs_exit();
return -ENOMEM;
}
#define BCH_DEBUG_PARAM(name, description) \
- bool bch_##name; \
- module_param_named(name, bch_##name, bool, 0644); \
+ bool bch2_##name; \
+ module_param_named(name, bch2_##name, bool, 0644); \
MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
-module_exit(bcache_exit);
-module_init(bcache_init);
+module_exit(bcachefs_exit);
+module_init(bcachefs_init);
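
(Illustrative note, not part of the patch: the BCH_DEBUG_PARAM rename above only changes the C symbol; module_param_named() keeps the unprefixed name as the userspace-visible parameter. Taking a hypothetical entry BCH_DEBUG_PARAM(expensive_debug_checks, "...") from BCH_DEBUG_PARAMS(), the renamed macro expands to roughly:)

bool bch2_expensive_debug_checks;
module_param_named(expensive_debug_checks, bch2_expensive_debug_checks,
		   bool, 0644);
MODULE_PARM_DESC(expensive_debug_checks, "...");

So /sys/module parameter names stay stable across the bch_ to bch2_ rename; only the in-kernel symbols gain the new prefix.
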
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 26b6da49fd6e..944244149e2e 100644
--- a/fs/bcachefs/super.h
+++ b/fs/bcachefs/super.h
@@ -20,7 +20,7 @@ static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
return s & (ca->mi.bucket_size - 1);
}
-static inline struct bch_dev *__bch_next_dev(struct bch_fs *c, unsigned *iter)
+static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter)
{
struct bch_dev *ca = NULL;
@@ -33,17 +33,17 @@ static inline struct bch_dev *__bch_next_dev(struct bch_fs *c, unsigned *iter)
}
#define __for_each_member_device(ca, c, iter) \
- for ((iter) = 0; ((ca) = __bch_next_dev((c), &(iter))); (iter)++)
+ for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter))); (iter)++)
#define for_each_member_device_rcu(ca, c, iter) \
__for_each_member_device(ca, c, iter)
-static inline struct bch_dev *bch_get_next_dev(struct bch_fs *c, unsigned *iter)
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
struct bch_dev *ca;
rcu_read_lock();
- if ((ca = __bch_next_dev(c, iter)))
+ if ((ca = __bch2_next_dev(c, iter)))
percpu_ref_get(&ca->ref);
rcu_read_unlock();
@@ -55,17 +55,17 @@ static inline struct bch_dev *bch_get_next_dev(struct bch_fs *c, unsigned *iter)
*/
#define for_each_member_device(ca, c, iter) \
for ((iter) = 0; \
- (ca = bch_get_next_dev(c, &(iter))); \
+ (ca = bch2_get_next_dev(c, &(iter))); \
percpu_ref_put(&ca->ref), (iter)++)
-static inline struct bch_dev *bch_get_next_online_dev(struct bch_fs *c,
+static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
unsigned *iter,
int state_mask)
{
struct bch_dev *ca;
rcu_read_lock();
- while ((ca = __bch_next_dev(c, iter)) &&
+ while ((ca = __bch2_next_dev(c, iter)) &&
(!((1 << ca->mi.state) & state_mask) ||
!percpu_ref_tryget(&ca->io_ref)))
(*iter)++;
@@ -76,7 +76,7 @@ static inline struct bch_dev *bch_get_next_online_dev(struct bch_fs *c,
#define __for_each_online_member(ca, c, iter, state_mask) \
for ((iter) = 0; \
- (ca = bch_get_next_online_dev(c, &(iter), state_mask)); \
+ (ca = bch2_get_next_online_dev(c, &(iter), state_mask)); \
percpu_ref_put(&ca->io_ref), (iter)++)
#define for_each_online_member(ca, c, iter) \
@@ -89,42 +89,42 @@ static inline struct bch_dev *bch_get_next_online_dev(struct bch_fs *c,
__for_each_online_member(ca, c, iter, \
(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
-struct bch_fs *bch_bdev_to_fs(struct block_device *);
-struct bch_fs *bch_uuid_to_fs(uuid_le);
-int bch_congested(struct bch_fs *, int);
+struct bch_fs *bch2_bdev_to_fs(struct block_device *);
+struct bch_fs *bch2_uuid_to_fs(uuid_le);
+int bch2_congested(struct bch_fs *, int);
-void bch_dev_release(struct kobject *);
+void bch2_dev_release(struct kobject *);
-bool bch_dev_state_allowed(struct bch_fs *, struct bch_dev *,
+bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
enum bch_member_state, int);
-int __bch_dev_set_state(struct bch_fs *, struct bch_dev *,
+int __bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
enum bch_member_state, int);
-int bch_dev_set_state(struct bch_fs *, struct bch_dev *,
+int bch2_dev_set_state(struct bch_fs *, struct bch_dev *,
enum bch_member_state, int);
-int bch_dev_fail(struct bch_dev *, int);
-int bch_dev_remove(struct bch_fs *, struct bch_dev *, int);
-int bch_dev_add(struct bch_fs *, const char *);
-int bch_dev_online(struct bch_fs *, const char *);
-int bch_dev_offline(struct bch_fs *, struct bch_dev *, int);
-int bch_dev_evacuate(struct bch_fs *, struct bch_dev *);
+int bch2_dev_fail(struct bch_dev *, int);
+int bch2_dev_remove(struct bch_fs *, struct bch_dev *, int);
+int bch2_dev_add(struct bch_fs *, const char *);
+int bch2_dev_online(struct bch_fs *, const char *);
+int bch2_dev_offline(struct bch_fs *, struct bch_dev *, int);
+int bch2_dev_evacuate(struct bch_fs *, struct bch_dev *);
-bool bch_fs_emergency_read_only(struct bch_fs *);
-void bch_fs_read_only(struct bch_fs *);
-const char *bch_fs_read_write(struct bch_fs *);
+bool bch2_fs_emergency_read_only(struct bch_fs *);
+void bch2_fs_read_only(struct bch_fs *);
+const char *bch2_fs_read_write(struct bch_fs *);
-void bch_fs_release(struct kobject *);
-void bch_fs_stop(struct bch_fs *);
+void bch2_fs_release(struct kobject *);
+void bch2_fs_stop(struct bch_fs *);
-const char *bch_fs_start(struct bch_fs *);
-const char *bch_fs_open(char * const *, unsigned, struct bch_opts,
+const char *bch2_fs_start(struct bch_fs *);
+const char *bch2_fs_open(char * const *, unsigned, struct bch_opts,
struct bch_fs **);
-const char *bch_fs_open_incremental(const char *path);
+const char *bch2_fs_open_incremental(const char *path);
-extern struct kobj_type bch_fs_ktype;
-extern struct kobj_type bch_fs_internal_ktype;
-extern struct kobj_type bch_fs_time_stats_ktype;
-extern struct kobj_type bch_fs_opts_dir_ktype;
-extern struct kobj_type bch_dev_ktype;
+extern struct kobj_type bch2_fs_ktype;
+extern struct kobj_type bch2_fs_internal_ktype;
+extern struct kobj_type bch2_fs_time_stats_ktype;
+extern struct kobj_type bch2_fs_opts_dir_ktype;
+extern struct kobj_type bch2_dev_ktype;
#endif /* _BCACHE_SUPER_H */
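
(Illustrative usage sketch, not part of the patch: for_each_member_device() above takes a per-device reference through bch2_get_next_dev() and drops it in the loop increment, so a caller only needs the filesystem pointer; walk_devices_example is a hypothetical caller:)

static void walk_devices_example(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		/* ca->ref is held across the body and put by the macro */
		pr_info("dev-%u: %llu buckets\n",
			ca->dev_idx, (u64) ca->mi.nbuckets);
	}
}

The online variants (__for_each_online_member() and friends) work the same way but filter on ca->mi.state and take ca->io_ref instead of ca->ref.
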
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 70b0c546171f..11c6cdcc8577 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -5,7 +5,7 @@
* Copyright 2012 Google, Inc.
*/
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "compress.h"
#include "sysfs.h"
@@ -127,7 +127,7 @@ static struct attribute sysfs_state_rw = {
.mode = S_IRUGO
};
-static int bch_bset_print_stats(struct bch_fs *c, char *buf)
+static int bch2_bset_print_stats(struct bch_fs *c, char *buf)
{
struct bset_stats stats;
size_t nodes = 0;
@@ -140,7 +140,7 @@ static int bch_bset_print_stats(struct bch_fs *c, char *buf)
rcu_read_lock();
for_each_cached_btree(b, c, tbl, iter, pos) {
- bch_btree_keys_stats(b, &stats);
+ bch2_btree_keys_stats(b, &stats);
nodes++;
}
rcu_read_unlock();
@@ -170,7 +170,7 @@ static int bch_bset_print_stats(struct bch_fs *c, char *buf)
stats.failed_overflow);
}
-static unsigned bch_root_usage(struct bch_fs *c)
+static unsigned bch2_root_usage(struct bch_fs *c)
{
unsigned bytes = 0;
struct bkey_packed *k;
@@ -194,7 +194,7 @@ lock_root:
return (bytes * 100) / btree_bytes(c);
}
-static size_t bch_btree_cache_size(struct bch_fs *c)
+static size_t bch2_btree_cache_size(struct bch_fs *c)
{
size_t ret = 0;
struct btree *b;
@@ -207,20 +207,20 @@ static size_t bch_btree_cache_size(struct bch_fs *c)
return ret;
}
-static unsigned bch_fs_available_percent(struct bch_fs *c)
+static unsigned bch2_fs_available_percent(struct bch_fs *c)
{
return div64_u64((u64) sectors_available(c) * 100,
c->capacity ?: 1);
}
#if 0
-static unsigned bch_btree_used(struct bch_fs *c)
+static unsigned bch2_btree_used(struct bch_fs *c)
{
return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c));
}
-static unsigned bch_average_key_size(struct bch_fs *c)
+static unsigned bch2_average_key_size(struct bch_fs *c)
{
return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
@@ -230,7 +230,7 @@ static unsigned bch_average_key_size(struct bch_fs *c)
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
- struct bch_fs_usage stats = bch_fs_usage_read(c);
+ struct bch_fs_usage stats = bch2_fs_usage_read(c);
return scnprintf(buf, PAGE_SIZE,
"capacity:\t\t%llu\n"
@@ -255,7 +255,7 @@ static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
stats.online_reserved);
}
-static ssize_t bch_compression_stats(struct bch_fs *c, char *buf)
+static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -286,7 +286,7 @@ static ssize_t bch_compression_stats(struct bch_fs *c, char *buf)
break;
}
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return snprintf(buf, PAGE_SIZE,
"uncompressed data:\n"
@@ -303,7 +303,7 @@ static ssize_t bch_compression_stats(struct bch_fs *c, char *buf)
compressed_sectors_uncompressed << 9);
}
-SHOW(bch_fs)
+SHOW(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
@@ -318,16 +318,16 @@ SHOW(bch_fs)
sysfs_hprint(btree_node_size, c->sb.btree_node_size << 9);
sysfs_print(btree_node_size_bytes, c->sb.btree_node_size << 9);
- sysfs_hprint(btree_cache_size, bch_btree_cache_size(c));
- sysfs_print(cache_available_percent, bch_fs_available_percent(c));
+ sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
+ sysfs_print(cache_available_percent, bch2_fs_available_percent(c));
sysfs_print(btree_gc_running, c->gc_pos.phase != GC_PHASE_DONE);
#if 0
/* XXX: reimplement */
- sysfs_print(btree_used_percent, bch_btree_used(c));
+ sysfs_print(btree_used_percent, bch2_btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
- sysfs_hprint(average_key_size, bch_average_key_size(c));
+ sysfs_hprint(average_key_size, bch2_average_key_size(c));
#endif
sysfs_print(cache_read_races,
@@ -353,32 +353,32 @@ SHOW(bch_fs)
/* Debugging: */
if (attr == &sysfs_journal_debug)
- return bch_journal_print_debug(&c->journal, buf);
+ return bch2_journal_print_debug(&c->journal, buf);
#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
- if (!bch_fs_running(c))
+ if (!bch2_fs_running(c))
return -EPERM;
if (attr == &sysfs_bset_tree_stats)
- return bch_bset_print_stats(c, buf);
+ return bch2_bset_print_stats(c, buf);
if (attr == &sysfs_alloc_debug)
return show_fs_alloc_debug(c, buf);
sysfs_print(tree_depth, c->btree_roots[BTREE_ID_EXTENTS].b->level);
- sysfs_print(root_usage_percent, bch_root_usage(c));
+ sysfs_print(root_usage_percent, bch2_root_usage(c));
if (attr == &sysfs_compression_stats)
- return bch_compression_stats(c, buf);
+ return bch2_compression_stats(c, buf);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
return 0;
}
-STORE(__bch_fs)
+STORE(__bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
@@ -404,7 +404,7 @@ STORE(__bch_fs)
ssize_t ret = strtoul_safe(buf, c->tiering_enabled)
?: (ssize_t) size;
- bch_tiering_start(c); /* issue wakeups */
+ bch2_tiering_start(c); /* issue wakeups */
return ret;
}
@@ -423,22 +423,22 @@ STORE(__bch_fs)
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
- if (!bch_fs_running(c))
+ if (!bch2_fs_running(c))
return -EPERM;
if (attr == &sysfs_journal_flush) {
- bch_journal_meta_async(&c->journal, NULL);
+ bch2_journal_meta_async(&c->journal, NULL);
return size;
}
if (attr == &sysfs_trigger_btree_coalesce)
- bch_coalesce(c);
+ bch2_coalesce(c);
/* Debugging: */
if (attr == &sysfs_trigger_gc)
- bch_gc(c);
+ bch2_gc(c);
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
@@ -451,18 +451,18 @@ STORE(__bch_fs)
return size;
}
-STORE(bch_fs)
+STORE(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
mutex_lock(&c->state_lock);
- size = __bch_fs_store(kobj, attr, buf, size);
+ size = __bch2_fs_store(kobj, attr, buf, size);
mutex_unlock(&c->state_lock);
return size;
}
-static struct attribute *bch_fs_files[] = {
+static struct attribute *bch2_fs_files[] = {
&sysfs_journal_write_delay_ms,
&sysfs_journal_reclaim_delay_ms,
&sysfs_journal_entry_size_max,
@@ -488,27 +488,27 @@ static struct attribute *bch_fs_files[] = {
&sysfs_journal_flush,
NULL
};
-KTYPE(bch_fs);
+KTYPE(bch2_fs);
/* internal dir - just a wrapper */
-SHOW(bch_fs_internal)
+SHOW(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
- return bch_fs_show(&c->kobj, attr, buf);
+ return bch2_fs_show(&c->kobj, attr, buf);
}
-STORE(bch_fs_internal)
+STORE(bch2_fs_internal)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
- return bch_fs_store(&c->kobj, attr, buf, size);
+ return bch2_fs_store(&c->kobj, attr, buf, size);
}
-static void bch_fs_internal_release(struct kobject *k)
+static void bch2_fs_internal_release(struct kobject *k)
{
}
-static struct attribute *bch_fs_internal_files[] = {
+static struct attribute *bch2_fs_internal_files[] = {
&sysfs_journal_debug,
&sysfs_alloc_debug,
@@ -537,34 +537,34 @@ static struct attribute *bch_fs_internal_files[] = {
NULL
};
-KTYPE(bch_fs_internal);
+KTYPE(bch2_fs_internal);
/* options */
-SHOW(bch_fs_opts_dir)
+SHOW(bch2_fs_opts_dir)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
- return bch_opt_show(&c->opts, attr->name, buf, PAGE_SIZE);
+ return bch2_opt_show(&c->opts, attr->name, buf, PAGE_SIZE);
}
-STORE(bch_fs_opts_dir)
+STORE(bch2_fs_opts_dir)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
const struct bch_option *opt;
enum bch_opt_id id;
u64 v;
- id = bch_parse_sysfs_opt(attr->name, buf, &v);
+ id = bch2_parse_sysfs_opt(attr->name, buf, &v);
if (id < 0)
return id;
- opt = &bch_opt_table[id];
+ opt = &bch2_opt_table[id];
mutex_lock(&c->sb_lock);
if (id == Opt_compression) {
- int ret = bch_check_set_has_compressed_data(c, v);
+ int ret = bch2_check_set_has_compressed_data(c, v);
if (ret) {
mutex_unlock(&c->sb_lock);
return ret;
@@ -573,21 +573,21 @@ STORE(bch_fs_opts_dir)
if (opt->set_sb != SET_NO_SB_OPT) {
opt->set_sb(c->disk_sb, v);
- bch_write_super(c);
+ bch2_write_super(c);
}
- bch_opt_set(&c->opts, id, v);
+ bch2_opt_set(&c->opts, id, v);
mutex_unlock(&c->sb_lock);
return size;
}
-static void bch_fs_opts_dir_release(struct kobject *k)
+static void bch2_fs_opts_dir_release(struct kobject *k)
{
}
-static struct attribute *bch_fs_opts_dir_files[] = {
+static struct attribute *bch2_fs_opts_dir_files[] = {
#define BCH_OPT(_name, ...) \
&sysfs_opt_##_name,
@@ -596,11 +596,11 @@ static struct attribute *bch_fs_opts_dir_files[] = {
NULL
};
-KTYPE(bch_fs_opts_dir);
+KTYPE(bch2_fs_opts_dir);
/* time stats */
-SHOW(bch_fs_time_stats)
+SHOW(bch2_fs_time_stats)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
@@ -613,7 +613,7 @@ SHOW(bch_fs_time_stats)
return 0;
}
-STORE(bch_fs_time_stats)
+STORE(bch2_fs_time_stats)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
@@ -625,11 +625,11 @@ STORE(bch_fs_time_stats)
return size;
}
-static void bch_fs_time_stats_release(struct kobject *k)
+static void bch2_fs_time_stats_release(struct kobject *k)
{
}
-static struct attribute *bch_fs_time_stats_files[] = {
+static struct attribute *bch2_fs_time_stats_files[] = {
#define BCH_TIME_STAT(name, frequency_units, duration_units) \
sysfs_time_stats_attribute_list(name, frequency_units, duration_units)
BCH_TIME_STATS()
@@ -637,7 +637,7 @@ static struct attribute *bch_fs_time_stats_files[] = {
NULL
};
-KTYPE(bch_fs_time_stats);
+KTYPE(bch2_fs_time_stats);
typedef unsigned (bucket_map_fn)(struct bch_dev *, struct bucket *, void *);
@@ -725,7 +725,7 @@ static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch_dev_usage_read(ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
return scnprintf(buf, PAGE_SIZE,
"free_inc: %zu/%zu\n"
@@ -765,11 +765,11 @@ static u64 sectors_written(struct bch_dev *ca)
return ret;
}
-SHOW(bch_dev)
+SHOW(bch2_dev)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch_dev_usage_read(ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
@@ -803,16 +803,16 @@ SHOW(bch_dev)
sysfs_pd_controller_show(copy_gc, &ca->moving_gc_pd);
if (attr == &sysfs_cache_replacement_policy)
- return bch_snprint_string_list(buf, PAGE_SIZE,
- bch_cache_replacement_policies,
- ca->mi.replacement);
+ return bch2_snprint_string_list(buf, PAGE_SIZE,
+ bch2_cache_replacement_policies,
+ ca->mi.replacement);
sysfs_print(tier, ca->mi.tier);
if (attr == &sysfs_state_rw)
- return bch_snprint_string_list(buf, PAGE_SIZE,
- bch_dev_state,
- ca->mi.state);
+ return bch2_snprint_string_list(buf, PAGE_SIZE,
+ bch2_dev_state,
+ ca->mi.state);
if (attr == &sysfs_read_priority_stats)
return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0);
@@ -830,7 +830,7 @@ SHOW(bch_dev)
return 0;
}
-STORE(bch_dev)
+STORE(bch2_dev)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
@@ -842,27 +842,27 @@ STORE(bch_dev)
bool v = strtoul_or_return(buf);
mutex_lock(&c->sb_lock);
- mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx];
+ mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
if (v != BCH_MEMBER_DISCARD(mi)) {
SET_BCH_MEMBER_DISCARD(mi, v);
- bch_write_super(c);
+ bch2_write_super(c);
}
mutex_unlock(&c->sb_lock);
}
if (attr == &sysfs_cache_replacement_policy) {
- ssize_t v = bch_read_string_list(buf, bch_cache_replacement_policies);
+ ssize_t v = bch2_read_string_list(buf, bch2_cache_replacement_policies);
if (v < 0)
return v;
mutex_lock(&c->sb_lock);
- mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx];
+ mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
SET_BCH_MEMBER_REPLACEMENT(mi, v);
- bch_write_super(c);
+ bch2_write_super(c);
}
mutex_unlock(&c->sb_lock);
}
@@ -880,22 +880,22 @@ STORE(bch_dev)
return size;
}
- mi = &bch_sb_get_members(c->disk_sb)->members[ca->dev_idx];
+ mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
SET_BCH_MEMBER_TIER(mi, v);
- bch_write_super(c);
+ bch2_write_super(c);
- bch_dev_group_remove(&c->tiers[prev_tier].devs, ca);
- bch_dev_group_add(&c->tiers[ca->mi.tier].devs, ca);
+ bch2_dev_group_remove(&c->tiers[prev_tier].devs, ca);
+ bch2_dev_group_add(&c->tiers[ca->mi.tier].devs, ca);
mutex_unlock(&c->sb_lock);
- bch_recalc_capacity(c);
- bch_tiering_start(c);
+ bch2_recalc_capacity(c);
+ bch2_tiering_start(c);
}
return size;
}
-static struct attribute *bch_dev_files[] = {
+static struct attribute *bch2_dev_files[] = {
&sysfs_uuid,
&sysfs_bucket_size,
&sysfs_bucket_size_bytes,
@@ -932,4 +932,4 @@ static struct attribute *bch_dev_files[] = {
sysfs_pd_controller_files(copy_gc),
NULL
};
-KTYPE(bch_dev);
+KTYPE(bch2_dev);
diff --git a/fs/bcachefs/sysfs.h b/fs/bcachefs/sysfs.h
index 02700246acaf..d1f17cff2a32 100644
--- a/fs/bcachefs/sysfs.h
+++ b/fs/bcachefs/sysfs.h
@@ -44,7 +44,7 @@ do { \
#define sysfs_hprint(file, val) \
do { \
if (attr == &sysfs_ ## file) { \
- ssize_t ret = bch_hprint(buf, val); \
+ ssize_t ret = bch2_hprint(buf, val); \
strcat(buf, "\n"); \
return ret + 1; \
} \
diff --git a/fs/bcachefs/tier.c b/fs/bcachefs/tier.c
index b1ac13c99275..16d32928a217 100644
--- a/fs/bcachefs/tier.c
+++ b/fs/bcachefs/tier.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc.h"
#include "btree_iter.h"
#include "buckets.h"
@@ -83,12 +83,12 @@ static int issue_tiering_move(struct bch_fs *c,
{
int ret;
- ret = bch_data_move(c, ctxt, &s->ca->tiering_write_point, k, NULL);
+ ret = bch2_data_move(c, ctxt, &s->ca->tiering_write_point, k, NULL);
if (!ret) {
- trace_bcache_tiering_copy(k.k);
+ trace_tiering_copy(k.k);
s->sectors += k.k->size;
} else {
- trace_bcache_tiering_alloc_fail(c, k.k->size);
+ trace_tiering_alloc_fail(c, k.k->size);
}
return ret;
@@ -110,19 +110,19 @@ static s64 read_tiering(struct bch_fs *c, struct bch_tier *tier)
if (!nr_devices)
return 0;
- trace_bcache_tiering_start(c);
+ trace_tiering_start(c);
memset(&s, 0, sizeof(s));
s.tier = tier;
s.stripe_size = 2048; /* 1 mb for now */
- bch_move_ctxt_init(&ctxt, &tier->pd.rate,
+ bch2_move_ctxt_init(&ctxt, &tier->pd.rate,
nr_devices * SECTORS_IN_FLIGHT_PER_DEVICE);
- bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
+ bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN);
while (!kthread_should_stop() &&
- !bch_move_ctxt_wait(&ctxt) &&
- (k = bch_btree_iter_peek(&iter)).k &&
+ !bch2_move_ctxt_wait(&ctxt) &&
+ (k = bch2_btree_iter_peek(&iter)).k &&
!btree_iter_err(k)) {
if (!tiering_pred(c, &s, k))
goto next;
@@ -133,30 +133,30 @@ static s64 read_tiering(struct bch_fs *c, struct bch_tier *tier)
ret = issue_tiering_move(c, &s, &ctxt, k);
if (ret) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
/* memory allocation failure, wait for some IO to finish */
- bch_move_ctxt_wait_for_io(&ctxt);
+ bch2_move_ctxt_wait_for_io(&ctxt);
continue;
}
next:
- bch_btree_iter_advance_pos(&iter);
- //bch_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_advance_pos(&iter);
+ //bch2_btree_iter_cond_resched(&iter);
/* unlock before calling moving_context_wait() */
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
cond_resched();
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
tier_put_device(&s);
- bch_move_ctxt_exit(&ctxt);
- trace_bcache_tiering_end(c, ctxt.sectors_moved, ctxt.keys_moved);
+ bch2_move_ctxt_exit(&ctxt);
+ trace_tiering_end(c, ctxt.sectors_moved, ctxt.keys_moved);
return ctxt.sectors_moved;
}
-static int bch_tiering_thread(void *arg)
+static int bch2_tiering_thread(void *arg)
{
struct bch_tier *tier = arg;
struct bch_fs *c = container_of(tier, struct bch_fs, tiers[tier->idx]);
@@ -196,7 +196,7 @@ static int bch_tiering_thread(void *arg)
if (available_sectors < (tier_capacity >> 1))
break;
- bch_kthread_io_clock_wait(clock,
+ bch2_kthread_io_clock_wait(clock,
last +
available_sectors -
(tier_capacity >> 1));
@@ -210,10 +210,10 @@ static int bch_tiering_thread(void *arg)
return 0;
}
-static void __bch_tiering_stop(struct bch_tier *tier)
+static void __bch2_tiering_stop(struct bch_tier *tier)
{
tier->pd.rate.rate = UINT_MAX;
- bch_ratelimit_reset(&tier->pd.rate);
+ bch2_ratelimit_reset(&tier->pd.rate);
if (tier->migrate)
kthread_stop(tier->migrate);
@@ -221,19 +221,19 @@ static void __bch_tiering_stop(struct bch_tier *tier)
tier->migrate = NULL;
}
-void bch_tiering_stop(struct bch_fs *c)
+void bch2_tiering_stop(struct bch_fs *c)
{
struct bch_tier *tier;
for (tier = c->tiers; tier < c->tiers + ARRAY_SIZE(c->tiers); tier++)
- __bch_tiering_stop(tier);
+ __bch2_tiering_stop(tier);
}
-static int __bch_tiering_start(struct bch_tier *tier)
+static int __bch2_tiering_start(struct bch_tier *tier)
{
if (!tier->migrate) {
struct task_struct *p =
- kthread_create(bch_tiering_thread, tier,
+ kthread_create(bch2_tiering_thread, tier,
"bch_tier[%u]", tier->idx);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -245,7 +245,7 @@ static int __bch_tiering_start(struct bch_tier *tier)
return 0;
}
-int bch_tiering_start(struct bch_fs *c)
+int bch2_tiering_start(struct bch_fs *c)
{
struct bch_tier *tier;
bool have_faster_tier = false;
@@ -258,11 +258,11 @@ int bch_tiering_start(struct bch_fs *c)
continue;
if (have_faster_tier) {
- int ret = __bch_tiering_start(tier);
+ int ret = __bch2_tiering_start(tier);
if (ret)
return ret;
} else {
- __bch_tiering_stop(tier);
+ __bch2_tiering_stop(tier);
}
have_faster_tier = true;
@@ -271,12 +271,12 @@ int bch_tiering_start(struct bch_fs *c)
return 0;
}
-void bch_fs_tiering_init(struct bch_fs *c)
+void bch2_fs_tiering_init(struct bch_fs *c)
{
unsigned i;
for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
c->tiers[i].idx = i;
- bch_pd_controller_init(&c->tiers[i].pd);
+ bch2_pd_controller_init(&c->tiers[i].pd);
}
}
diff --git a/fs/bcachefs/tier.h b/fs/bcachefs/tier.h
index b6f8d4a2b53c..a4fd6225d01c 100644
--- a/fs/bcachefs/tier.h
+++ b/fs/bcachefs/tier.h
@@ -1,8 +1,8 @@
#ifndef _BCACHE_TIER_H
#define _BCACHE_TIER_H
-void bch_tiering_stop(struct bch_fs *);
-int bch_tiering_start(struct bch_fs *);
-void bch_fs_tiering_init(struct bch_fs *);
+void bch2_tiering_stop(struct bch_fs *);
+int bch2_tiering_start(struct bch_fs *);
+void bch2_fs_tiering_init(struct bch_fs *);
#endif
diff --git a/fs/bcachefs/trace.c b/fs/bcachefs/trace.c
index 970699930c5e..13f0fc24a3f7 100644
--- a/fs/bcachefs/trace.c
+++ b/fs/bcachefs/trace.c
@@ -1,4 +1,4 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "alloc_types.h"
#include "buckets.h"
#include "btree_types.h"
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 5f8165935e56..e4cd63174974 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -23,7 +23,7 @@
#define simple_strtouint(c, end, base) simple_strtoul(c, end, base)
#define STRTO_H(name, type) \
-int bch_ ## name ## _h(const char *cp, type *res) \
+int bch2_ ## name ## _h(const char *cp, type *res) \
{ \
int u = 0; \
char *e; \
@@ -77,7 +77,7 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
-ssize_t bch_hprint(char *buf, s64 v)
+ssize_t bch2_hprint(char *buf, s64 v)
{
static const char units[] = "?kMGTPEZY";
char dec[4] = "";
@@ -101,7 +101,7 @@ ssize_t bch_hprint(char *buf, s64 v)
return sprintf(buf, "%lli%s%c", v, dec, units[u]);
}
-ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[],
size_t selected)
{
char *out = buf;
@@ -115,7 +115,7 @@ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[
return out - buf;
}
-ssize_t bch_read_string_list(const char *buf, const char * const list[])
+ssize_t bch2_read_string_list(const char *buf, const char * const list[])
{
size_t i;
char *s, *d = kstrndup(buf, PAGE_SIZE - 1, GFP_KERNEL);
@@ -136,7 +136,7 @@ ssize_t bch_read_string_list(const char *buf, const char * const list[])
return i;
}
-bool bch_is_zero(const void *_p, size_t n)
+bool bch2_is_zero(const void *_p, size_t n)
{
const char *p = _p;
size_t i;
@@ -147,7 +147,7 @@ bool bch_is_zero(const void *_p, size_t n)
return true;
}
-void bch_time_stats_clear(struct time_stats *stats)
+void bch2_time_stats_clear(struct time_stats *stats)
{
spin_lock(&stats->lock);
@@ -161,7 +161,7 @@ void bch_time_stats_clear(struct time_stats *stats)
spin_unlock(&stats->lock);
}
-void __bch_time_stats_update(struct time_stats *stats, u64 start_time)
+void __bch2_time_stats_update(struct time_stats *stats, u64 start_time)
{
u64 now, duration, last;
@@ -193,22 +193,22 @@ void __bch_time_stats_update(struct time_stats *stats, u64 start_time)
stats->last = now ?: 1;
}
-void bch_time_stats_update(struct time_stats *stats, u64 start_time)
+void bch2_time_stats_update(struct time_stats *stats, u64 start_time)
{
spin_lock(&stats->lock);
- __bch_time_stats_update(stats, start_time);
+ __bch2_time_stats_update(stats, start_time);
spin_unlock(&stats->lock);
}
/**
- * bch_ratelimit_delay() - return how long to delay until the next time to do
+ * bch2_ratelimit_delay() - return how long to delay until the next time to do
* some work
*
* @d - the struct bch_ratelimit to update
*
* Returns the amount of time to delay by, in jiffies
*/
-u64 bch_ratelimit_delay(struct bch_ratelimit *d)
+u64 bch2_ratelimit_delay(struct bch_ratelimit *d)
{
u64 now = local_clock();
@@ -218,12 +218,12 @@ u64 bch_ratelimit_delay(struct bch_ratelimit *d)
}
/**
- * bch_ratelimit_increment() - increment @d by the amount of work done
+ * bch2_ratelimit_increment() - increment @d by the amount of work done
*
* @d - the struct bch_ratelimit to update
* @done - the amount of work done, in arbitrary units
*/
-void bch_ratelimit_increment(struct bch_ratelimit *d, u64 done)
+void bch2_ratelimit_increment(struct bch_ratelimit *d, u64 done)
{
u64 now = local_clock();
@@ -236,10 +236,10 @@ void bch_ratelimit_increment(struct bch_ratelimit *d, u64 done)
d->next = now - NSEC_PER_SEC * 2;
}
-int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d)
+int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d)
{
while (1) {
- u64 delay = bch_ratelimit_delay(d);
+ u64 delay = bch2_ratelimit_delay(d);
if (delay)
set_current_state(TASK_INTERRUPTIBLE);
@@ -263,7 +263,7 @@ int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *d)
* @sign: 1 or -1; 1 if increasing the rate makes actual go up, -1 if increasing
* it makes actual go down.
*/
-void bch_pd_controller_update(struct bch_pd_controller *pd,
+void bch2_pd_controller_update(struct bch_pd_controller *pd,
s64 target, s64 actual, int sign)
{
s64 proportional, derivative, change;
@@ -307,7 +307,7 @@ void bch_pd_controller_update(struct bch_pd_controller *pd,
pd->last_target = target;
}
-void bch_pd_controller_init(struct bch_pd_controller *pd)
+void bch2_pd_controller_init(struct bch_pd_controller *pd)
{
pd->rate.rate = 1024;
pd->last_update = jiffies;
@@ -317,7 +317,7 @@ void bch_pd_controller_init(struct bch_pd_controller *pd)
pd->backpressure = 1;
}
-size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
+size_t bch2_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
{
/* 2^64 - 1 is 20 digits, plus null byte */
char rate[21];
@@ -328,12 +328,12 @@ size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
char change[21];
s64 next_io;
- bch_hprint(rate, pd->rate.rate);
- bch_hprint(actual, pd->last_actual);
- bch_hprint(target, pd->last_target);
- bch_hprint(proportional, pd->last_proportional);
- bch_hprint(derivative, pd->last_derivative);
- bch_hprint(change, pd->last_change);
+ bch2_hprint(rate, pd->rate.rate);
+ bch2_hprint(actual, pd->last_actual);
+ bch2_hprint(target, pd->last_target);
+ bch2_hprint(proportional, pd->last_proportional);
+ bch2_hprint(derivative, pd->last_derivative);
+ bch2_hprint(change, pd->last_change);
next_io = div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC);
@@ -349,7 +349,7 @@ size_t bch_pd_controller_print_debug(struct bch_pd_controller *pd, char *buf)
derivative, change, next_io);
}
-void bch_bio_map(struct bio *bio, void *base)
+void bch2_bio_map(struct bio *bio, void *base)
{
size_t size = bio->bi_iter.bi_size;
struct bio_vec *bv = bio->bi_io_vec;
@@ -377,7 +377,7 @@ start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset,
}
}
-size_t bch_rand_range(size_t max)
+size_t bch2_rand_range(size_t max)
{
size_t rand;
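
The bch_ratelimit helpers renamed above pace a worker thread: the worker credits the work it has done with bch2_ratelimit_increment() and then asks the ratelimit how long to back off before doing more, either via bch2_ratelimit_delay() or the freezable/stoppable wrapper. Below is a minimal consumer sketch using only the prototypes visible in this diff; the worker itself and do_one_chunk_of_work() are hypothetical and not part of this commit.

#include <linux/kthread.h>
#include "util.h"

/* Hypothetical helper: does a bounded amount of work, returns units done. */
extern u64 do_one_chunk_of_work(void);

/* Hypothetical pacing loop -- illustration only, not code from this patch. */
static int throttled_worker(void *arg)
{
	struct bch_ratelimit *rate = arg;

	bch2_ratelimit_reset(rate);		/* start counting from "now" */

	while (!kthread_should_stop()) {
		u64 done = do_one_chunk_of_work();

		/* credit the work done, in whatever units the caller chose */
		bch2_ratelimit_increment(rate, done);

		/*
		 * Sleep (freezable/stoppable) until the configured rate allows
		 * more work; per the loop shown above, a nonzero return means
		 * the kthread was asked to stop.
		 */
		if (bch2_ratelimit_wait_freezable_stoppable(rate))
			break;
	}

	return 0;
}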
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 95be24854669..5f13c8244a78 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -244,36 +244,36 @@ do { \
#define ANYSINT_MAX(t) \
((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
-int bch_strtoint_h(const char *, int *);
-int bch_strtouint_h(const char *, unsigned int *);
-int bch_strtoll_h(const char *, long long *);
-int bch_strtoull_h(const char *, unsigned long long *);
+int bch2_strtoint_h(const char *, int *);
+int bch2_strtouint_h(const char *, unsigned int *);
+int bch2_strtoll_h(const char *, long long *);
+int bch2_strtoull_h(const char *, unsigned long long *);
-static inline int bch_strtol_h(const char *cp, long *res)
+static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
- return bch_strtoint_h(cp, (int *) res);
+ return bch2_strtoint_h(cp, (int *) res);
#else
- return bch_strtoll_h(cp, (long long *) res);
+ return bch2_strtoll_h(cp, (long long *) res);
#endif
}
-static inline int bch_strtoul_h(const char *cp, long *res)
+static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
- return bch_strtouint_h(cp, (unsigned int *) res);
+ return bch2_strtouint_h(cp, (unsigned int *) res);
#else
- return bch_strtoull_h(cp, (unsigned long long *) res);
+ return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}
#define strtoi_h(cp, res) \
- ( type_is(*res, int) ? bch_strtoint_h(cp, (void *) res)\
- : type_is(*res, long) ? bch_strtol_h(cp, (void *) res)\
- : type_is(*res, long long) ? bch_strtoll_h(cp, (void *) res)\
- : type_is(*res, unsigned) ? bch_strtouint_h(cp, (void *) res)\
- : type_is(*res, unsigned long) ? bch_strtoul_h(cp, (void *) res)\
- : type_is(*res, unsigned long long) ? bch_strtoull_h(cp, (void *) res)\
+ ( type_is(*res, int) ? bch2_strtoint_h(cp, (void *) res)\
+ : type_is(*res, long) ? bch2_strtol_h(cp, (void *) res)\
+ : type_is(*res, long long) ? bch2_strtoll_h(cp, (void *) res)\
+ : type_is(*res, unsigned) ? bch2_strtouint_h(cp, (void *) res)\
+ : type_is(*res, unsigned long) ? bch2_strtoul_h(cp, (void *) res)\
+ : type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
: -EINVAL)
#define strtoul_safe(cp, var) \
@@ -316,14 +316,14 @@ static inline int bch_strtoul_h(const char *cp, long *res)
: type_is(var, char *) ? "%s\n" \
: "%i\n", var)
-ssize_t bch_hprint(char *buf, s64 v);
+ssize_t bch2_hprint(char *buf, s64 v);
-bool bch_is_zero(const void *, size_t);
+bool bch2_is_zero(const void *, size_t);
-ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
+ssize_t bch2_snprint_string_list(char *buf, size_t size, const char * const list[],
size_t selected);
-ssize_t bch_read_string_list(const char *buf, const char * const list[]);
+ssize_t bch2_read_string_list(const char *buf, const char * const list[]);
struct time_stats {
spinlock_t lock;
@@ -339,9 +339,9 @@ struct time_stats {
u64 last;
};
-void bch_time_stats_clear(struct time_stats *stats);
-void __bch_time_stats_update(struct time_stats *stats, u64 time);
-void bch_time_stats_update(struct time_stats *stats, u64 time);
+void bch2_time_stats_clear(struct time_stats *stats);
+void __bch2_time_stats_update(struct time_stats *stats, u64 time);
+void bch2_time_stats_update(struct time_stats *stats, u64 time);
static inline unsigned local_clock_us(void)
{
@@ -382,7 +382,7 @@ do { \
#define sysfs_clear_time_stats(stats, name) \
do { \
if (attr == &sysfs_ ## name ## _clear) \
- bch_time_stats_clear(stats); \
+ bch2_time_stats_clear(stats); \
} while (0)
#define sysfs_time_stats_attribute(name, \
@@ -422,19 +422,19 @@ struct bch_ratelimit {
/*
* Rate at which we want to do work, in units per nanosecond
* The units here correspond to the units passed to
- * bch_ratelimit_increment()
+ * bch2_ratelimit_increment()
*/
unsigned rate;
};
-static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
+static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
d->next = local_clock();
}
-u64 bch_ratelimit_delay(struct bch_ratelimit *);
-void bch_ratelimit_increment(struct bch_ratelimit *, u64);
-int bch_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *);
+u64 bch2_ratelimit_delay(struct bch_ratelimit *);
+void bch2_ratelimit_increment(struct bch_ratelimit *, u64);
+int bch2_ratelimit_wait_freezable_stoppable(struct bch_ratelimit *);
struct bch_pd_controller {
struct bch_ratelimit rate;
@@ -453,14 +453,14 @@ struct bch_pd_controller {
s64 last_change;
s64 last_target;
- /* If true, the rate will not increase if bch_ratelimit_delay()
+ /* If true, the rate will not increase if bch2_ratelimit_delay()
* is not being called often enough. */
bool backpressure;
};
-void bch_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
-void bch_pd_controller_init(struct bch_pd_controller *);
-size_t bch_pd_controller_print_debug(struct bch_pd_controller *, char *);
+void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
+void bch2_pd_controller_init(struct bch_pd_controller *);
+size_t bch2_pd_controller_print_debug(struct bch_pd_controller *, char *);
#define sysfs_pd_controller_attribute(name) \
rw_attribute(name##_rate); \
@@ -484,7 +484,7 @@ do { \
sysfs_print(name##_rate_p_term_inverse, (var)->p_term_inverse); \
\
if (attr == &sysfs_##name##_rate_debug) \
- return bch_pd_controller_print_debug(var, buf); \
+ return bch2_pd_controller_print_debug(var, buf); \
} while (0)
#define sysfs_pd_controller_store(name, var) \
@@ -600,7 +600,7 @@ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
return x;
}
-void bch_bio_map(struct bio *bio, void *base);
+void bch2_bio_map(struct bio *bio, void *base);
static inline sector_t bdev_sectors(struct block_device *bdev)
{
@@ -633,7 +633,7 @@ do { \
_ret; \
})
-size_t bch_rand_range(size_t);
+size_t bch2_rand_range(size_t);
void memcpy_to_bio(struct bio *, struct bvec_iter, void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);
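
The PD controller renamed above drives that same ratelimit: bch2_pd_controller_init() seeds pd->rate, and periodic bch2_pd_controller_update() calls with a target and an actual value adjust it, which is why tier.c hands &tier->pd.rate to the mover. A rough sketch of the calling pattern follows; the accounting helpers, the half-full target, and the one-second period are made up for illustration and are not from this commit.

#include <linux/kthread.h>
#include "util.h"

/* Hypothetical helpers standing in for real accounting -- not from this patch. */
extern s64 dev_capacity_sectors(struct bch_dev *);
extern s64 dev_used_sectors(struct bch_dev *);

static int keep_device_half_empty(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_pd_controller pd;

	bch2_pd_controller_init(&pd);	/* rate starts at 1024, backpressure on */

	while (!kthread_should_stop()) {
		s64 target = dev_capacity_sectors(ca) / 2;
		s64 actual = dev_used_sectors(ca);

		/*
		 * sign = -1: raising the rate moves data *off* the device, so a
		 * higher rate makes "actual" go down (see the @sign comment in
		 * util.c above).
		 */
		bch2_pd_controller_update(&pd, target, actual, -1);

		schedule_timeout_interruptible(HZ);	/* arbitrary update period */
	}

	return 0;
}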
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index a5c66fa17816..62a08897dc88 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -1,5 +1,5 @@
-#include "bcache.h"
+#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
@@ -18,30 +18,30 @@ struct xattr_search_key {
#define X_SEARCH(_type, _name, _len) ((struct xattr_search_key) \
{ .type = _type, .name = QSTR_INIT(_name, _len) })
-static u64 bch_xattr_hash(const struct bch_hash_info *info,
+static u64 bch2_xattr_hash(const struct bch_hash_info *info,
const struct xattr_search_key *key)
{
struct bch_str_hash_ctx ctx;
- bch_str_hash_init(&ctx, info);
- bch_str_hash_update(&ctx, info, &key->type, sizeof(key->type));
- bch_str_hash_update(&ctx, info, key->name.name, key->name.len);
+ bch2_str_hash_init(&ctx, info);
+ bch2_str_hash_update(&ctx, info, &key->type, sizeof(key->type));
+ bch2_str_hash_update(&ctx, info, key->name.name, key->name.len);
- return bch_str_hash_end(&ctx, info);
+ return bch2_str_hash_end(&ctx, info);
}
#define xattr_val(_xattr) ((_xattr)->x_name + (_xattr)->x_name_len)
static u64 xattr_hash_key(const struct bch_hash_info *info, const void *key)
{
- return bch_xattr_hash(info, key);
+ return bch2_xattr_hash(info, key);
}
static u64 xattr_hash_bkey(const struct bch_hash_info *info, struct bkey_s_c k)
{
struct bkey_s_c_xattr x = bkey_s_c_to_xattr(k);
- return bch_xattr_hash(info,
+ return bch2_xattr_hash(info,
&X_SEARCH(x.v->x_type, x.v->x_name, x.v->x_name_len));
}
@@ -75,7 +75,7 @@ static const struct bch_hash_desc xattr_hash_desc = {
.cmp_bkey = xattr_cmp_bkey,
};
-static const char *bch_xattr_invalid(const struct bch_fs *c,
+static const char *bch2_xattr_invalid(const struct bch_fs *c,
struct bkey_s_c k)
{
switch (k.k->type) {
@@ -94,7 +94,7 @@ static const char *bch_xattr_invalid(const struct bch_fs *c,
}
}
-static void bch_xattr_to_text(struct bch_fs *c, char *buf,
+static void bch2_xattr_to_text(struct bch_fs *c, char *buf,
size_t size, struct bkey_s_c k)
{
struct bkey_s_c_xattr xattr;
@@ -132,12 +132,12 @@ static void bch_xattr_to_text(struct bch_fs *c, char *buf,
}
}
-const struct bkey_ops bch_bkey_xattr_ops = {
- .key_invalid = bch_xattr_invalid,
- .val_to_text = bch_xattr_to_text,
+const struct bkey_ops bch2_bkey_xattr_ops = {
+ .key_invalid = bch2_xattr_invalid,
+ .val_to_text = bch2_xattr_to_text,
};
-int bch_xattr_get(struct bch_fs *c, struct inode *inode,
+int bch2_xattr_get(struct bch_fs *c, struct inode *inode,
const char *name, void *buffer, size_t size, int type)
{
struct bch_inode_info *ei = to_bch_ei(inode);
@@ -146,11 +146,11 @@ int bch_xattr_get(struct bch_fs *c, struct inode *inode,
struct bkey_s_c_xattr xattr;
int ret;
- k = bch_hash_lookup(xattr_hash_desc, &ei->str_hash, c,
+ k = bch2_hash_lookup(xattr_hash_desc, &ei->str_hash, c,
ei->vfs_inode.i_ino, &iter,
&X_SEARCH(type, name, strlen(name)));
if (IS_ERR(k.k))
- return bch_btree_iter_unlock(&iter) ?: -ENODATA;
+ return bch2_btree_iter_unlock(&iter) ?: -ENODATA;
xattr = bkey_s_c_to_xattr(k);
ret = le16_to_cpu(xattr.v->x_val_len);
@@ -161,11 +161,11 @@ int bch_xattr_get(struct bch_fs *c, struct inode *inode,
memcpy(buffer, xattr_val(xattr.v), ret);
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
-int __bch_xattr_set(struct bch_fs *c, u64 inum,
+int __bch2_xattr_set(struct bch_fs *c, u64 inum,
const struct bch_hash_info *hash_info,
const char *name, const void *value, size_t size,
int flags, int type, u64 *journal_seq)
@@ -174,7 +174,7 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum,
int ret;
if (!value) {
- ret = bch_hash_delete(xattr_hash_desc, hash_info,
+ ret = bch2_hash_delete(xattr_hash_desc, hash_info,
c, inum,
journal_seq, &search);
} else {
@@ -199,7 +199,7 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum,
memcpy(xattr->v.x_name, search.name.name, search.name.len);
memcpy(xattr_val(&xattr->v), value, size);
- ret = bch_hash_set(xattr_hash_desc, hash_info, c,
+ ret = bch2_hash_set(xattr_hash_desc, hash_info, c,
inum, journal_seq,
&xattr->k_i,
(flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
@@ -213,25 +213,25 @@ int __bch_xattr_set(struct bch_fs *c, u64 inum,
return ret;
}
-int bch_xattr_set(struct bch_fs *c, struct inode *inode,
+int bch2_xattr_set(struct bch_fs *c, struct inode *inode,
const char *name, const void *value, size_t size,
int flags, int type)
{
struct bch_inode_info *ei = to_bch_ei(inode);
- return __bch_xattr_set(c, inode->i_ino, &ei->str_hash,
+ return __bch2_xattr_set(c, inode->i_ino, &ei->str_hash,
name, value, size, flags, type,
&ei->journal_seq);
}
-static const struct xattr_handler *bch_xattr_type_to_handler(unsigned);
+static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned);
-static size_t bch_xattr_emit(struct dentry *dentry,
+static size_t bch2_xattr_emit(struct dentry *dentry,
const struct bch_xattr *xattr,
char *buffer, size_t buffer_size)
{
const struct xattr_handler *handler =
- bch_xattr_type_to_handler(xattr->x_type);
+ bch2_xattr_type_to_handler(xattr->x_type);
if (handler && (!handler->list || handler->list(dentry))) {
const char *prefix = handler->prefix ?: handler->name;
@@ -251,7 +251,7 @@ static size_t bch_xattr_emit(struct dentry *dentry,
}
}
-ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct bch_fs *c = dentry->d_sb->s_fs_info;
struct btree_iter iter;
@@ -272,10 +272,10 @@ ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
xattr = bkey_s_c_to_xattr(k).v;
- len = bch_xattr_emit(dentry, xattr, buffer, buffer_size);
+ len = bch2_xattr_emit(dentry, xattr, buffer, buffer_size);
if (buffer) {
if (len > buffer_size) {
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return -ERANGE;
}
@@ -286,55 +286,55 @@ ssize_t bch_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ret += len;
}
- bch_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(&iter);
return ret;
}
-static int bch_xattr_get_handler(const struct xattr_handler *handler,
+static int bch2_xattr_get_handler(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
- return bch_xattr_get(c, inode, name, buffer, size, handler->flags);
+ return bch2_xattr_get(c, inode, name, buffer, size, handler->flags);
}
-static int bch_xattr_set_handler(const struct xattr_handler *handler,
+static int bch2_xattr_set_handler(const struct xattr_handler *handler,
struct dentry *dentry, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
struct bch_fs *c = inode->i_sb->s_fs_info;
- return bch_xattr_set(c, inode, name, value, size, flags,
+ return bch2_xattr_set(c, inode, name, value, size, flags,
handler->flags);
}
static const struct xattr_handler bch_xattr_user_handler = {
.prefix = XATTR_USER_PREFIX,
- .get = bch_xattr_get_handler,
- .set = bch_xattr_set_handler,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
.flags = BCH_XATTR_INDEX_USER,
};
-static bool bch_xattr_trusted_list(struct dentry *dentry)
+static bool bch2_xattr_trusted_list(struct dentry *dentry)
{
return capable(CAP_SYS_ADMIN);
}
static const struct xattr_handler bch_xattr_trusted_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
- .list = bch_xattr_trusted_list,
- .get = bch_xattr_get_handler,
- .set = bch_xattr_set_handler,
+ .list = bch2_xattr_trusted_list,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
.flags = BCH_XATTR_INDEX_TRUSTED,
};
static const struct xattr_handler bch_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
- .get = bch_xattr_get_handler,
- .set = bch_xattr_set_handler,
+ .get = bch2_xattr_get_handler,
+ .set = bch2_xattr_set_handler,
.flags = BCH_XATTR_INDEX_SECURITY,
};
@@ -348,7 +348,7 @@ static const struct xattr_handler *bch_xattr_handler_map[] = {
[BCH_XATTR_INDEX_SECURITY] = &bch_xattr_security_handler,
};
-const struct xattr_handler *bch_xattr_handlers[] = {
+const struct xattr_handler *bch2_xattr_handlers[] = {
&bch_xattr_user_handler,
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
@@ -357,7 +357,7 @@ const struct xattr_handler *bch_xattr_handlers[] = {
NULL
};
-static const struct xattr_handler *bch_xattr_type_to_handler(unsigned type)
+static const struct xattr_handler *bch2_xattr_type_to_handler(unsigned type)
{
return type < ARRAY_SIZE(bch_xattr_handler_map)
? bch_xattr_handler_map[type]
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
index c48c7acf361d..14eba241869a 100644
--- a/fs/bcachefs/xattr.h
+++ b/fs/bcachefs/xattr.h
@@ -1,20 +1,20 @@
#ifndef _BCACHE_XATTR_H
#define _BCACHE_XATTR_H
-extern const struct bkey_ops bch_bkey_xattr_ops;
+extern const struct bkey_ops bch2_bkey_xattr_ops;
struct dentry;
struct xattr_handler;
struct bch_hash_info;
-int bch_xattr_get(struct bch_fs *, struct inode *,
+int bch2_xattr_get(struct bch_fs *, struct inode *,
const char *, void *, size_t, int);
-int __bch_xattr_set(struct bch_fs *, u64, const struct bch_hash_info *,
+int __bch2_xattr_set(struct bch_fs *, u64, const struct bch_hash_info *,
const char *, const void *, size_t, int, int, u64 *);
-int bch_xattr_set(struct bch_fs *, struct inode *,
+int bch2_xattr_set(struct bch_fs *, struct inode *,
const char *, const void *, size_t, int, int);
-ssize_t bch_xattr_list(struct dentry *, char *, size_t);
+ssize_t bch2_xattr_list(struct dentry *, char *, size_t);
-extern const struct xattr_handler *bch_xattr_handlers[];
+extern const struct xattr_handler *bch2_xattr_handlers[];
#endif /* _BCACHE_XATTR_H */
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 5b55dd0862c8..7dea9d63e654 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -75,7 +75,7 @@ DECLARE_EVENT_CLASS(bch_fs,
TP_printk("%pU", __entry->uuid)
);
-DECLARE_EVENT_CLASS(bcache_bio,
+DECLARE_EVENT_CLASS(bio,
TP_PROTO(struct bio *bio),
TP_ARGS(bio),
@@ -118,27 +118,27 @@ DECLARE_EVENT_CLASS(page_alloc_fail,
/* io.c: */
-DEFINE_EVENT(bcache_bio, bcache_read_split,
+DEFINE_EVENT(bio, read_split,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_read_bounce,
+DEFINE_EVENT(bio, read_bounce,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_read_retry,
+DEFINE_EVENT(bio, read_retry,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-DEFINE_EVENT(bcache_bio, bcache_promote,
+DEFINE_EVENT(bio, promote,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
-TRACE_EVENT(bcache_write_throttle,
+TRACE_EVENT(write_throttle,
TP_PROTO(struct bch_fs *c, u64 inode, struct bio *bio, u64 delay),
TP_ARGS(c, inode, bio, delay),
@@ -169,17 +169,17 @@ TRACE_EVENT(bcache_write_throttle,
/* Journal */
-DEFINE_EVENT(bch_fs, bcache_journal_full,
+DEFINE_EVENT(bch_fs, journal_full,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_journal_entry_full,
+DEFINE_EVENT(bch_fs, journal_entry_full,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
+DEFINE_EVENT(bio, journal_write,
TP_PROTO(struct bio *bio),
TP_ARGS(bio)
);
@@ -220,12 +220,12 @@ DECLARE_EVENT_CLASS(btree_node,
__entry->inode, __entry->offset)
);
-DEFINE_EVENT(btree_node, bcache_btree_read,
+DEFINE_EVENT(btree_node, btree_read,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
-TRACE_EVENT(bcache_btree_write,
+TRACE_EVENT(btree_write,
TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
TP_ARGS(b, bytes, sectors),
@@ -245,17 +245,17 @@ TRACE_EVENT(bcache_btree_write,
__entry->type , __entry->bytes, __entry->sectors)
);
-DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
+DEFINE_EVENT(btree_node, btree_node_alloc,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
-DEFINE_EVENT(btree_node, bcache_btree_node_free,
+DEFINE_EVENT(btree_node, btree_node_free,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
-TRACE_EVENT(bcache_mca_reap,
+TRACE_EVENT(btree_node_reap,
TP_PROTO(struct bch_fs *c, struct btree *b, int ret),
TP_ARGS(c, b, ret),
@@ -272,33 +272,7 @@ TRACE_EVENT(bcache_mca_reap,
TP_printk("bucket %llu ret %d", __entry->bucket, __entry->ret)
);
-TRACE_EVENT(bcache_mca_scan,
- TP_PROTO(struct bch_fs *c, unsigned touched, unsigned freed,
- unsigned can_free, unsigned long nr),
- TP_ARGS(c, touched, freed, can_free, nr),
-
- TP_STRUCT__entry(
- __array(char, uuid, 16 )
- __field(unsigned long, touched )
- __field(unsigned long, freed )
- __field(unsigned long, can_free )
- __field(unsigned long, nr )
- ),
-
- TP_fast_assign(
- memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
- __entry->touched = touched;
- __entry->freed = freed;
- __entry->can_free = can_free;
- __entry->nr = nr;
- ),
-
- TP_printk("%pU touched %lu freed %lu can_free %lu nr %lu",
- __entry->uuid, __entry->touched, __entry->freed,
- __entry->can_free, __entry->nr)
-);
-
-DECLARE_EVENT_CLASS(mca_cannibalize_lock,
+DECLARE_EVENT_CLASS(btree_node_cannibalize_lock,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c),
@@ -313,27 +287,27 @@ DECLARE_EVENT_CLASS(mca_cannibalize_lock,
TP_printk("%pU", __entry->uuid)
);
-DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock_fail,
+DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock_fail,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
+DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize_lock,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
+DEFINE_EVENT(btree_node_cannibalize_lock, btree_node_cannibalize,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_mca_cannibalize_unlock,
+DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-TRACE_EVENT(bcache_btree_reserve_get_fail,
+TRACE_EVENT(btree_reserve_get_fail,
TP_PROTO(struct bch_fs *c, size_t required, struct closure *cl),
TP_ARGS(c, required, cl),
@@ -353,7 +327,7 @@ TRACE_EVENT(bcache_btree_reserve_get_fail,
__entry->required, __entry->cl)
);
-TRACE_EVENT(bcache_btree_insert_key,
+TRACE_EVENT(btree_insert_key,
TP_PROTO(struct bch_fs *c, struct btree *b, struct bkey_i *k),
TP_ARGS(c, b, k),
@@ -412,24 +386,24 @@ DECLARE_EVENT_CLASS(btree_split,
__entry->inode, __entry->offset, __entry->keys)
);
-DEFINE_EVENT(btree_split, bcache_btree_node_split,
+DEFINE_EVENT(btree_split, btree_node_split,
TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
TP_ARGS(c, b, keys)
);
-DEFINE_EVENT(btree_split, bcache_btree_node_compact,
+DEFINE_EVENT(btree_split, btree_node_compact,
TP_PROTO(struct bch_fs *c, struct btree *b, unsigned keys),
TP_ARGS(c, b, keys)
);
-DEFINE_EVENT(btree_node, bcache_btree_set_root,
+DEFINE_EVENT(btree_node, btree_set_root,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
/* Garbage collection */
-TRACE_EVENT(bcache_btree_gc_coalesce,
+TRACE_EVENT(btree_gc_coalesce,
TP_PROTO(struct bch_fs *c, struct btree *b, unsigned nodes),
TP_ARGS(c, b, nodes),
@@ -456,7 +430,7 @@ TRACE_EVENT(bcache_btree_gc_coalesce,
__entry->inode, __entry->offset, __entry->nodes)
);
-TRACE_EVENT(bcache_btree_gc_coalesce_fail,
+TRACE_EVENT(btree_gc_coalesce_fail,
TP_PROTO(struct bch_fs *c, int reason),
TP_ARGS(c, reason),
@@ -473,85 +447,54 @@ TRACE_EVENT(bcache_btree_gc_coalesce_fail,
TP_printk("%pU: %u", __entry->uuid, __entry->reason)
);
-TRACE_EVENT(bcache_btree_node_alloc_replacement,
- TP_PROTO(struct bch_fs *c, struct btree *old, struct btree *b),
- TP_ARGS(c, old, b),
-
- TP_STRUCT__entry(
- __array(char, uuid, 16 )
- __field(u64, bucket )
- __field(u64, old_bucket )
- __field(u8, level )
- __field(u8, id )
- __field(u32, inode )
- __field(u64, offset )
- ),
-
- TP_fast_assign(
- memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
- __entry->old_bucket = PTR_BUCKET_NR_TRACE(c,
- &old->key, 0);
- __entry->bucket = PTR_BUCKET_NR_TRACE(c, &b->key, 0);
- __entry->level = b->level;
- __entry->id = b->btree_id;
- __entry->inode = b->key.k.p.inode;
- __entry->offset = b->key.k.p.offset;
- ),
-
- TP_printk("%pU for %llu bucket %llu(%u) id %u: %u:%llu",
- __entry->uuid, __entry->old_bucket, __entry->bucket,
- __entry->level, __entry->id,
- __entry->inode, __entry->offset)
-);
-
-DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node,
+DEFINE_EVENT(btree_node, btree_gc_rewrite_node,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
-DEFINE_EVENT(btree_node, bcache_btree_gc_rewrite_node_fail,
+DEFINE_EVENT(btree_node, btree_gc_rewrite_node_fail,
TP_PROTO(struct bch_fs *c, struct btree *b),
TP_ARGS(c, b)
);
-DEFINE_EVENT(bch_fs, bcache_gc_start,
+DEFINE_EVENT(bch_fs, gc_start,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_gc_end,
+DEFINE_EVENT(bch_fs, gc_end,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_gc_coalesce_start,
+DEFINE_EVENT(bch_fs, gc_coalesce_start,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_gc_coalesce_end,
+DEFINE_EVENT(bch_fs, gc_coalesce_end,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_dev, bcache_sectors_saturated,
+DEFINE_EVENT(bch_dev, sectors_saturated,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
-DEFINE_EVENT(bch_fs, bcache_gc_sectors_saturated,
+DEFINE_EVENT(bch_fs, gc_sectors_saturated,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-DEFINE_EVENT(bch_fs, bcache_gc_cannot_inc_gens,
+DEFINE_EVENT(bch_fs, gc_cannot_inc_gens,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
/* Allocator */
-TRACE_EVENT(bcache_alloc_batch,
+TRACE_EVENT(alloc_batch,
TP_PROTO(struct bch_dev *ca, size_t free, size_t total),
TP_ARGS(ca, free, total),
@@ -571,17 +514,17 @@ TRACE_EVENT(bcache_alloc_batch,
__entry->uuid, __entry->free, __entry->total)
);
-DEFINE_EVENT(bch_dev, bcache_prio_write_start,
+DEFINE_EVENT(bch_dev, prio_write_start,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
-DEFINE_EVENT(bch_dev, bcache_prio_write_end,
+DEFINE_EVENT(bch_dev, prio_write_end,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
-TRACE_EVENT(bcache_invalidate,
+TRACE_EVENT(invalidate,
TP_PROTO(struct bch_dev *ca, size_t bucket, unsigned sectors),
TP_ARGS(ca, bucket, sectors),
@@ -602,7 +545,7 @@ TRACE_EVENT(bcache_invalidate,
MINOR(__entry->dev), __entry->offset)
);
-DEFINE_EVENT(bch_fs, bcache_rescale_prios,
+DEFINE_EVENT(bch_fs, rescale_prios,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
@@ -624,17 +567,17 @@ DECLARE_EVENT_CLASS(bucket_alloc,
TP_printk("%pU reserve %d", __entry->uuid, __entry->reserve)
);
-DEFINE_EVENT(bucket_alloc, bcache_bucket_alloc,
+DEFINE_EVENT(bucket_alloc, bucket_alloc,
TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
TP_ARGS(ca, reserve)
);
-DEFINE_EVENT(bucket_alloc, bcache_bucket_alloc_fail,
+DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
TP_ARGS(ca, reserve)
);
-TRACE_EVENT(bcache_freelist_empty_fail,
+TRACE_EVENT(freelist_empty_fail,
TP_PROTO(struct bch_fs *c, enum alloc_reserve reserve,
struct closure *cl),
TP_ARGS(c, reserve, cl),
@@ -673,12 +616,12 @@ DECLARE_EVENT_CLASS(open_bucket_alloc,
__entry->uuid, __entry->cl)
);
-DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc,
+DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc,
TP_PROTO(struct bch_fs *c, struct closure *cl),
TP_ARGS(c, cl)
);
-DEFINE_EVENT(open_bucket_alloc, bcache_open_bucket_alloc_fail,
+DEFINE_EVENT(open_bucket_alloc, open_bucket_alloc_fail,
TP_PROTO(struct bch_fs *c, struct closure *cl),
TP_ARGS(c, cl)
);
@@ -705,39 +648,39 @@ DECLARE_EVENT_CLASS(moving_io,
__entry->inode, __entry->offset, __entry->sectors)
);
-DEFINE_EVENT(moving_io, bcache_move_read,
+DEFINE_EVENT(moving_io, move_read,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
-DEFINE_EVENT(moving_io, bcache_move_read_done,
+DEFINE_EVENT(moving_io, move_read_done,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
-DEFINE_EVENT(moving_io, bcache_move_write,
+DEFINE_EVENT(moving_io, move_write,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
-DEFINE_EVENT(moving_io, bcache_copy_collision,
+DEFINE_EVENT(moving_io, copy_collision,
TP_PROTO(struct bkey *k),
TP_ARGS(k)
);
/* Copy GC */
-DEFINE_EVENT(page_alloc_fail, bcache_moving_gc_alloc_fail,
+DEFINE_EVENT(page_alloc_fail, moving_gc_alloc_fail,
TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size)
);
-DEFINE_EVENT(bch_dev, bcache_moving_gc_start,
+DEFINE_EVENT(bch_dev, moving_gc_start,
TP_PROTO(struct bch_dev *ca),
TP_ARGS(ca)
);
-TRACE_EVENT(bcache_moving_gc_end,
+TRACE_EVENT(moving_gc_end,
TP_PROTO(struct bch_dev *ca, u64 sectors_moved, u64 keys_moved,
u64 buckets_moved),
TP_ARGS(ca, sectors_moved, keys_moved, buckets_moved),
@@ -761,24 +704,24 @@ TRACE_EVENT(bcache_moving_gc_end,
__entry->buckets_moved)
);
-DEFINE_EVENT(bkey, bcache_gc_copy,
+DEFINE_EVENT(bkey, gc_copy,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
);
/* Tiering */
-DEFINE_EVENT(page_alloc_fail, bcache_tiering_alloc_fail,
+DEFINE_EVENT(page_alloc_fail, tiering_alloc_fail,
TP_PROTO(struct bch_fs *c, u64 size),
TP_ARGS(c, size)
);
-DEFINE_EVENT(bch_fs, bcache_tiering_start,
+DEFINE_EVENT(bch_fs, tiering_start,
TP_PROTO(struct bch_fs *c),
TP_ARGS(c)
);
-TRACE_EVENT(bcache_tiering_end,
+TRACE_EVENT(tiering_end,
TP_PROTO(struct bch_fs *c, u64 sectors_moved,
u64 keys_moved),
TP_ARGS(c, sectors_moved, keys_moved),
@@ -799,7 +742,7 @@ TRACE_EVENT(bcache_tiering_end,
__entry->uuid, __entry->sectors_moved, __entry->keys_moved)
);
-DEFINE_EVENT(bkey, bcache_tiering_copy,
+DEFINE_EVENT(bkey, tiering_copy,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
);
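
Since DEFINE_EVENT(class, name, ...) is what generates the trace_<name>() call sites (standard Linux tracepoint macros), dropping the bcache_ prefix from the event names above is exactly what forces the caller-side renames already visible in the tier.c hunk earlier in this patch, for example:

	/* from the tier.c hunk above: callers now use the unprefixed names */
	if (!ret) {
		trace_tiering_copy(k.k);		/* was trace_bcache_tiering_copy() */
		s->sectors += k.k->size;
	} else {
		trace_tiering_alloc_fail(c, k.k->size);	/* was trace_bcache_tiering_alloc_fail() */
	}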