author     Kent Overstreet <kent.overstreet@gmail.com>  2018-11-27 08:23:22 -0500
committer  Kent Overstreet <kent.overstreet@gmail.com>  2018-12-27 11:38:37 -0500
commit     1e572b45b7ea4b9e8a73f0ba9d5f031a5382af3b (patch)
tree       390228ba6dc5ba93ff5c3506e7fac5d21779db73
parent     680c95b58e2a008a835a22112f7f4d80595a64d4 (diff)
bcachefs: refactor bch_fs_usage
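In outline (as read from the hunks below): the per-cpu available_cache moves
out of struct bch_fs_usage into a new dedicated per-cpu struct,
bch2_fs_sectors_free() drops its usage-snapshot argument, and a new helper,
bch2_fs_usage_read_short(), gives callers headline numbers without copying
the full usage struct. The two types the patch introduces, collected from
the bcachefs.h and buckets_types.h hunks:

        struct bch_fs_pcpu {
                u64     sectors_available;
        };

        struct bch_fs_usage_short {
                u64     capacity;
                u64     used;
                u64     nr_inodes;
        };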
-rw-r--r--  fs/bcachefs/alloc_foreground.c    2
-rw-r--r--  fs/bcachefs/bcachefs.h           10
-rw-r--r--  fs/bcachefs/buckets.c           132
-rw-r--r--  fs/bcachefs/buckets.h            42
-rw-r--r--  fs/bcachefs/buckets_types.h      14
-rw-r--r--  fs/bcachefs/chardev.c             2
-rw-r--r--  fs/bcachefs/fs.c                  8
-rw-r--r--  fs/bcachefs/super.c               2
8 files changed, 119 insertions, 93 deletions
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index ecd07dfdd57f..596d3bc7d99e 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -720,7 +720,7 @@ static struct write_point *__writepoint_find(struct hlist_head *head,
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
u64 stranded = c->write_points_nr * c->bucket_size_max;
- u64 free = bch2_fs_sectors_free(c, bch2_fs_usage_read(c));
+ u64 free = bch2_fs_sectors_free(c);
return stranded * factor > free;
}
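The only change here: bch2_fs_sectors_free() now computes current usage
itself instead of taking a caller-supplied snapshot (see its new definition
in the buckets.h hunk below). For a feel for the heuristic, a standalone
illustration with hypothetical numbers:

        /* hypothetical: 32 write points, 1024-sector max bucket size */
        u64 stranded = 32 * 1024;       /* 32768 sectors */
        u64 free     = 60000;           /* from bch2_fs_sectors_free(c) */
        unsigned factor = 2;

        bool too_many = stranded * factor > free;  /* 65536 > 60000: true */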
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index e8428e0fe59e..5149e6eaf49e 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -500,6 +500,10 @@ enum bch_fs_state {
BCH_FS_RW,
};
+struct bch_fs_pcpu {
+ u64 sectors_available;
+};
+
struct bch_fs {
struct closure cl;
@@ -612,9 +616,11 @@ struct bch_fs {
atomic64_t sectors_available;
- struct bch_fs_usage __percpu *usage[2];
+ struct bch_fs_pcpu __percpu *pcpu;
+
+ struct bch_fs_usage __percpu *usage[2];
- struct percpu_rw_semaphore mark_lock;
+ struct percpu_rw_semaphore mark_lock;
/*
* When we invalidate buckets, we use both the priority and the amount
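c->usage[] is a pair of per-cpu counter sets: usage[0] is the live set and
usage[1], judging by the gc_visited() check in bch2_fs_usage_apply() below,
is the copy being rebuilt by gc. All the marking paths in buckets.c pick one
with a bool index:

        /* gc == false: live counters; gc == true: the set gc is rebuilding */
        struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);

The new struct bch_fs_pcpu, by contrast, holds per-cpu state that is not
usage accounting at all - just the reservation cache - which is what lets
the "fields starting here aren't touched by gc" boundary in struct
bch_fs_usage go away (see the buckets_types.h hunk).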
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index d8eb545df24c..5f2ff3604f93 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -271,16 +271,31 @@ static u64 avail_factor(u64 r)
return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
}
-static inline u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
+static inline u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
{
- struct fs_usage_sum sum = __fs_usage_sum(stats);
+ struct fs_usage_sum sum = __fs_usage_sum(fs_usage);
return sum.hidden + sum.data + reserve_factor(sum.reserved);
}
-u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage fs_usage)
{
- return min(c->capacity, __bch2_fs_sectors_used(c, stats));
+ return min(c->capacity, __bch2_fs_sectors_used(c, fs_usage));
+}
+
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *c)
+{
+ struct bch_fs_usage usage = bch2_fs_usage_read(c);
+ struct fs_usage_sum sum = __fs_usage_sum(usage);
+ struct bch_fs_usage_short ret;
+
+ ret.capacity = READ_ONCE(c->capacity) - sum.hidden;
+ ret.used = min(ret.capacity, sum.data +
+ reserve_factor(sum.reserved));
+ ret.nr_inodes = usage.nr_inodes;
+
+ return ret;
}
static inline int is_unavailable_bucket(struct bucket_mark m)
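Note the semantics of the new helper: capacity is reported net of hidden
sectors (superblock and journal, per the old fs.c code this replaces), and
used is clamped so it can never exceed that capacity. Callers that only
need these headline numbers no longer copy a full struct bch_fs_usage; in
sketch:

        struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);

        u64 total = u.capacity;            /* already excludes sb/journal */
        u64 free  = u.capacity - u.used;   /* used is clamped <= capacity */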
@@ -314,11 +329,11 @@ static bool bucket_became_unavailable(struct bucket_mark old,
}
void bch2_fs_usage_apply(struct bch_fs *c,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
- struct fs_usage_sum sum = __fs_usage_sum(*stats);
+ struct fs_usage_sum sum = __fs_usage_sum(*fs_usage);
s64 added = sum.data + sum.reserved;
s64 should_not_have_added;
@@ -336,23 +351,18 @@ void bch2_fs_usage_apply(struct bch_fs *c,
}
if (added > 0) {
- disk_res->sectors -= added;
- stats->online_reserved -= added;
+ disk_res->sectors -= added;
+ fs_usage->online_reserved -= added;
}
- /* online_reserved not subject to gc: */
- this_cpu_ptr(c->usage[0])->online_reserved +=
- stats->online_reserved;
- stats->online_reserved = 0;
-
- bch2_usage_add(this_cpu_ptr(c->usage[0]), stats);
+ bch2_usage_add(this_cpu_ptr(c->usage[0]), fs_usage);
if (gc_visited(c, gc_pos))
- bch2_usage_add(this_cpu_ptr(c->usage[1]), stats);
+ bch2_usage_add(this_cpu_ptr(c->usage[1]), fs_usage);
bch2_fs_stats_verify(c);
- memset(stats, 0, sizeof(*stats));
+ memset(fs_usage, 0, sizeof(*fs_usage));
}
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
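With available_cache gone from struct bch_fs_usage (and online_reserved
bumped at reservation time via this_cpu_add() - see the
bch2_disk_reservation_add() hunks), the special-cased transfer of
online_reserved into usage[0] disappears and the whole delta struct is
added in one go. bch2_usage_add() itself isn't shown in this diff; a
plausible shape, assuming it treats the struct as a flat array of u64
counters:

        static inline void bch2_usage_add(struct bch_fs_usage *dst,
                                          struct bch_fs_usage *src)
        {
                u64 *d = (u64 *) dst;
                u64 *s = (u64 *) src;
                unsigned i;

                for (i = 0; i < sizeof(*dst) / sizeof(u64); i++)
                        d[i] += s[i];
        }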
@@ -431,11 +441,11 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old,
bool gc)
{
- struct bch_fs_usage *stats = this_cpu_ptr(c->usage[gc]);
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark new;
- *old = bucket_data_cmpxchg(c, ca, stats, g, new, ({
+ *old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
BUG_ON(!is_available_bucket(new));
new.owned_by_allocator = 1;
@@ -445,7 +455,7 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
new.gen++;
}));
- stats->replicas[0].data[BCH_DATA_CACHED] -= old->cached_sectors;
+ fs_usage->replicas[0].data[BCH_DATA_CACHED] -= old->cached_sectors;
}
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -464,11 +474,11 @@ static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
bool gc)
{
- struct bch_fs_usage *stats = this_cpu_ptr(c->usage[gc]);
+ struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark old, new;
- old = bucket_data_cmpxchg(c, ca, stats, g, new, ({
+ old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
new.owned_by_allocator = owned_by_allocator;
}));
@@ -580,7 +590,7 @@ static void bch2_mark_pointer(struct bch_fs *c,
struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type,
struct bch_fs_usage *fs_usage,
- u64 journal_seq, unsigned flags,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
struct bucket_mark old, new;
@@ -685,8 +695,8 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
s64 sectors, enum bch_data_type data_type,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -709,7 +719,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
s64 adjusted_disk_sectors = disk_sectors;
bch2_mark_pointer(c, p, disk_sectors, data_type,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
if (!p.ptr.cached)
for (i = 0; i < p.ec_nr; i++) {
@@ -732,13 +742,13 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
}
replicas = clamp_t(unsigned, replicas,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
ec_redundancy = clamp_t(unsigned, ec_redundancy,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
- stats->replicas[0].data[BCH_DATA_CACHED] += cached_sectors;
- stats->replicas[replicas - 1].data[data_type] += dirty_sectors;
- stats->replicas[ec_redundancy - 1].ec_data += ec_sectors;
+ fs_usage->replicas[0].data[BCH_DATA_CACHED] += cached_sectors;
+ fs_usage->replicas[replicas - 1].data[data_type] += dirty_sectors;
+ fs_usage->replicas[ec_redundancy - 1].ec_data += ec_sectors;
return 0;
}
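Accounting slots are indexed by replica count minus one, so the clamp_t()
calls pin replicas and ec_redundancy to [1, ARRAY_SIZE(fs_usage->replicas)]:
the lower bound keeps the index from underflowing for fully-cached extents
(zero non-cached pointers), the upper bound folds over-replicated keys into
the last slot. Illustration with hypothetical values:

        /* hypothetical: 4 accounting slots, a key claiming 6 replicas */
        unsigned replicas = clamp_t(unsigned, 6, 1, 4);   /* == 4 */

        /* dirty sectors land in slot replicas - 1 == 3, the last one */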
@@ -823,8 +833,8 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
bool inserting, s64 sectors,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags,
+ struct bch_fs_usage *fs_usage,
+ unsigned journal_seq, unsigned flags,
bool gc)
{
int ret = 0;
@@ -835,30 +845,30 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
? c->opts.btree_node_size
: -c->opts.btree_node_size,
BCH_DATA_BTREE,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_extent:
ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_stripe:
ret = bch2_mark_stripe(c, k, inserting,
- stats, journal_seq, flags, gc);
+ fs_usage, journal_seq, flags, gc);
break;
case KEY_TYPE_alloc:
if (inserting)
- stats->nr_inodes++;
+ fs_usage->nr_inodes++;
else
- stats->nr_inodes--;
+ fs_usage->nr_inodes--;
break;
case KEY_TYPE_reservation: {
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
sectors *= replicas;
replicas = clamp_t(unsigned, replicas,
- 1, ARRAY_SIZE(stats->replicas));
+ 1, ARRAY_SIZE(fs_usage->replicas));
- stats->replicas[replicas - 1].persistent_reserved += sectors;
+ fs_usage->replicas[replicas - 1].persistent_reserved += sectors;
break;
}
default:
@@ -872,17 +882,15 @@ int bch2_mark_key_locked(struct bch_fs *c,
struct bkey_s_c k,
bool inserting, s64 sectors,
struct gc_pos pos,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
int ret;
if (!(flags & BCH_BUCKET_MARK_GC)) {
- if (!stats)
- stats = this_cpu_ptr(c->usage[0]);
-
ret = __bch2_mark_key(c, k, inserting, sectors,
- stats, journal_seq, flags, false);
+ fs_usage ?: this_cpu_ptr(c->usage[0]),
+ journal_seq, flags, false);
if (ret)
return ret;
}
@@ -902,14 +910,14 @@ int bch2_mark_key_locked(struct bch_fs *c,
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
bool inserting, s64 sectors,
struct gc_pos pos,
- struct bch_fs_usage *stats,
+ struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
int ret;
percpu_down_read_preempt_disable(&c->mark_lock);
ret = bch2_mark_key_locked(c, k, inserting, sectors,
- pos, stats, journal_seq, flags);
+ pos, fs_usage, journal_seq, flags);
percpu_up_read_preempt_enable(&c->mark_lock);
return ret;
@@ -922,7 +930,7 @@ void bch2_mark_update(struct btree_insert *trans,
struct btree_iter *iter = insert->iter;
struct btree *b = iter->l[0].b;
struct btree_node_iter node_iter = iter->l[0].iter;
- struct bch_fs_usage stats = { 0 };
+ struct bch_fs_usage fs_usage = { 0 };
struct gc_pos pos = gc_pos_btree_node(b);
struct bkey_packed *_k;
@@ -935,7 +943,7 @@ void bch2_mark_update(struct btree_insert *trans,
bch2_mark_key_locked(c, bkey_i_to_s_c(insert->k), true,
bpos_min(insert->k->k.p, b->key.k.p).offset -
bkey_start_offset(&insert->k->k),
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_discard))) {
@@ -968,7 +976,7 @@ void bch2_mark_update(struct btree_insert *trans,
BUG_ON(sectors <= 0);
bch2_mark_key_locked(c, k, true, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
sectors = bkey_start_offset(&insert->k->k) -
k.k->p.offset;
@@ -979,12 +987,12 @@ void bch2_mark_update(struct btree_insert *trans,
}
bch2_mark_key_locked(c, k, false, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ pos, &fs_usage, trans->journal_res.seq, 0);
bch2_btree_node_iter_advance(&node_iter, b);
}
- bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
+ bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos);
percpu_up_read_preempt_enable(&c->mark_lock);
}
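The pattern here is worth calling out: every key touched by one btree
update accumulates its deltas into a zeroed on-stack struct, and the
per-cpu counters are hit exactly once, all under mark_lock. Condensed:

        struct bch_fs_usage fs_usage = { 0 };

        percpu_down_read_preempt_disable(&c->mark_lock);

        /* one bch2_mark_key_locked(..., &fs_usage, ...) per key */

        bch2_fs_usage_apply(c, &fs_usage, trans->disk_res, pos);
        percpu_up_read_preempt_enable(&c->mark_lock);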
@@ -996,9 +1004,9 @@ static u64 bch2_recalc_sectors_available(struct bch_fs *c)
int cpu;
for_each_possible_cpu(cpu)
- per_cpu_ptr(c->usage[0], cpu)->available_cache = 0;
+ per_cpu_ptr(c->pcpu, cpu)->sectors_available = 0;
- return avail_factor(bch2_fs_sectors_free(c, bch2_fs_usage_read(c)));
+ return avail_factor(bch2_fs_sectors_free(c));
}
void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
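Recalculation zeroes every CPU's cache, then reseeds the shared counter
from a discounted free count. avail_factor() (top of this file) scales by
2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1); the constant's value isn't
visible in this diff, so the numbers below assume RESERVE_FACTOR == 6:

        /* assumed: RESERVE_FACTOR == 6 */
        u64 r     = 65000;
        u64 avail = (r << 6) / ((1 << 6) + 1);  /* 65000 * 64 / 65 == 64000 */

About 1.5% is held back, so cached reservations stay on the safe side of
the true free count.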
@@ -1018,15 +1026,15 @@ void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
unsigned sectors, int flags)
{
- struct bch_fs_usage *stats;
+ struct bch_fs_pcpu *pcpu;
u64 old, v, get;
s64 sectors_available;
int ret;
percpu_down_read_preempt_disable(&c->mark_lock);
- stats = this_cpu_ptr(c->usage[0]);
+ pcpu = this_cpu_ptr(c->pcpu);
- if (sectors <= stats->available_cache)
+ if (sectors <= pcpu->sectors_available)
goto out;
v = atomic64_read(&c->sectors_available);
@@ -1041,12 +1049,12 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
} while ((v = atomic64_cmpxchg(&c->sectors_available,
old, old - get)) != old);
- stats->available_cache += get;
+ pcpu->sectors_available += get;
out:
- stats->available_cache -= sectors;
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ pcpu->sectors_available -= sectors;
+ this_cpu_add(c->usage[0]->online_reserved, sectors);
+ res->sectors += sectors;
bch2_disk_reservations_verify(c, flags);
bch2_fs_stats_verify(c);
@@ -1078,8 +1086,8 @@ recalculate:
(flags & BCH_DISK_RESERVATION_NOFAIL)) {
atomic64_set(&c->sectors_available,
max_t(s64, 0, sectors_available - sectors));
- stats->online_reserved += sectors;
- res->sectors += sectors;
+ this_cpu_add(c->usage[0]->online_reserved, sectors);
+ res->sectors += sectors;
ret = 0;
bch2_disk_reservations_verify(c, flags);
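When the per-cpu cache can't cover a request, the slow path carves a batch
off the shared atomic with a cmpxchg loop. The loop body that chooses `get`
is elided from this hunk, so the following is a reconstruction, assuming
the usual grab-request-plus-refill policy and a batch constant (here called
SECTORS_CACHE):

        v = atomic64_read(&c->sectors_available);
        do {
                old = v;
                if (old < sectors)
                        goto recalculate;       /* shared pool exhausted */

                /* take the request plus a per-cpu refill (policy assumed) */
                get = min((u64) sectors + SECTORS_CACHE, old);
        } while ((v = atomic64_cmpxchg(&c->sectors_available,
                                       old, old - get)) != old);

        pcpu->sectors_available += get;         /* refill this CPU's cache */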
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 554eb03b82ce..84059111c7ca 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -163,6 +163,20 @@ static inline bool bucket_unused(struct bucket_mark mark)
!bucket_sectors_used(mark);
}
+static inline bool is_available_bucket(struct bucket_mark mark)
+{
+ return (!mark.owned_by_allocator &&
+ !mark.dirty_sectors &&
+ !mark.stripe);
+}
+
+static inline bool bucket_needs_journal_commit(struct bucket_mark m,
+ u16 last_seq_ondisk)
+{
+ return m.journal_seq_valid &&
+ ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
+}
+
/* Device usage: */
struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *, bool);
@@ -206,31 +220,21 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *, bool);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
-void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
- struct disk_reservation *, struct gc_pos);
u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
-static inline u64 bch2_fs_sectors_free(struct bch_fs *c,
- struct bch_fs_usage stats)
-{
- return c->capacity - bch2_fs_sectors_used(c, stats);
-}
+struct bch_fs_usage_short
+bch2_fs_usage_read_short(struct bch_fs *);
-static inline bool is_available_bucket(struct bucket_mark mark)
+static inline u64 bch2_fs_sectors_free(struct bch_fs *c)
{
- return (!mark.owned_by_allocator &&
- !mark.dirty_sectors &&
- !mark.stripe);
-}
+ struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
-static inline bool bucket_needs_journal_commit(struct bucket_mark m,
- u16 last_seq_ondisk)
-{
- return m.journal_seq_valid &&
- ((s16) m.journal_seq - (s16) last_seq_ondisk > 0);
+ return usage.capacity - usage.used;
}
+/* key/bucket marking: */
+
void bch2_bucket_seq_cleanup(struct bch_fs *);
void bch2_invalidate_bucket(struct bch_fs *, struct bch_dev *,
@@ -251,6 +255,10 @@ int bch2_mark_key(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,
struct bch_fs_usage *, u64, unsigned);
void bch2_mark_update(struct btree_insert *, struct btree_insert_entry *);
+void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
+ struct disk_reservation *, struct gc_pos);
+
+/* disk reservations: */
void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 56289559d256..e60901a5b047 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -72,18 +72,22 @@ struct bch_fs_usage {
u64 nr_inodes;
- /* fields starting here aren't touched by gc: */
u64 online_reserved;
- u64 available_cache;
+};
+
+struct bch_fs_usage_short {
+ u64 capacity;
+ u64 used;
+ u64 nr_inodes;
};
/*
* A reservation for space on disk:
*/
struct disk_reservation {
- u64 sectors;
- u32 gen;
- unsigned nr_replicas;
+ u64 sectors;
+ u32 gen;
+ unsigned nr_replicas;
};
struct copygc_heap_entry {
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 808167d5a60c..3c228f680427 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -305,7 +305,7 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
.p.btree_id = ctx->stats.iter.btree_id,
.p.pos = ctx->stats.iter.pos,
.p.sectors_done = atomic64_read(&ctx->stats.sectors_seen),
- .p.sectors_total = bch2_fs_sectors_used(c, bch2_fs_usage_read(c)),
+ .p.sectors_total = bch2_fs_usage_read_short(c).used,
};
if (len < sizeof(e))
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 73a5c10354e2..13670a638e0a 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1338,16 +1338,14 @@ static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct bch_fs *c = sb->s_fs_info;
- struct bch_fs_usage usage = bch2_fs_usage_read(c);
- u64 hidden_metadata = usage.buckets[BCH_DATA_SB] +
- usage.buckets[BCH_DATA_JOURNAL];
+ struct bch_fs_usage_short usage = bch2_fs_usage_read_short(c);
unsigned shift = sb->s_blocksize_bits - 9;
u64 fsid;
buf->f_type = BCACHEFS_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
- buf->f_blocks = (c->capacity - hidden_metadata) >> shift;
- buf->f_bfree = (c->capacity - bch2_fs_sectors_used(c, usage)) >> shift;
+ buf->f_blocks = usage.capacity >> shift;
+ buf->f_bfree = (usage.capacity - usage.used) >> shift;
buf->f_bavail = buf->f_bfree;
buf->f_files = usage.nr_inodes;
buf->f_ffree = U64_MAX;
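Units: the usage counters are in 512-byte sectors, while statfs reports
filesystem blocks, hence the shift of s_blocksize_bits - 9. Hypothetical
numbers: with 4096-byte blocks, shift = 12 - 9 = 3, so a 1 TiB filesystem
(2^31 sectors) reports:

        unsigned shift = 12 - 9;            /* 4096-byte blocks */
        u64 capacity   = 1ULL << 31;        /* 1 TiB in 512B sectors */
        u64 f_blocks   = capacity >> shift; /* 2^28 == 268435456 blocks */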
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 89eee959222a..0212832a7e8a 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -407,6 +407,7 @@ static void bch2_fs_free(struct bch_fs *c)
bch2_fs_compress_exit(c);
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->usage[0]);
+ free_percpu(c->pcpu);
mempool_exit(&c->btree_iters_pool);
mempool_exit(&c->btree_bounce_pool);
bioset_exit(&c->btree_bio);
@@ -644,6 +645,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
!(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
+ !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,