summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-03-28 20:56:25 -0400
committerKent Overstreet <kent.overstreet@linux.dev>2022-10-03 22:51:18 -0400
commit105f8de63f061e80e87d108491b6465135668ed4 (patch)
tree737333a43b93fc87a33f120784be51566a86a76e
parentca1bca416120bf355692c1a51512213d25a16d80 (diff)
bcachefs: Don't use write side of mark_lock in journal write path
The write side of percpu rwsemaphores is really expensive, and we shouldn't be taking it at all in steady state operation. Fortunately, in bch2_journal_super_entries_add_common(), we don't need to - we have a seqlock, usage_lock, for accumulating percpu usage counters to the base counters. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--fs/bcachefs/bcachefs.h3
-rw-r--r--fs/bcachefs/btree_update_leaf.c6
-rw-r--r--fs/bcachefs/buckets.c168
-rw-r--r--fs/bcachefs/buckets.h20
-rw-r--r--fs/bcachefs/buckets_types.h12
-rw-r--r--fs/bcachefs/chardev.c6
-rw-r--r--fs/bcachefs/replicas.c6
-rw-r--r--fs/bcachefs/super-io.c4
-rw-r--r--fs/bcachefs/super.c2
-rw-r--r--fs/bcachefs/sysfs.c4
10 files changed, 104 insertions, 127 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 17e3d55a1f06..abb30fe03aa7 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -684,10 +684,11 @@ struct bch_fs {
struct bch_fs_usage *usage_base;
struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
struct bch_fs_usage __percpu *usage_gc;
+ u64 __percpu *online_reserved;
/* single element mempool: */
struct mutex usage_scratch_lock;
- struct bch_fs_usage *usage_scratch;
+ struct bch_fs_usage_online *usage_scratch;
struct io_clock io_clock[2];
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 3744fb487e6c..a45aac1b1af3 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -375,7 +375,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
struct btree_insert_entry **stopped_at)
{
struct bch_fs *c = trans->c;
- struct bch_fs_usage *fs_usage = NULL;
+ struct bch_fs_usage_online *fs_usage = NULL;
struct btree_insert_entry *i;
struct btree_trans_commit_hook *h;
unsigned u64s = 0;
@@ -464,7 +464,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
/* Must be called under mark_lock: */
if (marking && trans->fs_usage_deltas &&
- bch2_replicas_delta_list_apply(c, fs_usage,
+ bch2_replicas_delta_list_apply(c, &fs_usage->u,
trans->fs_usage_deltas)) {
ret = BTREE_INSERT_NEED_MARK_REPLICAS;
goto err;
@@ -473,7 +473,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
trans_for_each_update(trans, i)
if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type))
bch2_mark_update(trans, i->iter, i->k,
- fs_usage, i->trigger_flags);
+ &fs_usage->u, i->trigger_flags);
if (marking)
bch2_trans_fs_usage_apply(trans, fs_usage);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index e26e95c8dcc8..fd8f24507343 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -167,7 +167,7 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
percpu_up_write(&c->mark_lock);
}
-void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
+void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
if (fs_usage == c->usage_scratch)
mutex_unlock(&c->usage_scratch_lock);
@@ -175,11 +175,11 @@ void bch2_fs_usage_scratch_put(struct bch_fs *c, struct bch_fs_usage *fs_usage)
kfree(fs_usage);
}
-struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *c)
+struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *c)
{
- struct bch_fs_usage *ret;
- unsigned bytes = fs_usage_u64s(c) * sizeof(u64);
-
+ struct bch_fs_usage_online *ret;
+ unsigned bytes = sizeof(struct bch_fs_usage_online) + sizeof(u64) *
+ READ_ONCE(c->replicas.nr);
ret = kzalloc(bytes, GFP_NOWAIT|__GFP_NOWARN);
if (ret)
return ret;
@@ -252,30 +252,28 @@ u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
return ret;
}
-struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
- struct bch_fs_usage *ret;
- unsigned seq, i, v, u64s = fs_usage_u64s(c);
-retry:
- ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
- if (unlikely(!ret))
- return NULL;
+ struct bch_fs_usage_online *ret;
+ unsigned seq, i, u64s;
percpu_down_read(&c->mark_lock);
- v = fs_usage_u64s(c);
- if (unlikely(u64s != v)) {
- u64s = v;
+ ret = kmalloc(sizeof(struct bch_fs_usage_online) +
+ sizeof(u64) * c->replicas.nr, GFP_NOFS);
+ if (unlikely(!ret)) {
percpu_up_read(&c->mark_lock);
- kfree(ret);
- goto retry;
+ return NULL;
}
+ ret->online_reserved = percpu_u64_get(c->online_reserved);
+
+ u64s = fs_usage_u64s(c);
do {
seq = read_seqcount_begin(&c->usage_lock);
- memcpy(ret, c->usage_base, u64s * sizeof(u64));
+ memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
- acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[i], u64s);
+ acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
} while (read_seqcount_retry(&c->usage_lock, seq));
return ret;
@@ -311,31 +309,31 @@ void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
void bch2_fs_usage_to_text(struct printbuf *out,
struct bch_fs *c,
- struct bch_fs_usage *fs_usage)
+ struct bch_fs_usage_online *fs_usage)
{
unsigned i;
pr_buf(out, "capacity:\t\t\t%llu\n", c->capacity);
pr_buf(out, "hidden:\t\t\t\t%llu\n",
- fs_usage->hidden);
+ fs_usage->u.hidden);
pr_buf(out, "data:\t\t\t\t%llu\n",
- fs_usage->data);
+ fs_usage->u.data);
pr_buf(out, "cached:\t\t\t\t%llu\n",
- fs_usage->cached);
+ fs_usage->u.cached);
pr_buf(out, "reserved:\t\t\t%llu\n",
- fs_usage->reserved);
+ fs_usage->u.reserved);
pr_buf(out, "nr_inodes:\t\t\t%llu\n",
- fs_usage->nr_inodes);
+ fs_usage->u.nr_inodes);
pr_buf(out, "online reserved:\t\t%llu\n",
fs_usage->online_reserved);
for (i = 0;
- i < ARRAY_SIZE(fs_usage->persistent_reserved);
+ i < ARRAY_SIZE(fs_usage->u.persistent_reserved);
i++) {
pr_buf(out, "%u replicas:\n", i + 1);
pr_buf(out, "\treserved:\t\t%llu\n",
- fs_usage->persistent_reserved[i]);
+ fs_usage->u.persistent_reserved[i]);
}
for (i = 0; i < c->replicas.nr; i++) {
@@ -344,7 +342,7 @@ void bch2_fs_usage_to_text(struct printbuf *out,
pr_buf(out, "\t");
bch2_replicas_entry_to_text(out, e);
- pr_buf(out, ":\t%llu\n", fs_usage->replicas[i]);
+ pr_buf(out, ":\t%llu\n", fs_usage->u.replicas[i]);
}
}
@@ -360,12 +358,12 @@ static u64 avail_factor(u64 r)
return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}
-u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage *fs_usage)
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
{
- return min(fs_usage->hidden +
- fs_usage->btree +
- fs_usage->data +
- reserve_factor(fs_usage->reserved +
+ return min(fs_usage->u.hidden +
+ fs_usage->u.btree +
+ fs_usage->u.data +
+ reserve_factor(fs_usage->u.reserved +
fs_usage->online_reserved),
c->capacity);
}
@@ -382,7 +380,7 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
data = bch2_fs_usage_read_one(c, &c->usage_base->data) +
bch2_fs_usage_read_one(c, &c->usage_base->btree);
reserved = bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
- bch2_fs_usage_read_one(c, &c->usage_base->online_reserved);
+ percpu_u64_get(c->online_reserved);
ret.used = min(ret.capacity, data + reserve_factor(reserved));
ret.free = ret.capacity - ret.used;
@@ -436,43 +434,6 @@ static bool bucket_became_unavailable(struct bucket_mark old,
!is_available_bucket(new);
}
-int bch2_fs_usage_apply(struct bch_fs *c,
- struct bch_fs_usage *fs_usage,
- struct disk_reservation *disk_res,
- unsigned journal_seq)
-{
- s64 added = fs_usage->data + fs_usage->reserved;
- s64 should_not_have_added;
- int ret = 0;
-
- percpu_rwsem_assert_held(&c->mark_lock);
-
- /*
- * Not allowed to reduce sectors_available except by getting a
- * reservation:
- */
- should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
- if (WARN_ONCE(should_not_have_added > 0,
- "disk usage increased by %lli more than reservation of %llu",
- added, disk_res ? disk_res->sectors : 0)) {
- atomic64_sub(should_not_have_added, &c->sectors_available);
- added -= should_not_have_added;
- ret = -1;
- }
-
- if (added > 0) {
- disk_res->sectors -= added;
- fs_usage->online_reserved -= added;
- }
-
- preempt_disable();
- acc_u64s((u64 *) fs_usage_ptr(c, journal_seq, false),
- (u64 *) fs_usage, fs_usage_u64s(c));
- preempt_enable();
-
- return ret;
-}
-
static inline void account_bucket(struct bch_fs_usage *fs_usage,
struct bch_dev_usage *dev_usage,
enum bch_data_type type,
@@ -504,8 +465,6 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
account_bucket(fs_usage, u, bucket_type(new),
1, ca->mi.bucket_size);
- u->buckets_alloc +=
- (int) new.owned_by_allocator - (int) old.owned_by_allocator;
u->buckets_ec += (int) new.stripe - (int) old.stripe;
u->buckets_unavailable +=
is_unavailable_bucket(new) - is_unavailable_bucket(old);
@@ -670,7 +629,6 @@ static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
bool gc)
{
- struct bch_fs_usage *fs_usage = fs_usage_ptr(c, 0, gc);
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark old, new;
@@ -678,13 +636,6 @@ static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
new.owned_by_allocator = owned_by_allocator;
}));
- /*
- * XXX: this is wrong, this means we'll be doing updates to the percpu
- * buckets_alloc counter that don't have an open journal buffer and
- * we'll race with the machinery that accumulates that to ca->usage_base
- */
- bch2_dev_usage_update(c, ca, fs_usage, old, new, 0, gc);
-
BUG_ON(!gc &&
!owned_by_allocator && !old.owned_by_allocator);
@@ -1433,8 +1384,47 @@ int bch2_mark_update(struct btree_trans *trans,
return ret;
}
+static int bch2_fs_usage_apply(struct bch_fs *c,
+ struct bch_fs_usage_online *src,
+ struct disk_reservation *disk_res,
+ unsigned journal_seq)
+{
+ struct bch_fs_usage *dst = fs_usage_ptr(c, journal_seq, false);
+ s64 added = src->u.data + src->u.reserved;
+ s64 should_not_have_added;
+ int ret = 0;
+
+ percpu_rwsem_assert_held(&c->mark_lock);
+
+ /*
+ * Not allowed to reduce sectors_available except by getting a
+ * reservation:
+ */
+ should_not_have_added = added - (s64) (disk_res ? disk_res->sectors : 0);
+ if (WARN_ONCE(should_not_have_added > 0,
+ "disk usage increased by %lli more than reservation of %llu",
+ added, disk_res ? disk_res->sectors : 0)) {
+ atomic64_sub(should_not_have_added, &c->sectors_available);
+ added -= should_not_have_added;
+ ret = -1;
+ }
+
+ if (added > 0) {
+ disk_res->sectors -= added;
+ src->online_reserved -= added;
+ }
+
+ this_cpu_add(*c->online_reserved, src->online_reserved);
+
+ preempt_disable();
+ acc_u64s((u64 *) dst, (u64 *) &src->u, fs_usage_u64s(c));
+ preempt_enable();
+
+ return ret;
+}
+
void bch2_trans_fs_usage_apply(struct btree_trans *trans,
- struct bch_fs_usage *fs_usage)
+ struct bch_fs_usage_online *fs_usage)
{
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
@@ -2214,16 +2204,6 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c,
/* Disk reservations: */
-void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
-{
- percpu_down_read(&c->mark_lock);
- this_cpu_sub(c->usage[0]->online_reserved,
- res->sectors);
- percpu_up_read(&c->mark_lock);
-
- res->sectors = 0;
-}
-
#define SECTORS_CACHE 1024
int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
@@ -2257,7 +2237,7 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
out:
pcpu->sectors_available -= sectors;
- this_cpu_add(c->usage[0]->online_reserved, sectors);
+ this_cpu_add(*c->online_reserved, sectors);
res->sectors += sectors;
preempt_enable();
@@ -2274,7 +2254,7 @@ recalculate:
(flags & BCH_DISK_RESERVATION_NOFAIL)) {
atomic64_set(&c->sectors_available,
max_t(s64, 0, sectors_available - sectors));
- this_cpu_add(c->usage[0]->online_reserved, sectors);
+ this_cpu_add(*c->online_reserved, sectors);
res->sectors += sectors;
ret = 0;
} else {
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 9a91a4969783..af8cb74d71e0 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -216,19 +216,19 @@ static inline unsigned dev_usage_u64s(void)
return sizeof(struct bch_dev_usage) / sizeof(u64);
}
-void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage *);
-struct bch_fs_usage *bch2_fs_usage_scratch_get(struct bch_fs *);
+void bch2_fs_usage_scratch_put(struct bch_fs *, struct bch_fs_usage_online *);
+struct bch_fs_usage_online *bch2_fs_usage_scratch_get(struct bch_fs *);
u64 bch2_fs_usage_read_one(struct bch_fs *, u64 *);
-struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *);
+struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_acc_to_base(struct bch_fs *, unsigned);
void bch2_fs_usage_to_text(struct printbuf *,
- struct bch_fs *, struct bch_fs_usage *);
+ struct bch_fs *, struct bch_fs_usage_online *);
-u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage *);
+u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage_online *);
struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);
@@ -246,8 +246,6 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned,
s64, struct bch_fs_usage *, u64, unsigned);
-int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
- struct disk_reservation *, unsigned);
int bch2_mark_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, struct bch_fs_usage *, unsigned);
@@ -259,7 +257,7 @@ int bch2_trans_mark_key(struct btree_trans *, struct bkey_s_c, struct bkey_s_c,
unsigned, s64, unsigned);
int bch2_trans_mark_update(struct btree_trans *, struct btree_iter *iter,
struct bkey_i *insert, unsigned);
-void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage *);
+void bch2_trans_fs_usage_apply(struct btree_trans *, struct bch_fs_usage_online *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *,
struct disk_reservation *, struct bch_dev *,
@@ -269,13 +267,11 @@ int bch2_trans_mark_dev_sb(struct bch_fs *, struct disk_reservation *,
/* disk reservations: */
-void __bch2_disk_reservation_put(struct bch_fs *, struct disk_reservation *);
-
static inline void bch2_disk_reservation_put(struct bch_fs *c,
struct disk_reservation *res)
{
- if (res->sectors)
- __bch2_disk_reservation_put(c, res);
+ this_cpu_sub(*c->online_reserved, res->sectors);
+ res->sectors = 0;
}
#define BCH_DISK_RESERVATION_NOFAIL (1 << 0)
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 404c89a7a264..b6ea67506cc2 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -53,7 +53,6 @@ struct bucket_array {
};
struct bch_dev_usage {
- u64 buckets_alloc;
u64 buckets_ec;
u64 buckets_unavailable;
@@ -66,12 +65,6 @@ struct bch_dev_usage {
struct bch_fs_usage {
/* all fields are in units of 512 byte sectors: */
-
- u64 online_reserved;
-
- /* fields after online_reserved are cleared/recalculated by gc: */
- u64 gc_start[0];
-
u64 hidden;
u64 btree;
u64 data;
@@ -91,6 +84,11 @@ struct bch_fs_usage {
u64 replicas[];
};
+struct bch_fs_usage_online {
+ u64 online_reserved;
+ struct bch_fs_usage u;
+};
+
struct bch_fs_usage_short {
u64 capacity;
u64 used;
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 49842ec88390..c61601476c0d 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -379,7 +379,7 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c,
{
struct bch_ioctl_fs_usage *arg = NULL;
struct bch_replicas_usage *dst_e, *dst_end;
- struct bch_fs_usage *src;
+ struct bch_fs_usage_online *src;
u32 replica_entries_bytes;
unsigned i;
int ret = 0;
@@ -405,7 +405,7 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c,
arg->online_reserved = src->online_reserved;
for (i = 0; i < BCH_REPLICAS_MAX; i++)
- arg->persistent_reserved[i] = src->persistent_reserved[i];
+ arg->persistent_reserved[i] = src->u.persistent_reserved[i];
dst_e = arg->replicas;
dst_end = (void *) arg->replicas + replica_entries_bytes;
@@ -419,7 +419,7 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c,
break;
}
- dst_e->sectors = src->replicas[i];
+ dst_e->sectors = src->u.replicas[i];
dst_e->r = *src_e;
/* recheck after setting nr_devs: */
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index e45a6d6b103c..068fbca1dd54 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -271,11 +271,13 @@ static int replicas_table_update(struct bch_fs *c,
struct bch_replicas_cpu *new_r)
{
struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
- struct bch_fs_usage *new_scratch = NULL;
+ struct bch_fs_usage_online *new_scratch = NULL;
struct bch_fs_usage __percpu *new_gc = NULL;
struct bch_fs_usage *new_base = NULL;
unsigned i, bytes = sizeof(struct bch_fs_usage) +
sizeof(u64) * new_r->nr;
+ unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
+ sizeof(u64) * new_r->nr;
int ret = 0;
memset(new_usage, 0, sizeof(new_usage));
@@ -286,7 +288,7 @@ static int replicas_table_update(struct bch_fs *c,
goto err;
if (!(new_base = kzalloc(bytes, GFP_KERNEL)) ||
- !(new_scratch = kmalloc(bytes, GFP_KERNEL)) ||
+ !(new_scratch = kmalloc(scratch_bytes, GFP_KERNEL)) ||
(c->usage_gc &&
!(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_KERNEL))))
goto err;
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index fcbc1901952b..9d7820db00d4 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -993,7 +993,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
struct bch_dev *ca;
unsigned i, dev;
- percpu_down_write(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
if (!journal_seq) {
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
@@ -1064,7 +1064,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
}
}
- percpu_up_write(&c->mark_lock);
+ percpu_up_read(&c->mark_lock);
for (i = 0; i < 2; i++) {
struct jset_entry_clock *clock =
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 9f92ff3a1e45..ef798bdf791b 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -492,6 +492,7 @@ static void __bch2_fs_free(struct bch_fs *c)
for_each_possible_cpu(cpu)
kfree(per_cpu_ptr(c->btree_iters_bufs, cpu)->iter);
+ free_percpu(c->online_reserved);
free_percpu(c->btree_iters_bufs);
free_percpu(c->pcpu);
mempool_exit(&c->large_bkey_pool);
@@ -767,6 +768,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
BIOSET_NEED_BVECS) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
!(c->btree_iters_bufs = alloc_percpu(struct btree_iter_buf)) ||
+ !(c->online_reserved = alloc_percpu(u64)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index a6a0a3f6f205..4b83a98621d7 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -230,7 +230,7 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);
+ struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
if (!fs_usage)
return -ENOMEM;
@@ -794,7 +794,6 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
pr_buf(out,
"ec\t%16llu\n"
"available%15llu\n"
- "alloc\t%16llu\n"
"\n"
"free_inc\t\t%zu/%zu\n"
"free[RESERVE_MOVINGGC]\t%zu/%zu\n"
@@ -810,7 +809,6 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
"thread state:\t\t%s\n",
stats.buckets_ec,
__dev_buckets_available(ca, stats),
- stats.buckets_alloc,
fifo_used(&ca->free_inc), ca->free_inc.size,
fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,