author    Kent Overstreet <kent.overstreet@gmail.com>    2019-02-14 20:39:17 -0500
committer Kent Overstreet <kent.overstreet@gmail.com>    2019-04-03 12:44:07 -0400
commit    0e2ca2a40c6cb5af5b434eb81a723600ead2b43f (patch)
tree      96c291efddc7f34462ba435417dc3cd7affeb811
parent    bb9154d2a446f7aa0491bdde9066217bf49995d1 (diff)
bcachefs: fs_usage_u64s()
-rw-r--r--  fs/bcachefs/btree_gc.c   |  9
-rw-r--r--  fs/bcachefs/buckets.c    | 22
-rw-r--r--  fs/bcachefs/buckets.h    | 12
-rw-r--r--  fs/bcachefs/replicas.c   | 53
-rw-r--r--  fs/bcachefs/super.c      |  7
5 files changed, 48 insertions, 55 deletions
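
The change is a refactor: every open-coded `sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr` size computation is replaced with the new fs_usage_u64s() helper added to buckets.h below, and the percpu usage buffers formerly allocated in bch2_fs_alloc() are now set up by replicas_table_update() via bch2_fs_replicas_init(). As a standalone illustration of the helper, here is a minimal sketch using hypothetical simplified stand-ins (the *_sketch names and field lists are not the real bcachefs types):

/*
 * Sketch only: simplified stand-ins for the real bcachefs types, which
 * live under fs/bcachefs/ and carry many more fields.
 */
typedef unsigned long long u64;

struct fs_usage_sketch {
	u64	reserved;
	u64	persistent_reserved[4];
	u64	replicas[];		/* one counter per replicas entry */
};

struct fs_sketch {
	struct {
		unsigned nr;		/* current number of replicas entries */
	} replicas;
};

/*
 * Size of a usage allocation, in u64s: the fixed part of the struct plus
 * one dynamically sized counter per replicas entry.  The patch routes
 * every allocation and accumulation through this one computation.
 */
static inline unsigned fs_usage_u64s_sketch(struct fs_sketch *c)
{
	return sizeof(struct fs_usage_sketch) / sizeof(u64) +
		c->replicas.nr;
}

Callers then size buffers as fs_usage_u64s(c) * sizeof(u64), as in the kzalloc() and __alloc_percpu_gfp() calls in the hunks below, so the layout arithmetic lives in one place.
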
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index b1ed1e217788..8f146282b518 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -602,8 +602,7 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
}
{
- unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
- c->replicas.nr;
+ unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = (void *)
bch2_acc_percpu_u64s((void *) c->usage[0], nr);
struct bch_fs_usage *src = (void *)
@@ -654,10 +653,8 @@ static int bch2_gc_start(struct bch_fs *c)
BUG_ON(c->usage[1]);
- c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
- sizeof(u64) * c->replicas.nr,
- sizeof(u64),
- GFP_KERNEL);
+ c->usage[1] = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+ sizeof(u64), GFP_KERNEL);
percpu_up_write(&c->mark_lock);
if (!c->usage[1])
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 6c1b5e79b4e3..377a8b0f7f7d 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -116,11 +116,11 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
void bch2_fs_usage_initialize(struct bch_fs *c)
{
struct bch_fs_usage *usage;
- unsigned i, nr;
+ unsigned i;
percpu_down_write(&c->mark_lock);
- nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
- usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+ usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0],
+ fs_usage_u64s(c));
for (i = 0; i < BCH_REPLICAS_MAX; i++)
usage->reserved += usage->persistent_reserved[i];
@@ -158,24 +158,23 @@ struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage *ret;
- unsigned nr = READ_ONCE(c->replicas.nr);
+ unsigned v, u64s = fs_usage_u64s(c);
retry:
- ret = kzalloc(sizeof(*ret) + nr * sizeof(u64), GFP_NOFS);
+ ret = kzalloc(u64s * sizeof(u64), GFP_NOFS);
if (unlikely(!ret))
return NULL;
percpu_down_read_preempt_disable(&c->mark_lock);
- if (unlikely(nr < c->replicas.nr)) {
- nr = c->replicas.nr;
+ v = fs_usage_u64s(c);
+ if (unlikely(u64s != v)) {
+ u64s = v;
percpu_up_read_preempt_enable(&c->mark_lock);
kfree(ret);
goto retry;
}
- acc_u64s_percpu((u64 *) ret,
- (u64 __percpu *) c->usage[0],
- sizeof(*ret) / sizeof(u64) + nr);
+ acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);
return ret;
}
@@ -292,8 +291,7 @@ int bch2_fs_usage_apply(struct bch_fs *c,
}
acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
- (u64 *) fs_usage,
- sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
+ (u64 *) fs_usage, fs_usage_u64s(c));
return ret;
}
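
The rewritten bch2_fs_usage_read() above keeps the same allocate-then-recheck shape as before, just expressed in u64s: the size is sampled before taking c->mark_lock, the buffer is allocated while sleeping is still allowed, and if the replicas table grew in the meantime the buffer is thrown away and the read retried. A hedged, self-contained sketch of that pattern, using a pthread rwlock and plain arrays as stand-ins for the real percpu machinery:

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

/* Hypothetical stand-ins for the real filesystem state and c->mark_lock. */
struct usage_table {
	pthread_rwlock_t	lock;
	unsigned		u64s;		/* current table size, may grow */
	unsigned long long	*counters;
};

/*
 * Allocate outside the lock, recheck under the lock: sleeping allocations
 * can't be done while the lock is held, so allocate first, then verify
 * the size is still current and retry if it isn't.
 */
static unsigned long long *usage_read_sketch(struct usage_table *t)
{
	unsigned u64s = t->u64s;	/* racy snapshot, verified below */
	unsigned long long *ret;
retry:
	ret = calloc(u64s, sizeof(*ret));
	if (!ret)
		return NULL;

	pthread_rwlock_rdlock(&t->lock);
	if (u64s != t->u64s) {		/* table resized under us */
		u64s = t->u64s;
		pthread_rwlock_unlock(&t->lock);
		free(ret);
		goto retry;
	}
	memcpy(ret, t->counters, u64s * sizeof(*ret));
	pthread_rwlock_unlock(&t->lock);
	return ret;
}

The recheck under the lock is what makes the racy initial snapshot safe: the copy is only taken once the size is known to match the table being copied.
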
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 88f197428e94..33ed1a96bedf 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -211,14 +211,18 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
/* Filesystem usage: */
-static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
+static inline unsigned fs_usage_u64s(struct bch_fs *c)
{
- struct bch_fs_usage *ret;
- ret = this_cpu_ptr(c->usage_scratch);
+ return sizeof(struct bch_fs_usage) / sizeof(u64) +
+ READ_ONCE(c->replicas.nr);
+}
- memset(ret, 0, sizeof(*ret) + c->replicas.nr * sizeof(u64));
+static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
+{
+ struct bch_fs_usage *ret = this_cpu_ptr(c->usage_scratch);
+ memset(ret, 0, fs_usage_u64s(c) * sizeof(u64));
return ret;
}
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 3b6acbf6613c..99283b1047ff 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -261,39 +261,37 @@ static void __replicas_table_update(struct bch_fs_usage __percpu *dst_p,
static int replicas_table_update(struct bch_fs *c,
struct bch_replicas_cpu *new_r)
{
- struct bch_fs_usage __percpu *new_usage[3] = { NULL, NULL, NULL };
+ struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
+ struct bch_fs_usage __percpu *new_scratch = NULL;
unsigned bytes = sizeof(struct bch_fs_usage) +
sizeof(u64) * new_r->nr;
- unsigned i;
int ret = -ENOMEM;
- for (i = 0; i < 3; i++) {
- if (i < 2 && !c->usage[i])
- continue;
-
- new_usage[i] = __alloc_percpu_gfp(bytes, sizeof(u64),
- GFP_NOIO);
- if (!new_usage[i])
- goto err;
- }
-
- for (i = 0; i < 2; i++) {
- if (!c->usage[i])
- continue;
-
- __replicas_table_update(new_usage[i], new_r,
- c->usage[i], &c->replicas);
-
- swap(c->usage[i], new_usage[i]);
- }
-
- swap(c->usage_scratch, new_usage[2]);
+ if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO)) ||
+ (c->usage[1] &&
+ !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO))) ||
+ !(new_scratch = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO)))
+ goto err;
- swap(c->replicas, *new_r);
+ if (c->usage[0])
+ __replicas_table_update(new_usage[0], new_r,
+ c->usage[0], &c->replicas);
+ if (c->usage[1])
+ __replicas_table_update(new_usage[1], new_r,
+ c->usage[1], &c->replicas);
+
+ swap(c->usage[0], new_usage[0]);
+ swap(c->usage[1], new_usage[1]);
+ swap(c->usage_scratch, new_scratch);
+ swap(c->replicas, *new_r);
ret = 0;
err:
- for (i = 0; i < 3; i++)
- free_percpu(new_usage[i]);
+ free_percpu(new_scratch);
+ free_percpu(new_usage[1]);
+ free_percpu(new_usage[0]);
return ret;
}
@@ -974,5 +972,6 @@ int bch2_fs_replicas_init(struct bch_fs *c)
{
c->journal.entry_u64s_reserved +=
reserve_journal_replicas(c, &c->replicas);
- return 0;
+
+ return replicas_table_update(c, &c->replicas);
}
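
Although replicas_table_update() above is restructured from a loop over three slots into explicit new_usage[0]/new_usage[1]/new_scratch allocations, the shape is the same prepare-then-swap update: allocate every replacement buffer first, copy the live counters across, and only then swap the new buffers in, so a failed allocation leaves the old tables untouched. A hedged sketch of that shape with hypothetical simplified types (a single table, calloc in place of __alloc_percpu_gfp):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical simplified stand-in for a resizable counter table. */
struct table {
	unsigned		nr;
	unsigned long long	*counters;
};

#define SWAP(a, b) do { typeof(a) _t = (a); (a) = (b); (b) = _t; } while (0)

/*
 * Prepare-then-swap: allocate all replacements, copy the live counters
 * into them (assuming new_nr >= live->nr), then publish by swapping.
 * On any allocation failure nothing is published.
 */
static int table_update_sketch(struct table *live, struct table *scratch,
			       unsigned new_nr)
{
	unsigned long long *new_live = NULL, *new_scratch = NULL;
	int ret = -ENOMEM;

	if (!(new_live    = calloc(new_nr, sizeof(*new_live))) ||
	    !(new_scratch = calloc(new_nr, sizeof(*new_scratch))))
		goto err;

	if (live->counters)
		memcpy(new_live, live->counters,
		       live->nr * sizeof(*new_live));

	SWAP(live->counters, new_live);
	SWAP(scratch->counters, new_scratch);
	live->nr = scratch->nr = new_nr;
	ret = 0;
err:
	/* on success these hold the old buffers, on failure the unpublished new ones */
	free(new_scratch);
	free(new_live);
	return ret;
}

The real function relies on the same trick: after the swaps the error path's free_percpu() calls release the old percpu buffers on success, and the partially allocated new ones on failure.
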
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index a8eb161585c1..1528f77e6d30 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -567,7 +567,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
{
struct bch_sb_field_members *mi;
struct bch_fs *c;
- unsigned i, iter_size, fs_usage_size;
+ unsigned i, iter_size;
const char *err;
pr_verbose_init(opts, "");
@@ -661,9 +661,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
(btree_blocks(c) + 1) * 2 *
sizeof(struct btree_node_iter_set);
- fs_usage_size = sizeof(struct bch_fs_usage) +
- sizeof(u64) * c->replicas.nr;
-
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
@@ -680,8 +677,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
max(offsetof(struct btree_read_bio, bio),
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
- !(c->usage[0] = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
- !(c->usage_scratch = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||