Diffstat (limited to 'fs/bcachefs/btree_gc.c')
-rw-r--r-- | fs/bcachefs/btree_gc.c | 106
1 file changed, 50 insertions, 56 deletions
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 9f5a79a8c240..c43b2072349b 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -479,25 +479,35 @@ static void bch2_gc_free(struct bch_fs *c)
 	c->usage[1] = NULL;
 }
 
-static void fs_usage_reset(struct bch_fs_usage *fs_usage)
+/*
+ * Accumulate percpu counters onto one cpu's copy - only valid when access
+ * against any percpu counter is guarded against
+ */
+static u64 *acc_percpu_u64s(u64 __percpu *p, unsigned nr)
 {
-	memset(&fs_usage->s.gc_start[0], 0,
-	       sizeof(*fs_usage) - offsetof(typeof(*fs_usage), s.gc_start));
-}
+	u64 *ret;
+	int cpu;
 
-static void fs_usage_cpy(struct bch_fs_usage *dst,
-			 struct bch_fs_usage *src)
-{
-	memcpy(&dst->s.gc_start[0],
-	       &src->s.gc_start[0],
-	       sizeof(*dst) - offsetof(typeof(*dst), s.gc_start));
+	preempt_disable();
+	ret = this_cpu_ptr(p);
+	preempt_enable();
+
+	for_each_possible_cpu(cpu) {
+		u64 *i = per_cpu_ptr(p, cpu);
+
+		if (i != ret) {
+			acc_u64s(ret, i, nr);
+			memset(i, 0, nr * sizeof(u64));
+		}
+	}
+
+	return ret;
 }
 
 static void bch2_gc_done_nocheck(struct bch_fs *c)
 {
 	struct bch_dev *ca;
 	unsigned i;
-	int cpu;
 
 	{
 		struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
@@ -527,42 +537,38 @@ static void bch2_gc_done_nocheck(struct bch_fs *c)
 	};
 
 	for_each_member_device(ca, c, i) {
-		struct bch_dev_usage *p;
-
-		for_each_possible_cpu(cpu) {
-			p = per_cpu_ptr(ca->usage[0], cpu);
-			memset(p, 0, sizeof(*p));
-		}
+		unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
+		struct bch_dev_usage *dst = (void *)
+			acc_percpu_u64s((void *) ca->usage[0], nr);
+		struct bch_dev_usage *src = (void *)
+			acc_percpu_u64s((void *) ca->usage[1], nr);
 
-		preempt_disable();
-		*this_cpu_ptr(ca->usage[0]) = __bch2_dev_usage_read(ca, 1);
-		preempt_enable();
+		*dst = *src;
 	}
 
 	{
-		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
-
-		for_each_possible_cpu(cpu)
-			fs_usage_reset(per_cpu_ptr(c->usage[0], cpu));
-
-		preempt_disable();
-		fs_usage_cpy(this_cpu_ptr(c->usage[0]), &src);
-		preempt_enable();
+		unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64);
+		struct bch_fs_usage *dst = (void *)
+			acc_percpu_u64s((void *) c->usage[0], nr);
+		struct bch_fs_usage *src = (void *)
+			acc_percpu_u64s((void *) c->usage[1], nr);
+
+		memcpy(&dst->s.gc_start[0],
+		       &src->s.gc_start[0],
+		       sizeof(*dst) - offsetof(typeof(*dst), s.gc_start));
 	}
-
 }
 
 static void bch2_gc_done(struct bch_fs *c, bool initial)
 {
 	struct bch_dev *ca;
 	unsigned i;
-	int cpu;
 
 #define copy_field(_f, _msg, ...)					\
-	if (dst._f != src._f) {						\
-		bch_err(c, _msg ": got %llu, should be %llu, fixing"\
-			, ##__VA_ARGS__, dst._f, src._f);		\
-		dst._f = src._f;					\
+	if (dst->_f != src->_f) {					\
+		bch_err(c, _msg ": got %llu, should be %llu, fixing"	\
+			, ##__VA_ARGS__, dst->_f, src->_f);		\
+		dst->_f = src->_f;					\
 	}
 #define copy_stripe_field(_f, _msg, ...)				\
 	if (dst->_f != src->_f) {					\
@@ -643,9 +649,11 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 	};
 
 	for_each_member_device(ca, c, i) {
-		struct bch_dev_usage dst = __bch2_dev_usage_read(ca, 0);
-		struct bch_dev_usage src = __bch2_dev_usage_read(ca, 1);
-		struct bch_dev_usage *p;
+		unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
+		struct bch_dev_usage *dst = (void *)
+			acc_percpu_u64s((void *) ca->usage[0], nr);
+		struct bch_dev_usage *src = (void *)
+			acc_percpu_u64s((void *) ca->usage[1], nr);
 		unsigned b;
 
 		for (b = 0; b < BCH_DATA_NR; b++)
@@ -659,21 +667,14 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 				       "sectors[%s]", bch2_data_types[b]);
 		copy_dev_field(sectors_fragmented,
 			       "sectors_fragmented");
-
-		for_each_possible_cpu(cpu) {
-			p = per_cpu_ptr(ca->usage[0], cpu);
-			memset(p, 0, sizeof(*p));
-		}
-
-		preempt_disable();
-		p = this_cpu_ptr(ca->usage[0]);
-		*p = dst;
-		preempt_enable();
 	}
 
 	{
-		struct bch_fs_usage dst = __bch2_fs_usage_read(c, 0);
-		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
+		unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64);
+		struct bch_fs_usage *dst = (void *)
+			acc_percpu_u64s((void *) c->usage[0], nr);
+		struct bch_fs_usage *src = (void *)
+			acc_percpu_u64s((void *) c->usage[1], nr);
 		unsigned r, b;
 
 		copy_fs_field(s.hidden, "hidden");
@@ -696,13 +697,6 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 		for (b = 0; b < BCH_DATA_NR; b++)
			copy_fs_field(buckets[b],
 				      "buckets[%s]", bch2_data_types[b]);
-
-		for_each_possible_cpu(cpu)
-			fs_usage_reset(per_cpu_ptr(c->usage[0], cpu));
-
-		preempt_disable();
-		fs_usage_cpy(this_cpu_ptr(c->usage[0]), &dst);
-		preempt_enable();
 	}
 out:
 	percpu_up_write(&c->mark_lock);
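For readers unfamiliar with the per-cpu accumulation pattern the new acc_percpu_u64s() helper introduces, here is a minimal userspace sketch of the same idea: fold every CPU's copy of an array of u64 counters onto one copy and zero the others. This is illustrative only, not the kernel code: NR_CPUS, NR_COUNTERS and counters[][] are local stand-ins for the kernel's __percpu allocations, acc_u64s() is reimplemented here, and the real helper picks the current CPU's copy via this_cpu_ptr() rather than slot 0. As the patch's comment notes, the pattern is only valid while the caller guards concurrent access to the counters (here, c->mark_lock is held for write).

/*
 * Illustrative userspace sketch only - not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS		4
#define NR_COUNTERS	3

/* one copy of the counters per "cpu" */
static uint64_t counters[NR_CPUS][NR_COUNTERS];

/* element-wise accumulate src into dst */
static void acc_u64s(uint64_t *dst, const uint64_t *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		dst[i] += src[i];
}

/*
 * Same shape as acc_percpu_u64s(): pick one copy as the destination,
 * fold every other copy into it and zero those copies. The kernel
 * version picks the current CPU's copy with this_cpu_ptr(); the
 * sketch simply uses copy 0.
 */
static uint64_t *acc_percpu_u64s_sketch(unsigned nr)
{
	uint64_t *ret = counters[0];
	unsigned cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		uint64_t *i = counters[cpu];

		if (i != ret) {
			acc_u64s(ret, i, nr);
			memset(i, 0, nr * sizeof(uint64_t));
		}
	}

	return ret;
}

int main(void)
{
	unsigned cpu, i;
	uint64_t *sum;

	/* pretend each cpu bumped its own counters */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		for (i = 0; i < NR_COUNTERS; i++)
			counters[cpu][i] = cpu + i;

	sum = acc_percpu_u64s_sketch(NR_COUNTERS);

	for (i = 0; i < NR_COUNTERS; i++)
		printf("counter %u = %llu\n", i, (unsigned long long) sum[i]);

	return 0;
}

After the fold, the returned copy holds the totals and every other copy reads zero, which is why the patch can simply compare dst and src fields (or memcpy the gc_start region) instead of looping over possible CPUs at each use site.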