diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2020-06-15 17:38:26 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2020-06-15 17:38:26 -0400 |
commit | f22af5cc7cd5e1df6a6e3fdea235eb469052e09b (patch) | |
tree | 5a41ec876aa0aa74209cb4dc79d13d4b455bbb70 /fs/bcachefs/alloc_background.c | |
parent | 8a316f4112c5aa4b3c2c4745fa83475dbec8a959 (diff) |
bcachefs: Increase size of btree node reserve
Also tweak the allocator to be more aggressive about keeping it full.
The recent changes to make updates to interior nodes transactional (and
thus generate updates to the alloc btree) all put more stress on the
btree node reserves.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r-- | fs/bcachefs/alloc_background.c | 9 |
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index 0d64ba631d1a..27b9ccac1ad5 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -501,6 +501,7 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw) static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca) { unsigned long gc_count = c->gc_count; + u64 available; int ret = 0; ca->allocator_state = ALLOCATOR_BLOCKED; @@ -516,9 +517,11 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca) if (gc_count != c->gc_count) ca->inc_gen_really_needs_gc = 0; - if ((ssize_t) (dev_buckets_available(c, ca) - - ca->inc_gen_really_needs_gc) >= - (ssize_t) fifo_free(&ca->free_inc)) + available = max_t(s64, 0, dev_buckets_available(c, ca) - + ca->inc_gen_really_needs_gc); + + if (available > fifo_free(&ca->free_inc) || + (available && !fifo_full(&ca->free[RESERVE_BTREE]))) break; up_read(&c->gc_lock); |