diff options
author    : Kent Overstreet <kent.overstreet@gmail.com> | 2018-03-03 16:16:03 -0500
committer : Kent Overstreet <kent.overstreet@gmail.com> | 2018-03-14 20:11:32 -0400
commit    : 3f1529ed45674d232c33fb2f4725d92b51f57fc1 (patch)
tree      : 5ac6da38a90aee25f3e6d716695e0089f16d563b
parent    : 28461f036c5486ff4da862c760a3375ee4c8df83 (diff)
bcachefs: fix copygc so it doesn't spin when no work to do
-rw-r--r--  fs/bcachefs/buckets.c       | 13
-rw-r--r--  fs/bcachefs/buckets_types.h |  1
-rw-r--r--  fs/bcachefs/movinggc.c      | 31
3 files changed, 38 insertions(+), 7 deletions(-)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index 4ea89a97c45c..864de940f1e7 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -285,6 +285,17 @@ static inline int is_unavailable_bucket(struct bucket_mark m) return !is_available_bucket(m); } +static inline int is_fragmented_bucket(struct bucket_mark m, + struct bch_dev *ca) +{ + if (!m.owned_by_allocator && + m.data_type == BCH_DATA_USER && + bucket_sectors_used(m)) + return max_t(int, 0, (int) ca->mi.bucket_size - + bucket_sectors_used(m)); + return 0; +} + static inline enum bch_data_type bucket_type(struct bucket_mark m) { return m.cached_sectors && !m.dirty_sectors @@ -361,6 +372,8 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca, dev_usage->sectors[new.data_type] += new.dirty_sectors; dev_usage->sectors[BCH_DATA_CACHED] += (int) new.cached_sectors - (int) old.cached_sectors; + dev_usage->sectors_fragmented += + is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca); if (!is_available_bucket(old) && is_available_bucket(new)) bch2_wake_allocator(ca); diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h index 6f52a109d102..a0256e13618c 100644 --- a/fs/bcachefs/buckets_types.h +++ b/fs/bcachefs/buckets_types.h @@ -53,6 +53,7 @@ struct bch_dev_usage { /* _compressed_ sectors: */ u64 sectors[BCH_DATA_NR]; + u64 sectors_fragmented; }; /* kill, switch to bch_data_type? 
*/ diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index 2aa58b557794..ea7f591e18b1 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -213,8 +213,9 @@ static int bch2_copygc_thread(void *arg) struct bch_dev *ca = arg; struct bch_fs *c = ca->fs; struct io_clock *clock = &c->io_clock[WRITE]; + struct bch_dev_usage usage; unsigned long last; - u64 available, want, next; + u64 available, fragmented, reserve, next; set_freezable(); @@ -223,16 +224,32 @@ static int bch2_copygc_thread(void *arg) break; last = atomic_long_read(&clock->now); + + reserve = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) * + ca->mi.bucket_size * + c->opts.gc_reserve_percent, 200); + + usage = bch2_dev_usage_read(c, ca); + /* * don't start copygc until less than half the gc reserve is * available: */ - available = dev_buckets_available(c, ca); - want = div64_u64((ca->mi.nbuckets - ca->mi.first_bucket) * - c->opts.gc_reserve_percent, 200); - if (available > want) { - next = last + (available - want) * - ca->mi.bucket_size; + available = __dev_buckets_available(ca, usage) * + ca->mi.bucket_size; + if (available > reserve) { + next = last + available - reserve; + bch2_kthread_io_clock_wait(clock, next); + continue; + } + + /* + * don't start copygc until there's more than half the copygc + * reserve of fragmented space: + */ + fragmented = usage.sectors_fragmented; + if (fragmented < reserve) { + next = last + reserve - fragmented; bch2_kthread_io_clock_wait(clock, next); continue; } |