diff options
author	Kent Overstreet <kent.overstreet@gmail.com>	2018-03-29 12:27:40 -0400
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-04-04 12:42:56 -0400
commit	33afe766b85dd74c12f7c1bdd948c780b7e7e44e (patch)
tree	17586991506975b3839c1583b64a44e23a4a1822
parent	42e79d6265d25a48bd3c0f1d3da7adc40a3a6fed (diff)
bcachefs: Avoid allocation fragmentation
The allocation code wasn't explicitly preferring buckets at the start or
end of the device, and for some odd reason was mostly picking buckets at
the start of the device with a few from the end mixed in.
This fixes that, and also makes it prefer contiguous buckets.
-rw-r--r--	fs/bcachefs/alloc.c	45
-rw-r--r--	fs/bcachefs/alloc_types.h	1
2 files changed, 31 insertions(+), 15 deletions(-)
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 0b8d0b20b5ed..7bb495249f7e 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -660,13 +660,15 @@ static inline int bucket_alloc_cmp(alloc_heap *h,
 				   struct alloc_heap_entry l,
 				   struct alloc_heap_entry r)
 {
-	return (l.key > r.key) - (l.key < r.key);
+	return (l.key > r.key) - (l.key < r.key) ?:
+		(l.nr < r.nr) - (l.nr > r.nr) ?:
+		(l.bucket > r.bucket) - (l.bucket < r.bucket);
 }
 
 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 {
 	struct bucket_array *buckets;
-	struct alloc_heap_entry e;
+	struct alloc_heap_entry e = { 0 };
 	size_t b;
 
 	ca->alloc_heap.used = 0;
@@ -685,32 +687,45 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 	 */
 	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
 		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+		unsigned long key = bucket_sort_key(c, ca, b, m);
 
 		if (!bch2_can_invalidate_bucket(ca, b, m))
 			continue;
 
-		e = (struct alloc_heap_entry) {
-			.bucket = b,
-			.key = bucket_sort_key(c, ca, b, m)
-		};
-
-		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+		if (e.nr && e.bucket + e.nr == b && e.key == key) {
+			e.nr++;
+		} else {
+			if (e.nr)
+				heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+
+			e = (struct alloc_heap_entry) {
+				.bucket = b,
+				.nr = 1,
+				.key = key,
+			};
+		}
 
 		cond_resched();
 	}
 
+	if (e.nr)
+		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+
 	up_read(&ca->bucket_lock);
 	mutex_unlock(&c->prio_clock[READ].lock);
 
 	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
 
-	/*
-	 * If we run out of buckets to invalidate, bch2_allocator_thread() will
-	 * kick stuff and retry us
-	 */
-	while (!fifo_full(&ca->free_inc) &&
-	       heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp))
-		bch2_invalidate_one_bucket(c, ca, e.bucket);
+	while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
+		for (b = e.bucket;
+		     b < e.bucket + e.nr;
+		     b++) {
+			if (fifo_full(&ca->free_inc))
+				return;
+
+			bch2_invalidate_one_bucket(c, ca, b);
+		}
+	}
 }
 
 static void find_reclaimable_buckets_fifo(struct bch_fs *c,
 					  struct bch_dev *ca)
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index f3bd47011025..bee1e5a35778 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -80,6 +80,7 @@ struct write_point_specifier {
 
 struct alloc_heap_entry {
 	size_t bucket;
+	size_t nr;
 	unsigned long key;
 };