author		Kent Overstreet <kent.overstreet@gmail.com>	2018-03-29 12:27:40 -0400
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-05-22 00:44:18 -0400
commit		974cf00f19a8ee2413773534071a2ae469d46aa9 (patch)
tree		94dd408e5c5dec63ea4cf1a97a7fc9bc7ccc9d79
parent		9578fb06b7ecb8448f1b6d86a8585153682d4f99 (diff)
bcachefs: Avoid allocation fragmentation
The allocation code wasn't explicitly preferring buckets at the start or end of the device, and for some odd reason was mostly picking buckets at the start of the device with a few from the end mixed in. This fixes that, and also makes it prefer contiguous buckets.
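A note on the new comparator in the diff below: it chains (a > b) - (a < b) three-way comparisons with the GCC/Clang `x ?: y` extension, so each later field is consulted only when the earlier fields tie. A minimal standalone sketch of the same idiom (the struct and values here are hypothetical, not from the bcachefs code):

#include <stdio.h>

struct entry {
	unsigned long	key;
	size_t		nr;
	size_t		bucket;
};

/*
 * (a > b) - (a < b) yields -1, 0 or 1 without the overflow risk of a
 * plain subtraction; GCC's x ?: y evaluates y only when x is 0, so
 * each later field only breaks ties in the earlier ones.
 */
static int entry_cmp(struct entry l, struct entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:		/* key, ascending */
	       (l.nr < r.nr) - (l.nr > r.nr) ?:			/* bigger run first */
	       (l.bucket > r.bucket) - (l.bucket < r.bucket);	/* lower bucket first */
}

int main(void)
{
	struct entry a = { .key = 1, .nr = 4, .bucket = 10 };
	struct entry b = { .key = 1, .nr = 2, .bucket = 30 };

	/* keys tie, so a sorts first because it spans more buckets */
	printf("%d\n", entry_cmp(a, b));	/* prints -1 */
	return 0;
}

Preferring larger runs, then lower bucket numbers, on a key tie is what biases allocation toward contiguous space at one end of the device.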
-rw-r--r--	fs/bcachefs/alloc.c	45
-rw-r--r--	fs/bcachefs/alloc_types.h	1
2 files changed, 31 insertions, 15 deletions
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 0b8d0b20b5ed..7bb495249f7e 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -660,13 +660,15 @@ static inline int bucket_alloc_cmp(alloc_heap *h,
struct alloc_heap_entry l,
struct alloc_heap_entry r)
{
- return (l.key > r.key) - (l.key < r.key);
+ return (l.key > r.key) - (l.key < r.key) ?:
+ (l.nr < r.nr) - (l.nr > r.nr) ?:
+ (l.bucket > r.bucket) - (l.bucket < r.bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
struct bucket_array *buckets;
- struct alloc_heap_entry e;
+ struct alloc_heap_entry e = { 0 };
size_t b;
ca->alloc_heap.used = 0;
@@ -685,32 +687,45 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
*/
for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
+ unsigned long key = bucket_sort_key(c, ca, b, m);
if (!bch2_can_invalidate_bucket(ca, b, m))
continue;
- e = (struct alloc_heap_entry) {
- .bucket = b,
- .key = bucket_sort_key(c, ca, b, m)
- };
-
- heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+ if (e.nr && e.bucket + e.nr == b && e.key == key) {
+ e.nr++;
+ } else {
+ if (e.nr)
+ heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+
+ e = (struct alloc_heap_entry) {
+ .bucket = b,
+ .nr = 1,
+ .key = key,
+ };
+ }
cond_resched();
}
+ if (e.nr)
+ heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+
up_read(&ca->bucket_lock);
mutex_unlock(&c->prio_clock[READ].lock);
heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
- /*
- * If we run out of buckets to invalidate, bch2_allocator_thread() will
- * kick stuff and retry us
- */
- while (!fifo_full(&ca->free_inc) &&
- heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp))
- bch2_invalidate_one_bucket(c, ca, e.bucket);
+ while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
+ for (b = e.bucket;
+ b < e.bucket + e.nr;
+ b++) {
+ if (fifo_full(&ca->free_inc))
+ return;
+
+ bch2_invalidate_one_bucket(c, ca, b);
+ }
+ }
}
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index f3bd47011025..bee1e5a35778 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -80,6 +80,7 @@ struct write_point_specifier {
struct alloc_heap_entry {
size_t bucket;
+ size_t nr;
unsigned long key;
};
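For context on the new `nr` field: the loop in find_reclaimable_buckets_lru above folds adjacent buckets that share a sort key into a single heap entry, which is what lets the allocator hand out contiguous runs. A minimal sketch of that run-coalescing pattern, with a hypothetical sort_key() standing in for bucket_sort_key():

#include <stdio.h>
#include <stddef.h>

struct run {
	size_t		bucket;	/* first bucket in the run */
	size_t		nr;	/* number of contiguous buckets */
	unsigned long	key;
};

/* Stand-in for bucket_sort_key(): any per-bucket priority works. */
static unsigned long sort_key(size_t b)
{
	return b / 4;	/* hypothetical: every 4 adjacent buckets share a key */
}

static void emit(struct run e)
{
	printf("run: bucket=%zu nr=%zu key=%lu\n", e.bucket, e.nr, e.key);
}

int main(void)
{
	struct run e = { 0 };
	size_t b;

	for (b = 0; b < 10; b++) {
		unsigned long key = sort_key(b);

		/*
		 * Extend the current run if b is adjacent and the key
		 * matches, mirroring `e.bucket + e.nr == b && e.key == key`
		 * in the diff above:
		 */
		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				emit(e);	/* flush the finished run */
			e = (struct run) { .bucket = b, .nr = 1, .key = key };
		}
	}
	if (e.nr)
		emit(e);	/* flush the final run */
	return 0;
}

The trailing flush after the loop corresponds to the second heap_add_or_replace() call in the patch: without it, the last run found would never reach the heap.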