author	Kent Overstreet <kent.overstreet@gmail.com>	2021-12-28 14:30:25 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2021-12-28 15:03:54 -0500
commit	38f8daa2b17627488c51f2e0c150213bd1636e00 (patch)
tree	5904dee003738f53da3fe569c229cff63c7acc00 /libbcachefs/alloc_foreground.c
parent	be490d2732dbb1daf59ea00dec1fd37a9c17a37c (diff)
Update bcachefs sources to 078a1a596a bcachefs: Optimize bucket reuse
Diffstat (limited to 'libbcachefs/alloc_foreground.c')
-rw-r--r--	libbcachefs/alloc_foreground.c	158
1 file changed, 105 insertions(+), 53 deletions(-)
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index dce77cc2..0a634125 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -43,9 +43,32 @@
* reference _after_ doing the index update that makes its allocation reachable.
*/
+static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ ob->hash = *slot;
+ *slot = idx;
+}
+
+static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ while (*slot != idx) {
+ BUG_ON(!*slot);
+ slot = &c->open_buckets[*slot].hash;
+ }
+
+ *slot = ob->hash;
+ ob->hash = 0;
+}
+
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (ob->ec) {
bch2_ec_bucket_written(c, ob);
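
The hash added above is an intrusive, index-based chained hash: each open_bucket stores the array index of the next bucket in its chain (ob->hash), and index 0 doubles as the chain terminator, which is why removal can walk the chain with BUG_ON(!*slot). A minimal standalone sketch of the pattern, with illustrative names and sizes rather than the real bcachefs definitions:

	/* Sketch: intrusive, index-based hash chaining (illustrative only). */
	#include <assert.h>

	typedef unsigned short idx_t;		/* index 0 terminates a chain */

	struct node {
		unsigned key;
		idx_t hash;			/* index of the next node in the chain */
	};

	static struct node nodes[16];		/* node 0 is never handed out */
	static idx_t slots[8];

	static idx_t *hashslot(unsigned key)
	{
		return &slots[key % 8];
	}

	static void hash_add(struct node *n)
	{
		idx_t idx = n - nodes;
		idx_t *slot = hashslot(n->key);

		n->hash = *slot;		/* push onto the front of the chain */
		*slot = idx;
	}

	static void hash_remove(struct node *n)
	{
		idx_t idx = n - nodes;
		idx_t *slot = hashslot(n->key);

		while (*slot != idx) {		/* find the link that points at us */
			assert(*slot);		/* entry must be present */
			slot = &nodes[*slot].hash;
		}

		*slot = n->hash;		/* unlink */
		n->hash = 0;
	}

	int main(void)
	{
		nodes[1].key = 42;
		nodes[2].key = 50;		/* 50 % 8 == 42 % 8: same slot */

		hash_add(&nodes[1]);
		hash_add(&nodes[2]);
		hash_remove(&nodes[1]);		/* node 1 sits behind node 2 in the chain */
		assert(slots[42 % 8] == 2 && nodes[2].hash == 0);
		return 0;
	}

Storing indices rather than pointers keeps each link as small as open_bucket_idx_t and lets 0 serve as the terminator, matching the freelist's index-based convention.
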
@@ -55,14 +78,16 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
percpu_down_read(&c->mark_lock);
spin_lock(&ob->lock);
- bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);
+ bch2_mark_alloc_bucket(c, ca, ob->bucket, false);
ob->valid = false;
- ob->type = 0;
+ ob->data_type = 0;
spin_unlock(&ob->lock);
percpu_up_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
+ bch2_open_bucket_hash_remove(c, ob);
+
ob->freelist = c->open_buckets_freelist;
c->open_buckets_freelist = ob - c->open_buckets;
@@ -81,8 +106,7 @@ void bch2_open_bucket_write_error(struct bch_fs *c,
unsigned i;
open_bucket_for_each(c, obs, ob, i)
- if (ob->ptr.dev == dev &&
- ob->ec)
+ if (ob->dev == dev && ob->ec)
bch2_ec_bucket_cancel(c, ob);
}
@@ -95,7 +119,7 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
ob = c->open_buckets + c->open_buckets_freelist;
c->open_buckets_freelist = ob->freelist;
atomic_set(&ob->pin, 1);
- ob->type = 0;
+ ob->data_type = 0;
c->open_buckets_nr_free--;
return ob;
@@ -105,8 +129,8 @@ static void open_bucket_free_unused(struct bch_fs *c,
struct write_point *wp,
struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
- bool may_realloc = wp->type == BCH_DATA_user;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ bool may_realloc = wp->data_type == BCH_DATA_user;
BUG_ON(ca->open_buckets_partial_nr >
ARRAY_SIZE(ca->open_buckets_partial));
@@ -133,32 +157,28 @@ static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
struct open_bucket *ob;
unsigned i;
+ rcu_read_lock();
open_bucket_for_each(c, obs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
- BUG_ON(ptr_stale(ca, &ob->ptr));
+ BUG_ON(*bucket_gen(ca, ob->bucket) != ob->gen);
}
+ rcu_read_unlock();
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
- struct bucket_array *buckets;
- ssize_t b;
+ while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
+ u64 b = ca->new_fs_bucket_idx++;
- rcu_read_lock();
- buckets = bucket_array(ca);
-
- for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
- if (is_available_bucket(buckets->b[b].mark) &&
- (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)) &&
- !buckets->b[b].mark.owned_by_allocator)
- goto success;
- b = -1;
-success:
- rcu_read_unlock();
- return b;
+ if (!is_superblock_bucket(ca, b) &&
+ (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
+ return b;
+ }
+
+ return -1;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
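
bch2_bucket_alloc_new_fs() now keeps a persistent cursor (ca->new_fs_bucket_idx) instead of rescanning the bucket array under RCU on every call, so a sequence of new-filesystem allocations is linear overall rather than quadratic. A hedged sketch of the cursor pattern; the type and helper names here are invented for illustration:

	/* Sketch: cursor-based linear bucket allocation (names invented,
	 * not the bcachefs API). */
	#include <stdbool.h>

	struct dev {
		unsigned long nbuckets;
		unsigned long cursor;	/* persists across calls, like new_fs_bucket_idx */
	};

	static bool bucket_usable(struct dev *d, unsigned long b)
	{
		(void)d;		/* a real check consults per-device state */
		return b >= 1;		/* e.g. bucket 0 holds the superblock */
	}

	static long alloc_bucket(struct dev *d)
	{
		while (d->cursor < d->nbuckets) {
			unsigned long b = d->cursor++;	/* advance even when we skip */

			if (bucket_usable(d, b))
				return b;
		}

		return -1;		/* device exhausted */
	}
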
@@ -252,15 +272,14 @@ out:
ob->valid = true;
ob->sectors_free = ca->mi.bucket_size;
ob->alloc_reserve = reserve;
- ob->ptr = (struct bch_extent_ptr) {
- .type = 1 << BCH_EXTENT_ENTRY_ptr,
- .gen = bucket(ca, b)->mark.gen,
- .offset = bucket_to_sector(ca, b),
- .dev = ca->dev_idx,
- };
-
+ ob->dev = ca->dev_idx;
+ ob->gen = *bucket_gen(ca, b);
+ ob->bucket = b;
spin_unlock(&ob->lock);
+ ca->nr_open_buckets++;
+ bch2_open_bucket_hash_add(c, ob);
+
if (c->blocked_allocate_open_bucket) {
bch2_time_stats_update(
&c->times[BCH_TIME_blocked_allocate_open_bucket],
@@ -275,7 +294,6 @@ out:
c->blocked_allocate = 0;
}
- ca->nr_open_buckets++;
spin_unlock(&c->freelist_lock);
bch2_wake_allocator(ca);
@@ -339,9 +357,9 @@ static void add_new_bucket(struct bch_fs *c,
struct open_bucket *ob)
{
unsigned durability =
- bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;
+ bch_dev_bkey_exists(c, ob->dev)->mi.durability;
- __clear_bit(ob->ptr.dev, devs_may_alloc->d);
+ __clear_bit(ob->dev, devs_may_alloc->d);
*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
? durability : 1;
*have_cache |= !durability;
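
In add_new_bucket() above, a device's durability is how many replicas a pointer on it effectively counts for: with BUCKET_ALLOC_USE_DURABILITY set, a durability-2 device satisfies two replicas at once, while a durability-0 cache device contributes nothing toward nr_effective and instead sets *have_cache. A small worked sketch of that accounting; the harness around the two lines from the diff is invented:

	/* Sketch: replica accounting by device durability (harness invented). */
	#include <assert.h>
	#include <stdbool.h>

	static void account_bucket(unsigned durability, bool use_durability,
				   unsigned *nr_effective, bool *have_cache)
	{
		*nr_effective += use_durability ? durability : 1;
		*have_cache |= !durability;
	}

	int main(void)
	{
		unsigned nr_effective = 0;
		bool have_cache = false;

		account_bucket(2, true, &nr_effective, &have_cache);	/* 2x-durable device */
		account_bucket(0, true, &nr_effective, &have_cache);	/* cache device */

		/* the durable bucket counts as two replicas; the cache bucket as none: */
		assert(nr_effective == 2 && have_cache);
		return 0;
	}
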
@@ -451,13 +469,13 @@ static int bucket_alloc_from_stripe(struct bch_fs *c,
continue;
ob = c->open_buckets + h->s->blocks[ec_idx];
- if (ob->ptr.dev == devs_sorted.devs[i] &&
+ if (ob->dev == devs_sorted.devs[i] &&
!test_and_set_bit(ec_idx, h->s->blocks_allocated))
goto got_bucket;
}
goto out_put_head;
got_bucket:
- ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ ca = bch_dev_bkey_exists(c, ob->dev);
ob->ec_idx = ec_idx;
ob->ec = h->s;
@@ -487,12 +505,12 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
unsigned i;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (*nr_effective < nr_replicas &&
- test_bit(ob->ptr.dev, devs_may_alloc->d) &&
+ test_bit(ob->dev, devs_may_alloc->d) &&
(ca->mi.durability ||
- (wp->type == BCH_DATA_user && !*have_cache)) &&
+ (wp->data_type == BCH_DATA_user && !*have_cache)) &&
(ob->ec || !need_ec)) {
add_new_bucket(c, ptrs, devs_may_alloc,
nr_effective, have_cache,
@@ -524,7 +542,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
unsigned i;
rcu_read_lock();
- devs = target_rw_devs(c, wp->type, target);
+ devs = target_rw_devs(c, wp->data_type, target);
rcu_read_unlock();
/* Don't allocate from devices we already have pointers to: */
@@ -532,7 +550,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
__clear_bit(devs_have->devs[i], devs.d);
open_bucket_for_each(c, ptrs, ob, i)
- __clear_bit(ob->ptr.dev, devs.d);
+ __clear_bit(ob->dev, devs.d);
if (erasure_code) {
if (!ec_open_bucket(c, ptrs)) {
@@ -592,7 +610,7 @@ void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
unsigned i, j;
open_bucket_for_each(c, obs, ob, i) {
- bool drop = !ca || ob->ptr.dev == ca->dev_idx;
+ bool drop = !ca || ob->dev == ca->dev_idx;
if (!drop && ob->ec) {
mutex_lock(&ob->ec->lock);
@@ -601,7 +619,7 @@ void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
continue;
ob2 = c->open_buckets + ob->ec->blocks[j];
- drop |= ob2->ptr.dev == ca->dev_idx;
+ drop |= ob2->dev == ca->dev_idx;
}
mutex_unlock(&ob->ec->lock);
}
@@ -785,11 +803,11 @@ retry:
wp = writepoint_find(c, write_point.v);
- if (wp->type == BCH_DATA_user)
+ if (wp->data_type == BCH_DATA_user)
ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
/* metadata may not allocate on cache devices: */
- if (wp->type != BCH_DATA_user)
+ if (wp->data_type != BCH_DATA_user)
have_cache = true;
if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
@@ -867,12 +885,27 @@ err:
}
}
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ return (struct bch_extent_ptr) {
+ .type = 1 << BCH_EXTENT_ENTRY_ptr,
+ .gen = ob->gen,
+ .dev = ob->dev,
+ .offset = bucket_to_sector(ca, ob->bucket) +
+ ca->mi.bucket_size -
+ ob->sectors_free,
+ };
+}
+
/*
* Append pointers to the space we just allocated to @k, and mark @sectors space
* as allocated out of @ob
*/
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
- struct bkey_i *k, unsigned sectors)
+ struct bkey_i *k, unsigned sectors,
+ bool cached)
{
struct open_bucket *ob;
@@ -882,14 +915,14 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
wp->sectors_free -= sectors;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
- struct bch_extent_ptr tmp = ob->ptr;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
- tmp.cached = !ca->mi.durability &&
- wp->type == BCH_DATA_user;
+ ptr.cached = cached ||
+ (!ca->mi.durability &&
+ wp->data_type == BCH_DATA_user);
- tmp.offset += ca->mi.bucket_size - ob->sectors_free;
- bch2_bkey_append_ptr(k, tmp);
+ bch2_bkey_append_ptr(k, ptr);
BUG_ON(sectors > ob->sectors_free);
ob->sectors_free -= sectors;
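
bch2_ob_ptr() derives the pointer's sector offset from the open bucket's fill level, bucket start plus bucket size minus sectors still free, so each appended pointer addresses the first unwritten sector; the removed code achieved the same thing by adjusting tmp.offset inside the loop. A worked example with invented numbers:

	/* Worked example: pointer offset from bucket fill level (numbers invented). */
	#include <assert.h>

	int main(void)
	{
		unsigned long bucket_start = 4096;	/* bucket_to_sector(ca, ob->bucket) */
		unsigned bucket_size = 512;		/* ca->mi.bucket_size, in sectors */
		unsigned sectors_free = 384;		/* so 128 sectors already written */

		/* the appended pointer addresses the first unwritten sector: */
		unsigned long offset = bucket_start + bucket_size - sectors_free;
		assert(offset == 4096 + 128);
		return 0;
	}

Note that ob->sectors_free is only decremented after the pointer is built, so the offset reflects the bucket's state at the time of the append.
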
@@ -919,7 +952,7 @@ static inline void writepoint_init(struct write_point *wp,
enum bch_data_type type)
{
mutex_init(&wp->lock);
- wp->type = type;
+ wp->data_type = type;
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
@@ -956,3 +989,22 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
writepoint_hash(c, wp->write_point));
}
}
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list) {
+ pr_buf(out, "%zu ref %u type %s\n",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ bch2_data_types[ob->data_type]);
+ }
+ spin_unlock(&ob->lock);
+ }
+
+}