author | Kent Overstreet <kent.overstreet@linux.dev> | 2025-03-31 15:46:45 -0400
committer | Kent Overstreet <kent.overstreet@linux.dev> | 2025-04-06 19:13:45 -0400
commit | 067a3eeadc1a229e5fe409944a7500406dd562ea (patch)
tree | eb3a3c4b689323a0e9a74aee0c780841080fa5a8
parent | 935cfd90fcbe9a3a0a7932db0008c6095ddd5435 (diff)
bcachefs: new_stripe_alloc_buckets() takes alloc_request
More stack usage improvements: instead of creating a new alloc_request
(currently on the stack), save/restore just the fields we need to reuse.
This is a bit tricky because we're doing a normal alloc_foreground.c
allocation, which calls into ec.c to get a stripe, which in turn does
more normal allocations - some of the fields get reused, and used
differently.
So we have to save and restore them - but the stack usage improvements
will be well worth it.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
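
The idea in the commit message can be illustrated with a small standalone sketch (hypothetical names, not the bcachefs code): instead of building a second request object on the stack for the nested allocation, the caller's request is reused, with the clobbered fields saved on entry and restored on exit.

```c
#include <stdbool.h>

/* Hypothetical stand-in for struct alloc_request. */
struct request {
	unsigned	nr_replicas;
	unsigned	nr_effective;
	bool		have_cache;
};

/* Stand-in for the nested allocator that consumes the reused request. */
static int nested_alloc(struct request *req)
{
	return req->nr_replicas ? 0 : -1;
}

static int alloc_reusing_request(struct request *req, unsigned want)
{
	/* Save only the fields the nested allocation will clobber... */
	unsigned saved_nr_replicas	= req->nr_replicas;
	unsigned saved_nr_effective	= req->nr_effective;
	bool saved_have_cache		= req->have_cache;
	int ret;

	/* ...repurpose them for the nested allocation... */
	req->nr_replicas	= want;
	req->nr_effective	= 0;
	req->have_cache		= true;

	ret = nested_alloc(req);

	/* ...and restore them on every exit path so the caller's state is intact. */
	req->nr_replicas	= saved_nr_replicas;
	req->nr_effective	= saved_nr_effective;
	req->have_cache		= saved_have_cache;
	return ret;
}
```

The actual patch below has the same shape, using an err: label so both early-exit paths fall through to the restore before returning.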
-rw-r--r-- | fs/bcachefs/ec.c | 73
1 file changed, 44 insertions, 29 deletions
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 952211f92555..88ff294e977d 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1716,8 +1716,9 @@ err:
 }
 
 static int new_stripe_alloc_buckets(struct btree_trans *trans,
+				    struct alloc_request *req,
 				    struct ec_stripe_head *h, struct ec_stripe_new *s,
-				    enum bch_watermark watermark, struct closure *cl)
+				    struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
 	struct open_bucket *ob;
@@ -1725,17 +1726,21 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
 	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
 	int ret = 0;
 
-	struct alloc_request req = {
-		.watermark	= watermark,
-		.devs_may_alloc	= h->devs,
-		.have_cache	= true,
-	};
+	enum bch_data_type saved_data_type = req->data_type;
+	struct open_buckets saved_ptrs = req->ptrs;
+	unsigned saved_nr_replicas = req->nr_replicas;
+	unsigned saved_nr_effective = req->nr_effective;
+	bool saved_have_cache = req->have_cache;
+	struct bch_devs_mask saved_devs_may_alloc = req->devs_may_alloc;
+
+	req->devs_may_alloc = h->devs;
+	req->have_cache = true;
 
 	BUG_ON(v->nr_blocks != s->nr_data + s->nr_parity);
 	BUG_ON(v->nr_redundant != s->nr_parity);
 
 	/* * We bypass the sector allocator which normally does this: */
-	bitmap_and(req.devs_may_alloc.d, req.devs_may_alloc.d,
+	bitmap_and(req->devs_may_alloc.d, req->devs_may_alloc.d,
 		   c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
 
 	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) {
@@ -1746,7 +1751,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
 		 * block when updating the stripe
 		 */
 		if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
-			__clear_bit(v->ptrs[i].dev, req.devs_may_alloc.d);
+			__clear_bit(v->ptrs[i].dev, req->devs_may_alloc.d);
 
 		if (i < s->nr_data)
 			nr_have_data++;
@@ -1757,52 +1762,58 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
 	BUG_ON(nr_have_data > s->nr_data);
 	BUG_ON(nr_have_parity > s->nr_parity);
 
-	req.ptrs.nr = 0;
+	req->ptrs.nr = 0;
 	if (nr_have_parity < s->nr_parity) {
-		req.nr_replicas = s->nr_parity;
-		req.nr_effective = nr_have_parity;
-		req.data_type = BCH_DATA_parity;
+		req->nr_replicas = s->nr_parity;
+		req->nr_effective = nr_have_parity;
+		req->data_type = BCH_DATA_parity;
 
-		ret = bch2_bucket_alloc_set_trans(trans, &req, &h->parity_stripe, cl);
+		ret = bch2_bucket_alloc_set_trans(trans, req, &h->parity_stripe, cl);
 
-		open_bucket_for_each(c, &req.ptrs, ob, i) {
+		open_bucket_for_each(c, &req->ptrs, ob, i) {
 			j = find_next_zero_bit(s->blocks_gotten,
 					       s->nr_data + s->nr_parity,
 					       s->nr_data);
 			BUG_ON(j >= s->nr_data + s->nr_parity);
 
-			s->blocks[j] = req.ptrs.v[i];
+			s->blocks[j] = req->ptrs.v[i];
 			v->ptrs[j] = bch2_ob_ptr(c, ob);
 			__set_bit(j, s->blocks_gotten);
 		}
 
 		if (ret)
-			return ret;
+			goto err;
 	}
 
-	req.ptrs.nr = 0;
+	req->ptrs.nr = 0;
 	if (nr_have_data < s->nr_data) {
-		req.nr_replicas = s->nr_data;
-		req.nr_effective = nr_have_data;
-		req.data_type = BCH_DATA_user;
+		req->nr_replicas = s->nr_data;
+		req->nr_effective = nr_have_data;
+		req->data_type = BCH_DATA_user;
 
-		ret = bch2_bucket_alloc_set_trans(trans, &req, &h->block_stripe, cl);
+		ret = bch2_bucket_alloc_set_trans(trans, req, &h->block_stripe, cl);
 
-		open_bucket_for_each(c, &req.ptrs, ob, i) {
+		open_bucket_for_each(c, &req->ptrs, ob, i) {
 			j = find_next_zero_bit(s->blocks_gotten,
 					       s->nr_data, 0);
 			BUG_ON(j >= s->nr_data);
 
-			s->blocks[j] = req.ptrs.v[i];
+			s->blocks[j] = req->ptrs.v[i];
 			v->ptrs[j] = bch2_ob_ptr(c, ob);
 			__set_bit(j, s->blocks_gotten);
 		}
 
 		if (ret)
-			return ret;
+			goto err;
 	}
-
-	return 0;
+err:
+	req->data_type = saved_data_type;
+	req->ptrs = saved_ptrs;
+	req->nr_replicas = saved_nr_replicas;
+	req->nr_effective = saved_nr_effective;
+	req->have_cache = saved_have_cache;
+	req->devs_may_alloc = saved_devs_may_alloc;
+	return ret;
 }
 
 static int __get_existing_stripe(struct btree_trans *trans,
@@ -2028,8 +2039,12 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		goto alloc_existing;
 
 	/* First, try to allocate a full stripe: */
-	ret = new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?:
+	enum bch_watermark saved_watermark = BCH_WATERMARK_stripe;
+	swap(req->watermark, saved_watermark);
+	ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
 		__bch2_ec_stripe_head_reserve(trans, h, s);
+	swap(req->watermark, saved_watermark);
+
 	if (!ret)
 		goto allocate_buf;
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
@@ -2048,7 +2063,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		goto err;
 
 	if (req->watermark == BCH_WATERMARK_copygc) {
-		ret = new_stripe_alloc_buckets(trans, h, s, req->watermark, NULL) ?:
+		ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
 			__bch2_ec_stripe_head_reserve(trans, h, s);
 		if (ret)
 			goto err;
@@ -2067,7 +2082,7 @@ alloc_existing:
 	 * Retry allocating buckets, with the watermark for this
 	 * particular write:
 	 */
-	ret = new_stripe_alloc_buckets(trans, h, s, req->watermark, cl);
+	ret = new_stripe_alloc_buckets(trans, req, h, s, cl);
 	if (ret)
 		goto err;
 
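
For the BCH_WATERMARK_stripe path, the patch overrides req->watermark only for the duration of the call by swapping it with a local and swapping back afterwards. A minimal sketch of that pattern, with hypothetical names and the kernel's swap() macro approximated for a standalone build:

```c
/* Approximation of the kernel's swap() macro from <linux/minmax.h>. */
#define swap(a, b) do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

/* Hypothetical stand-in for struct alloc_request. */
struct request {
	int	watermark;
};

/* Stand-in for the allocation that should see the temporary watermark. */
static int do_alloc(struct request *req)
{
	return req->watermark > 0 ? 0 : -1;
}

static int alloc_with_stripe_watermark(struct request *req, int stripe_watermark)
{
	int saved_watermark = stripe_watermark;
	int ret;

	swap(req->watermark, saved_watermark);	/* override for this call only */
	ret = do_alloc(req);
	swap(req->watermark, saved_watermark);	/* caller's watermark restored */

	return ret;
}
```

The two swaps keep the caller-visible request unchanged across the call, which is what lets the same req be passed down through new_stripe_alloc_buckets() without allocating another request on the stack.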