-rw-r--r--   fs/bcachefs/alloc_foreground.c   192
-rw-r--r--   fs/bcachefs/alloc_foreground.h     7
-rw-r--r--   fs/bcachefs/btree_node_scan.c      6
-rw-r--r--   fs/bcachefs/ec.c                  38
-rw-r--r--   fs/bcachefs/ec.h                   2
-rw-r--r--   fs/bcachefs/errcode.h              7
-rw-r--r--   fs/bcachefs/journal.c              3
7 files changed, 131 insertions, 124 deletions
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 5b9e64163d37..df7a28cd8491 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -206,8 +206,7 @@ static inline bool may_alloc_bucket(struct bch_fs *c,
 
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
                                               struct alloc_request *req,
-                                              u64 bucket, u8 gen,
-                                              struct closure *cl)
+                                              u64 bucket, u8 gen)
 {
         struct bch_dev *ca = req->ca;
 
@@ -222,12 +221,18 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
         spin_lock(&c->freelist_lock);
 
         if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(req->watermark))) {
-                if (cl)
-                        closure_wait(&c->open_buckets_wait, cl);
-
                 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
+
+                int ret;
+                if (req->cl && !(req->flags & BCH_WRITE_alloc_nowait)) {
+                        closure_wait(&c->open_buckets_wait, req->cl);
+                        ret = bch_err_throw(c, open_bucket_alloc_blocked);
+                } else {
+                        ret = bch_err_throw(c, open_buckets_empty);
+                }
+
                 spin_unlock(&c->freelist_lock);
-                return ERR_PTR(bch_err_throw(c, open_buckets_empty));
+                return ERR_PTR(ret);
         }
 
         /* Recheck under lock: */
@@ -259,8 +264,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans,
                                             struct alloc_request *req,
-                                            struct btree_iter *freespace_iter,
-                                            struct closure *cl)
+                                            struct btree_iter *freespace_iter)
 {
         struct bch_fs *c = trans->c;
         u64 b = freespace_iter->pos.offset & ~(~0ULL << 56);
@@ -275,7 +279,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans,
         if (ret)
                 return NULL;
 
-        return __try_alloc_bucket(c, req, b, gen, cl);
+        return __try_alloc_bucket(c, req, b, gen);
 }
 
 /*
@@ -283,8 +287,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans,
  */
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
-                        struct alloc_request *req,
-                        struct closure *cl)
+                        struct alloc_request *req)
 {
         struct bch_fs *c = trans->c;
         struct bch_dev *ca = req->ca;
@@ -348,7 +351,7 @@ again:
                 req->counters.buckets_seen++;
 
                 ob = may_alloc_bucket(c, req, k.k->p)
-                        ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl)
+                        ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen)
                         : NULL;
 next:
                 bch2_set_btree_iter_dontneed(trans, &citer);
@@ -374,8 +377,7 @@ next:
 }
 
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
-                                                      struct alloc_request *req,
-                                                      struct closure *cl)
+                                                      struct alloc_request *req)
 {
         struct bch_dev *ca = req->ca;
         struct btree_iter iter;
@@ -417,7 +419,7 @@ again:
                         goto next;
                 }
 
-                ob = try_alloc_bucket(trans, req, &iter, cl);
+                ob = try_alloc_bucket(trans, req, &iter);
                 if (ob) {
                         if (!IS_ERR(ob))
                                 *dev_alloc_cursor = iter.pos.offset;
@@ -450,7 +452,6 @@ fail:
 
 static noinline void trace_bucket_alloc2(struct bch_fs *c,
                                          struct alloc_request *req,
-                                         struct closure *cl,
                                          struct open_bucket *ob)
 {
         struct printbuf buf = PRINTBUF;
@@ -460,7 +461,8 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c,
         prt_printf(&buf, "dev\t%s (%u)\n",      req->ca->name, req->ca->dev_idx);
         prt_printf(&buf, "watermark\t%s\n",     bch2_watermarks[req->watermark]);
         prt_printf(&buf, "data type\t%s\n",     __bch2_data_types[req->data_type]);
-        prt_printf(&buf, "blocking\t%u\n",      cl != NULL);
+        prt_printf(&buf, "blocking\t%u\n",      !req->will_retry_target_devices &&
+                                                !req->will_retry_all_devices);
         prt_printf(&buf, "free\t%llu\n",        req->usage.buckets[BCH_DATA_free]);
         prt_printf(&buf, "avail\t%llu\n",       dev_buckets_free(req->ca, req->usage, req->watermark));
         prt_printf(&buf, "copygc_wait\t%llu/%lli\n",
@@ -488,28 +490,23 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c,
  * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
  * @trans:      transaction object
  * @req:        state for the entire allocation
- * @cl:         if not NULL, closure to be used to wait if buckets not available
- * @nowait:     if true, do not wait for buckets to become available
  *
  * Returns:     an open_bucket on success, or an ERR_PTR() on failure.
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
-                                                   struct alloc_request *req,
-                                                   struct closure *cl,
-                                                   bool nowait)
+                                                   struct alloc_request *req)
 {
         struct bch_fs *c = trans->c;
         struct bch_dev *ca = req->ca;
         struct open_bucket *ob = NULL;
         bool freespace = READ_ONCE(ca->mi.freespace_initialized);
-        u64 avail;
-        bool waiting = nowait;
+        bool waiting = false;
 
         req->btree_bitmap = req->data_type == BCH_DATA_btree;
         memset(&req->counters, 0, sizeof(req->counters));
 again:
         bch2_dev_usage_read_fast(ca, &req->usage);
-        avail = dev_buckets_free(ca, req->usage, req->watermark);
+        u64 avail = dev_buckets_free(ca, req->usage, req->watermark);
 
         if (req->usage.buckets[BCH_DATA_need_discard] > avail)
                 bch2_dev_do_discards(ca);
@@ -525,8 +522,12 @@ again:
             c->recovery.pass_done < BCH_RECOVERY_PASS_check_allocations)
                 goto alloc;
 
-        if (cl && !waiting) {
-                closure_wait(&c->freelist_wait, cl);
+        if (!waiting &&
+            req->cl &&
+            !req->will_retry_target_devices &&
+            !req->will_retry_all_devices &&
+            !(req->flags & BCH_WRITE_alloc_nowait)) {
+                closure_wait(&c->freelist_wait, req->cl);
                 waiting = true;
                 goto again;
         }
@@ -541,8 +542,8 @@ again:
                 closure_wake_up(&c->freelist_wait);
 alloc:
         ob = likely(freespace)
-                ? bch2_bucket_alloc_freelist(trans, req, cl)
-                : bch2_bucket_alloc_early(trans, req, cl);
+                ? bch2_bucket_alloc_freelist(trans, req)
+                : bch2_bucket_alloc_early(trans, req);
 
         if (req->counters.need_journal_commit * 2 > avail)
                 bch2_journal_flush_async(&c->journal, NULL);
@@ -571,7 +572,7 @@ err:
         if (!IS_ERR(ob)
             ? trace_bucket_alloc_enabled()
             : trace_bucket_alloc_fail_enabled())
-                trace_bucket_alloc2(c, req, cl, ob);
+                trace_bucket_alloc2(c, req, ob);
 
         return ob;
 }
@@ -583,13 +584,14 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
 {
         struct open_bucket *ob;
         struct alloc_request req = {
+                .cl             = cl,
                 .watermark      = watermark,
                 .data_type      = data_type,
                 .ca             = ca,
         };
 
         bch2_trans_do(c,
-                PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req, cl, false)));
+                PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req)));
         return ob;
 }
 
@@ -703,13 +705,11 @@ static int add_new_bucket(struct bch_fs *c,
         return 0;
 }
 
-inline int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
-                                       struct alloc_request *req,
-                                       struct dev_stripe_state *stripe,
-                                       struct closure *_cl)
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+                                struct alloc_request *req,
+                                struct dev_stripe_state *stripe)
 {
         struct bch_fs *c = trans->c;
-        struct closure *cl = NULL;
         int ret = 0;
 
         BUG_ON(req->nr_effective >= req->nr_replicas);
@@ -721,6 +721,8 @@ inline int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 retry_blocking:
         bch2_dev_alloc_list(c, stripe, &req->devs_may_alloc, &req->devs_sorted);
 
+        req->will_retry_target_devices = req->devs_sorted.nr > 1;
+
         darray_for_each(req->devs_sorted, i) {
                 req->ca = bch2_dev_tryget_noerror(c, *i);
                 if (!req->ca)
@@ -731,37 +733,31 @@ retry_blocking:
                         continue;
                 }
 
-                struct open_bucket *ob = bch2_bucket_alloc_trans(trans, req, cl,
-                                                req->flags & BCH_WRITE_alloc_nowait);
+                struct open_bucket *ob = bch2_bucket_alloc_trans(trans, req);
                 if (!IS_ERR(ob))
                         bch2_dev_stripe_increment_inlined(req->ca, stripe, &req->usage);
                 bch2_dev_put(req->ca);
 
-                if (IS_ERR(ob)) {
+                if (IS_ERR(ob)) { /* don't squash error */
                         ret = PTR_ERR(ob);
-                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
-                                break;
+                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                            bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+                            bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+                                return ret;
                         continue;
                 }
 
-                ret = add_new_bucket(c, req, ob);
-                if (ret)
-                        break;
+                if (add_new_bucket(c, req, ob))
+                        return 0;
         }
 
-        if (ret == 1)
-                return 0;
-
-        if (ret &&
-            !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
-            cl != _cl) {
-                cl = _cl;
+        if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
+            req->will_retry_target_devices) {
+                req->will_retry_target_devices = false;
                 goto retry_blocking;
         }
 
-        if (ret)
-                return ret;
-
-        return bch_err_throw(c, insufficient_devices);
+        return ret ?: bch_err_throw(c, insufficient_devices);
 }
 
 /* Allocate from stripes: */
@@ -773,14 +769,13 @@ retry_blocking:
  */
 
 static int bucket_alloc_from_stripe(struct btree_trans *trans,
-                                    struct alloc_request *req,
-                                    struct closure *cl)
+                                    struct alloc_request *req)
 {
         struct bch_fs *c = trans->c;
         int ret = 0;
 
         struct ec_stripe_head *h =
-                bch2_ec_stripe_head_get(trans, req, 0, cl);
+                bch2_ec_stripe_head_get(trans, req, 0);
         if (IS_ERR(h))
                 return PTR_ERR(h);
         if (!h)
@@ -1174,7 +1169,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
                                    unsigned nr_replicas_required,
                                    enum bch_watermark watermark,
                                    enum bch_write_flags flags,
-                                   struct closure *_cl,
+                                   struct closure *cl,
                                    struct write_point **wp_ret)
 {
         struct bch_fs *c = trans->c;
@@ -1193,6 +1188,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
         if (nr_replicas < 2)
                 erasure_code = false;
 
+        req->cl                 = cl;
         req->nr_replicas        = nr_replicas;
         req->target             = target;
         req->watermark          = watermark;
@@ -1201,11 +1197,13 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
         BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
-        req->ec                 = erasure_code;
-        req->ptrs.nr            = 0;
-        req->nr_effective       = 0;
-        req->have_cache         = false;
-        write_points_nr         = c->write_points_nr;
+        req->ec                         = erasure_code;
+        req->will_retry_target_devices  = true;
+        req->will_retry_all_devices     = true;
+        req->ptrs.nr                    = 0;
+        req->nr_effective               = 0;
+        req->have_cache                 = false;
+        write_points_nr                 = c->write_points_nr;
 
         *wp_ret = req->wp = writepoint_find(trans, write_point.v);
 
@@ -1215,11 +1213,6 @@ retry:
         if (req->data_type != BCH_DATA_user)
                 req->have_cache = true;
 
-        /* If we're going to fall back to the whole fs, try nonblocking first */
-        struct closure *cl = req->target && !(flags & BCH_WRITE_only_specified_devs)
-                ? _cl
-                : NULL;
-
         ret = bch2_trans_relock(trans);
         if (ret)
                 goto err;
@@ -1237,50 +1230,49 @@ retry:
                 ret = bucket_alloc_set_writepoint(c, req) ?:
                         bucket_alloc_set_partial(c, req) ?:
                         (req->ec
-                         ? bucket_alloc_from_stripe(trans, req, _cl)
-                         : bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe, cl));
+                         ? bucket_alloc_from_stripe(trans, req)
+                         : bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe));
 
-                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+                    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
                         goto err;
 
-                /* Don't retry from all devices if we're out of open buckets: */
-                if (ret == -BCH_ERR_open_buckets_empty)
-                        goto retry_blocking;
+                if (ret == -BCH_ERR_freelist_empty ||
+                    ret == -BCH_ERR_insufficient_devices) {
+                        if (req->will_retry_all_devices) {
+                                BUG_ON(!req->will_retry_all_devices);
+                                req->will_retry_all_devices = false;
+                                /*
+                                 * Only try to allocate cache (durability = 0 devices) from the
+                                 * specified target:
+                                 */
+                                if (req->target &&
+                                    (!(flags & BCH_WRITE_only_specified_devs) ||
+                                     (ret == -BCH_ERR_insufficient_devices))) {
+                                        req->have_cache = true;
+                                        req->target     = 0;
+                                }
+                                continue;
+                        }
 
-                if (ret == -BCH_ERR_freelist_empty) {
-                        if (req->target && !(flags & BCH_WRITE_only_specified_devs))
-                                goto retry_all;
-                        goto retry_blocking;
+                        if (ret == -BCH_ERR_insufficient_devices &&
+                            req->nr_effective >= nr_replicas_required)
+                                ret = 0;
+                        else
+                                goto err;
                 }
 
-                if (ret == -BCH_ERR_insufficient_devices && req->target)
-                        goto retry_all;
-
                 if (req->nr_effective < req->nr_replicas && req->ec) {
-                        req->ec = false;
+                        req->ec                         = false;
+                        req->will_retry_target_devices  = true;
+                        req->will_retry_all_devices     = true;
                         continue;
                 }
 
-                if (ret == -BCH_ERR_insufficient_devices) {
-                        if (req->nr_effective < nr_replicas_required)
-                                goto err;
-                        ret = 0;
-                }
-
+                BUG_ON(req->nr_effective < nr_replicas_required);
                 BUG_ON(ret < 0);
                 break;
-retry_blocking:
-                if (cl == _cl)
-                        goto err;
-                cl = _cl;
-                continue;
-retry_all:
-                /*
-                 * Only try to allocate cache (durability = 0 devices) from the
-                 * specified target:
-                 */
-                req->have_cache = true;
-                req->target     = 0;
         }
 
         if (req->nr_effective > req->nr_replicas)
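The alloc_foreground.c changes above make two moves: blocking is now requested through req->cl plus the will_retry_* flags carried in struct alloc_request, instead of a closure threaded through every helper, and a blocked allocation now returns a distinct BCH_ERR_operation_blocked-class error rather than overloading open_buckets_empty. The wait itself still follows the kernel closure pattern: enqueue the caller's closure on a waitlist *before* the final recheck, so a concurrent free cannot be missed, then let the caller sleep and retry. A minimal, self-contained sketch of that contract — the demo_*() names are hypothetical, only the <linux/closure.h> and <linux/atomic.h> calls are real:

#include <linux/closure.h>
#include <linux/atomic.h>
#include <linux/errno.h>

/* Hypothetical resource pool, for illustration only: */
struct demo_pool {
        atomic_t                nr_free;
        struct closure_waitlist wait;
};

static int demo_alloc(struct demo_pool *pool, struct closure *cl)
{
        bool waiting = false;
again:
        if (atomic_dec_if_positive(&pool->nr_free) >= 0) {
                if (waiting)
                        closure_wake_up(&pool->wait);   /* pass the wakeup on */
                return 0;
        }

        if (cl && !waiting) {
                /*
                 * Enqueue before rechecking: a free that lands between the
                 * failed check and the wait will still wake us.
                 */
                closure_wait(&pool->wait, cl);
                waiting = true;
                goto again;
        }

        return -EAGAIN; /* caller may closure_sync() and retry */
}

static void demo_free(struct demo_pool *pool)
{
        atomic_inc(&pool->nr_free);
        closure_wake_up(&pool->wait);
}

static int demo_alloc_blocking(struct demo_pool *pool)
{
        struct closure cl;
        int ret;

        closure_init_stack(&cl);
        while ((ret = demo_alloc(pool, &cl)) == -EAGAIN)
                closure_sync(&cl);      /* sleep until demo_free() wakes us */
        return ret;
}

Returning a blocked-class error instead of silently waiting is what lets callers such as bch2_bucket_alloc_set_trans() above decide per call site whether to sleep, retry another device, or propagate the error.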
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index 1b3fc8460096..90eb8604a0a2 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -26,9 +26,12 @@ struct dev_alloc_list {
 };
 
 struct alloc_request {
+        struct closure          *cl;
         unsigned                nr_replicas;
         unsigned                target;
-        bool                    ec;
+        bool                    ec:1;
+        bool                    will_retry_target_devices:1;
+        bool                    will_retry_all_devices:1;
         enum bch_watermark      watermark;
         enum bch_write_flags    flags;
         enum bch_data_type      data_type;
@@ -224,7 +227,7 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 
 enum bch_write_flags;
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct alloc_request *,
-                                struct dev_stripe_state *, struct closure *);
+                                struct dev_stripe_state *);
 
 int bch2_alloc_sectors_start_trans(struct btree_trans *,
                                    unsigned, unsigned,
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 365808b4b7c0..42c9eb2c786e 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -226,15 +226,17 @@ static int read_btree_nodes_worker(void *p)
         struct bch_fs *c = container_of(w->f, struct bch_fs, found_btree_nodes);
         struct bch_dev *ca = w->ca;
         unsigned long last_print = jiffies;
+        struct btree *b = NULL;
+        struct bio *bio = NULL;
 
-        struct btree *b = __bch2_btree_node_mem_alloc(c);
+        b = __bch2_btree_node_mem_alloc(c);
         if (!b) {
                 bch_err(c, "read_btree_nodes_worker: error allocating buf");
                 w->f->ret = -ENOMEM;
                 goto err;
         }
 
-        struct bio *bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
+        bio = bio_alloc(NULL, buf_pages(b->data, c->opts.btree_node_size), 0, GFP_KERNEL);
         if (!bio) {
                 bch_err(c, "read_btree_nodes_worker: error allocating bio");
                 w->f->ret = -ENOMEM;
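The btree_node_scan.c hunk hardens the worker's cleanup: b and bio are now declared NULL up front, so a goto err taken before either allocation can no longer leave the shared error label looking at an uninitialized pointer (the old mid-function declarations were jumped over by the first goto err). This is the standard kernel shape for a single error label, sketched here with hypothetical demo names; it relies on kfree(NULL) being a no-op:

#include <linux/slab.h>

static int demo_worker(size_t n)
{
        int ret = 0;
        void *buf = NULL;       /* declared and NULLed up front, so... */
        void *aux = NULL;       /* ...the err label may always free them */

        buf = kmalloc(n, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto err;
        }

        aux = kmalloc(n, GFP_KERNEL);
        if (!aux) {
                ret = -ENOMEM;
                goto err;
        }

        /* ... real work would go here ... */
err:
        kfree(aux);     /* kfree(NULL) is a no-op, so this is always safe */
        kfree(buf);
        return ret;
}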
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 687c3ba98095..71956ee86a9c 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1720,8 +1720,7 @@ err:
 
 static int new_stripe_alloc_buckets(struct btree_trans *trans,
                                     struct alloc_request *req,
-                                    struct ec_stripe_head *h, struct ec_stripe_new *s,
-                                    struct closure *cl)
+                                    struct ec_stripe_head *h, struct ec_stripe_new *s)
 {
         struct bch_fs *c = trans->c;
         struct open_bucket *ob;
@@ -1771,7 +1770,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
                 req->nr_effective       = nr_have_parity;
                 req->data_type          = BCH_DATA_parity;
 
-                ret = bch2_bucket_alloc_set_trans(trans, req, &h->parity_stripe, cl);
+                ret = bch2_bucket_alloc_set_trans(trans, req, &h->parity_stripe);
 
                 open_bucket_for_each(c, &req->ptrs, ob, i) {
                         j = find_next_zero_bit(s->blocks_gotten,
@@ -1794,7 +1793,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
                 req->nr_effective       = nr_have_data;
                 req->data_type          = BCH_DATA_user;
 
-                ret = bch2_bucket_alloc_set_trans(trans, req, &h->block_stripe, cl);
+                ret = bch2_bucket_alloc_set_trans(trans, req, &h->block_stripe);
 
                 open_bucket_for_each(c, &req->ptrs, ob, i) {
                         j = find_next_zero_bit(s->blocks_gotten,
@@ -1926,7 +1925,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
         }
         bch2_trans_iter_exit(trans, &lru_iter);
         if (!ret)
-                ret = bch_err_throw(c, stripe_alloc_blocked);
+                return bch_err_throw(c, stripe_alloc_blocked);
         if (ret == 1)
                 ret = 0;
         if (ret)
@@ -1998,8 +1997,7 @@ err:
 
 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
                                                struct alloc_request *req,
-                                               unsigned algo,
-                                               struct closure *cl)
+                                               unsigned algo)
 {
         struct bch_fs *c = trans->c;
         unsigned redundancy = req->nr_replicas - 1;
@@ -2041,12 +2039,18 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
         if (s->have_existing_stripe)
                 goto alloc_existing;
 
+        /* First, try to allocate a full stripe: */
         enum bch_watermark saved_watermark = BCH_WATERMARK_stripe;
-        swap(req->watermark, saved_watermark);
-        ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
+        unsigned saved_flags = req->flags | BCH_WRITE_alloc_nowait;
+        swap(req->watermark, saved_watermark);
+        swap(req->flags, saved_flags);
+
+        ret = new_stripe_alloc_buckets(trans, req, h, s) ?:
                 __bch2_ec_stripe_head_reserve(trans, h, s);
-        swap(req->watermark, saved_watermark);
+
+        swap(req->watermark, saved_watermark);
+        swap(req->flags, saved_flags);
 
         if (!ret)
                 goto allocate_buf;
@@ -2062,19 +2066,25 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
                 ret = __bch2_ec_stripe_head_reuse(trans, h, s);
                 if (!ret)
                         break;
-                if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
+                if (waiting ||
+                    (req->flags & BCH_WRITE_alloc_nowait) ||
+                    ret != -BCH_ERR_stripe_alloc_blocked)
                         goto err;
 
                 if (req->watermark == BCH_WATERMARK_copygc) {
-                        ret = new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
+                        /* Don't self-deadlock copygc */
+                        swap(req->flags, saved_flags);
+                        ret = new_stripe_alloc_buckets(trans, req, h, s) ?:
                                 __bch2_ec_stripe_head_reserve(trans, h, s);
+                        swap(req->flags, saved_flags);
+
                         if (ret)
                                 goto err;
                         goto allocate_buf;
                 }
 
                 /* XXX freelist_wait? */
-                closure_wait(&c->freelist_wait, cl);
+                closure_wait(&c->freelist_wait, req->cl);
                 waiting = true;
         }
 
@@ -2085,7 +2095,7 @@ alloc_existing:
          * Retry allocating buckets, with the watermark for this
          * particular write:
          */
-        ret = new_stripe_alloc_buckets(trans, req, h, s, cl);
+        ret = new_stripe_alloc_buckets(trans, req, h, s);
         if (ret)
                 goto err;
 
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 548048adf0d5..756f14bd7bb7 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -258,7 +258,7 @@ void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
 
 struct alloc_request;
 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
-                        struct alloc_request *, unsigned, struct closure *);
+                        struct alloc_request *, unsigned);
 
 void bch2_do_stripe_deletes(struct bch_fs *);
 void bch2_ec_do_stripe_creates(struct bch_fs *);
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 2de0dc91a69e..defa06eb3466 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -130,7 +130,7 @@
         x(EEXIST,                       EEXIST_str_hash_set)                    \
         x(EEXIST,                       EEXIST_discard_in_flight_add)           \
         x(EEXIST,                       EEXIST_subvolume_create)                \
-        x(ENOSPC,                       open_buckets_empty)                     \
+        x(EAGAIN,                       open_buckets_empty)                     \
         x(ENOSPC,                       freelist_empty)                         \
         x(BCH_ERR_freelist_empty,       no_buckets_found)                       \
         x(0,                            transaction_restart)                    \
@@ -236,6 +236,9 @@
         x(0,                            operation_blocked)                      \
         x(BCH_ERR_operation_blocked,    btree_cache_cannibalize_lock_blocked)   \
         x(BCH_ERR_operation_blocked,    journal_res_blocked)                    \
+        x(BCH_ERR_operation_blocked,    bucket_alloc_blocked)                   \
+        x(BCH_ERR_operation_blocked,    open_bucket_alloc_blocked)              \
+        x(BCH_ERR_operation_blocked,    stripe_alloc_blocked)                   \
         x(BCH_ERR_journal_res_blocked,  journal_blocked)                        \
         x(BCH_ERR_journal_res_blocked,  journal_max_in_flight)                  \
         x(BCH_ERR_journal_res_blocked,  journal_max_open)                       \
@@ -244,8 +247,6 @@
         x(BCH_ERR_journal_res_blocked,  journal_buf_enomem)                     \
         x(BCH_ERR_journal_res_blocked,  journal_stuck)                          \
         x(BCH_ERR_journal_res_blocked,  journal_retry_open)                     \
-        x(BCH_ERR_journal_res_blocked,  bucket_alloc_blocked)                   \
-        x(BCH_ERR_journal_res_blocked,  stripe_alloc_blocked)                   \
         x(BCH_ERR_invalid,              invalid_sb)                             \
         x(BCH_ERR_invalid_sb,           invalid_sb_magic)                       \
         x(BCH_ERR_invalid_sb,           invalid_sb_version)                     \
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index f22b05e02c1e..436a86b8ecc3 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1278,8 +1278,7 @@ static int bch2_set_nr_journal_buckets_loop(struct bch_fs *c, struct bch_dev *ca
 
                 ret = bch2_set_nr_journal_buckets_iter(ca, nr, new_fs, &cl);
 
-                if (ret == -BCH_ERR_bucket_alloc_blocked ||
-                    ret == -BCH_ERR_open_buckets_empty)
+                if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
                         ret = 0; /* wait and retry */
 
                 bch2_disk_reservation_put(c, &disk_res);
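The errcode.h hunks reparent the blocking codes (bucket_alloc_blocked, stripe_alloc_blocked, plus the new open_bucket_alloc_blocked) from journal_res_blocked to the generic operation_blocked class, and move open_buckets_empty from ENOSPC to EAGAIN. The reparenting is what lets the journal.c hunk replace two exact comparisons with a single bch2_err_matches() class test. A simplified model of how such hierarchical error matching can work — the demo_* names are hypothetical; the real table is the x() macro list above and the real matcher is bch2_err_matches():

#include <linux/kernel.h>       /* abs() */

/* Each private code records its parent class; 0 terminates the chain. */
enum {
        DEMO_ERR_START = 2048,
        DEMO_ERR_operation_blocked,
        DEMO_ERR_bucket_alloc_blocked,
        DEMO_ERR_open_bucket_alloc_blocked,
        DEMO_ERR_stripe_alloc_blocked,
        DEMO_ERR_MAX,
};

static const unsigned demo_err_parent[DEMO_ERR_MAX - DEMO_ERR_START] = {
        [DEMO_ERR_operation_blocked         - DEMO_ERR_START] = 0,
        [DEMO_ERR_bucket_alloc_blocked      - DEMO_ERR_START] = DEMO_ERR_operation_blocked,
        [DEMO_ERR_open_bucket_alloc_blocked - DEMO_ERR_START] = DEMO_ERR_operation_blocked,
        [DEMO_ERR_stripe_alloc_blocked      - DEMO_ERR_START] = DEMO_ERR_operation_blocked,
};

/* True if err is @class itself or any descendant of it: */
static bool demo_err_matches(int err, int class)
{
        err = abs(err);
        while (err >= DEMO_ERR_START && err != class)
                err = demo_err_parent[err - DEMO_ERR_START];
        return err == class;
}

With the parent chain in place, a call site like the journal.c hunk collapses to one test, demo_err_matches(ret, DEMO_ERR_operation_blocked), and new blocking codes can be added to the table without touching any caller.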