| author | Kent Overstreet <kent.overstreet@gmail.com> | 2022-02-15 23:40:30 -0500 |
|---|---|---|
| committer | Kent Overstreet <kent.overstreet@gmail.com> | 2022-02-17 02:19:59 -0500 |
| commit | 4448a395018995ee92fc8cfa6304482491446d0a (patch) | |
| tree | 4b3ab0829aa8a1728356e58fd4aaf723a0b05e8d | |
| parent | be30be94e6f519864ad7283694928dbcc17007ee (diff) | |
bcachefs: Fix locking in data move path
We need to ensure we don't have any btree locks held when calling
do_pending_writes() - besides issuing IOs, upcoming allocator changes
will have allocations doing btree lookups directly.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
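The hazard is easiest to see in miniature. Below is a minimal, self-contained C sketch (not bcachefs code; `tree_lock`, `issue_pending_write()`, and `drain_writes()` are invented stand-ins) of the pattern this patch adopts: when the write path may itself need the lock you hold, drop the lock before draining pending writes, which is what the new `bch2_trans_unlock()` call in `do_pending_writes()` does.

```c
/*
 * Sketch of the deadlock this patch closes: a thread that drains pending
 * writes while still holding a lock the write path itself needs would
 * block forever.  Names here are hypothetical stand-ins, not bcachefs API.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the write path: it needs tree_lock for its own lookups. */
static void issue_pending_write(void)
{
	pthread_mutex_lock(&tree_lock);	/* would deadlock if the caller still held it */
	/* ... lookup + IO submission would happen here ... */
	pthread_mutex_unlock(&tree_lock);
}

/* Mirrors the patched do_pending_writes(): drop the lock before issuing. */
static void drain_writes(pthread_mutex_t *held_lock)
{
	if (held_lock)
		pthread_mutex_unlock(held_lock);	/* like bch2_trans_unlock(trans) */

	issue_pending_write();
}

int main(void)
{
	pthread_mutex_lock(&tree_lock);
	drain_writes(&tree_lock);	/* safe: lock released before the write path runs */
	printf("no deadlock\n");
	return 0;
}
```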
| -rw-r--r-- | fs/bcachefs/move.c | 37 |
1 file changed, 20 insertions, 17 deletions
```diff
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 3e3dcec327a0..83536fdc309a 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -487,19 +487,22 @@ static void move_read_endio(struct bio *bio)
 	closure_put(&ctxt->cl);
 }
 
-static void do_pending_writes(struct moving_context *ctxt)
+static void do_pending_writes(struct moving_context *ctxt, struct btree_trans *trans)
 {
 	struct moving_io *io;
 
+	if (trans)
+		bch2_trans_unlock(trans);
+
 	while ((io = next_pending_write(ctxt))) {
 		list_del(&io->list);
 		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
 	}
 }
 
-#define move_ctxt_wait_event(_ctxt, _cond)			\
+#define move_ctxt_wait_event(_ctxt, _trans, _cond)		\
 do {								\
-	do_pending_writes(_ctxt);				\
+	do_pending_writes(_ctxt, _trans);			\
 								\
 	if (_cond)						\
 		break;						\
@@ -507,11 +510,12 @@ do {								\
 			 next_pending_write(_ctxt) || (_cond));	\
 } while (1)
 
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
+static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
+				       struct btree_trans *trans)
 {
 	unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
 
-	move_ctxt_wait_event(ctxt,
+	move_ctxt_wait_event(ctxt, trans,
 		!atomic_read(&ctxt->write_sectors) ||
 		atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
@@ -533,14 +537,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 	unsigned sectors = k.k->size, pages;
 	int ret = -ENOMEM;
 
-	move_ctxt_wait_event(ctxt,
-		atomic_read(&ctxt->write_sectors) <
-		SECTORS_IN_FLIGHT_PER_DEVICE);
-
-	move_ctxt_wait_event(ctxt,
-		atomic_read(&ctxt->read_sectors) <
-		SECTORS_IN_FLIGHT_PER_DEVICE);
-
 	/* write path might have to decompress data: */
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
 		sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
@@ -691,12 +687,19 @@ static int __bch2_move_data(struct bch_fs *c,
 				schedule_timeout(delay);
 
 			if (unlikely(freezing(current))) {
-				bch2_trans_unlock(&trans);
-				move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
+				move_ctxt_wait_event(ctxt, &trans, list_empty(&ctxt->reads));
 				try_to_freeze();
 			}
 		} while (delay);
 
+		move_ctxt_wait_event(ctxt, &trans,
+			atomic_read(&ctxt->write_sectors) <
+			SECTORS_IN_FLIGHT_PER_DEVICE);
+
+		move_ctxt_wait_event(ctxt, &trans,
+			atomic_read(&ctxt->read_sectors) <
+			SECTORS_IN_FLIGHT_PER_DEVICE);
+
 		bch2_trans_begin(&trans);
 
 		k = bch2_btree_iter_peek(&iter);
@@ -761,7 +764,7 @@ static int __bch2_move_data(struct bch_fs *c,
 
 			if (ret2 == -ENOMEM) {
 				/* memory allocation failure, wait for some IO to finish */
-				bch2_move_ctxt_wait_for_io(ctxt);
+				bch2_move_ctxt_wait_for_io(ctxt, &trans);
 				continue;
 			}
 
@@ -846,7 +849,7 @@ int bch2_move_data(struct bch_fs *c,
 	}
 
-	move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
+	move_ctxt_wait_event(&ctxt, NULL, list_empty(&ctxt.reads));
 
 	closure_sync(&ctxt.cl);
 
 	EBUG_ON(atomic_read(&ctxt.write_sectors));
```
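Design note, as far as the diff shows: threading the `btree_trans` through `move_ctxt_wait_event()` lets the wait path drop btree locks at exactly the point it may block, replacing the one-off `bch2_trans_unlock()` call in the freezer path. Callers that hold no transaction, such as the final drain in `bch2_move_data()`, pass `NULL`. The per-device in-flight throttling also moves out of `bch2_move_extent()` and up into the `__bch2_move_data()` loop, where the transaction is available to unlock before waiting.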