Diffstat (limited to 'libbcachefs/btree_io.c')
 libbcachefs/btree_io.c | 108 ++++++++++++++++++++++++++++----------------------
 1 file changed, 62 insertions(+), 46 deletions(-)
diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index 87a8ddf9..3f87e91e 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -1352,7 +1352,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
return;
}
- bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_read_bio);
+ bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
rb = container_of(bio, struct btree_read_bio, bio);
rb->c = c;
rb->start_time = local_clock();
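Note: the read path now allocates from the shared c->btree_bio bioset instead of a dedicated read bioset; the write path later in this diff allocates from the same pool. For the container_of() on the following line to be safe, the bioset's front padding must cover the larger of the two containing structs. A minimal sketch of how such a bioset could be set up, assuming the embedded bio sits last in each container; bioset_init(), max_t() and BIOSET_NEED_BVECS are stock kernel API, the struct and field names are taken from this diff:

	#include <linux/bio.h>

	static int init_btree_bioset(struct bch_fs *c)
	{
		/* front_pad = bytes each container places before its bio */
		unsigned front_pad = max_t(unsigned,
				offsetof(struct btree_read_bio, bio),
				offsetof(struct btree_write_bio, wbio.bio));

		return bioset_init(&c->btree_bio, 1, front_pad,
				   BIOSET_NEED_BVECS);
	}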
@@ -1438,9 +1438,9 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b)
}
static void bch2_btree_node_write_error(struct bch_fs *c,
- struct bch_write_bio *wbio)
+ struct btree_write_bio *wbio)
{
- struct btree *b = wbio->bio.bi_private;
+ struct btree *b = wbio->wbio.bio.bi_private;
struct closure *cl = wbio->cl;
__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct bkey_i_extent *new_key;
@@ -1473,7 +1473,7 @@ retry:
new_key = bkey_i_to_extent(&tmp.k);
e = extent_i_to_s(new_key);
extent_for_each_ptr_backwards(e, ptr)
- if (bch2_dev_list_has_dev(wbio->failed, ptr->dev))
+ if (bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev))
bch2_extent_drop_ptr(e, ptr);
if (!bch2_extent_nr_ptrs(e.c))
@@ -1486,7 +1486,7 @@ retry:
goto err;
out:
bch2_btree_iter_unlock(&iter);
- bio_put(&wbio->bio);
+ bio_put(&wbio->wbio.bio);
btree_node_write_done(c, b);
if (cl)
closure_put(cl);
@@ -1511,17 +1511,46 @@ void bch2_btree_write_error_work(struct work_struct *work)
if (!bio)
break;
- bch2_btree_node_write_error(c, to_wbio(bio));
+ bch2_btree_node_write_error(c,
+ container_of(bio, struct btree_write_bio, wbio.bio));
}
}
+static void btree_node_write_work(struct work_struct *work)
+{
+ struct btree_write_bio *wbio =
+ container_of(work, struct btree_write_bio, work);
+ struct closure *cl = wbio->cl;
+ struct bch_fs *c = wbio->wbio.c;
+ struct btree *b = wbio->wbio.bio.bi_private;
+
+ btree_bounce_free(c,
+ wbio->wbio.order,
+ wbio->wbio.used_mempool,
+ wbio->data);
+
+ if (wbio->wbio.failed.nr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->btree_write_error_lock, flags);
+ bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
+ spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+
+ queue_work(c->wq, &c->btree_write_error_work);
+ return;
+ }
+
+ bio_put(&wbio->wbio.bio);
+ btree_node_write_done(c, b);
+ if (cl)
+ closure_put(cl);
+}
+
static void btree_node_write_endio(struct bio *bio)
{
- struct btree *b = bio->bi_private;
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
struct bch_write_bio *orig = parent ?: wbio;
- struct closure *cl = !wbio->split ? wbio->cl : NULL;
struct bch_fs *c = wbio->c;
struct bch_dev *ca = wbio->ca;
unsigned long flags;
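Note: bio endio callbacks run in interrupt context, where the old code's btree_bounce_free() (potentially a mempool free) and error handling were awkward; the new btree_node_write_work() above moves that completion work to process context. A sketch of the underlying pattern with illustrative names, not bcachefs's own:

	#include <linux/bio.h>
	#include <linux/workqueue.h>

	struct my_write {
		struct work_struct	work;
		struct bio		bio;	/* embedded, as in btree_write_bio */
	};

	static void my_write_work(struct work_struct *work)
	{
		struct my_write *w = container_of(work, struct my_write, work);

		/* process context: free bounce buffers, take locks, retry */
		bio_put(&w->bio);
	}

	static void my_write_endio(struct bio *bio)
	{
		struct my_write *w = container_of(bio, struct my_write, bio);

		INIT_WORK(&w->work, my_write_work);
		schedule_work(&w->work);	/* punt to a kworker */
	}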
@@ -1542,27 +1571,13 @@ static void btree_node_write_endio(struct bio *bio)
if (parent) {
bio_put(bio);
bio_endio(&parent->bio);
- return;
- }
-
- btree_bounce_free(c,
- wbio->order,
- wbio->used_mempool,
- wbio->data);
-
- if (wbio->failed.nr) {
- spin_lock_irqsave(&c->btree_write_error_lock, flags);
- bio_list_add(&c->btree_write_error_list, &wbio->bio);
- spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
+ } else {
+ struct btree_write_bio *wb =
+ container_of(orig, struct btree_write_bio, wbio);
- queue_work(c->wq, &c->btree_write_error_work);
- return;
+ INIT_WORK(&wb->work, btree_node_write_work);
+ schedule_work(&wb->work);
}
-
- bio_put(bio);
- btree_node_write_done(c, b);
- if (cl)
- closure_put(cl);
}
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
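Note: most of the churn in this diff is mechanical: struct btree_write_bio now embeds struct bch_write_bio as its wbio member, so field accesses gain a level (wbio->failed becomes wbio->wbio.failed) and container_of() walks back from the nested bio. A standalone, compilable illustration of that recovery, using stand-in types that mirror the diff's names:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct bio             { int opf; };
	struct bch_write_bio   { struct bio bio; };
	struct btree_write_bio { int data; struct bch_write_bio wbio; };

	int main(void)
	{
		struct btree_write_bio wb = { .data = 42 };
		struct bio *bio = &wb.wbio.bio;

		/* nested member designator, exactly as in the diff */
		struct btree_write_bio *back =
			container_of(bio, struct btree_write_bio, wbio.bio);

		printf("%d\n", back->data);	/* prints 42 */
		return 0;
	}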
@@ -1586,7 +1601,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
struct closure *parent,
enum six_lock_type lock_type_held)
{
- struct bch_write_bio *wbio;
+ struct btree_write_bio *wbio;
struct bset_tree *t;
struct bset *i;
struct btree_node *bn = NULL;
@@ -1602,6 +1617,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
unsigned long old, new;
void *data;
+ if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+ return;
+
/*
* We may only have a read lock on the btree node - the dirty bit is our
* "lock" against racing with other threads that may be trying to start
@@ -1631,6 +1649,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
new ^= (1 << BTREE_NODE_write_idx);
} while (cmpxchg_acquire(&b->flags, old, new) != old);
+ BUG_ON(btree_node_fake(b));
BUG_ON(!list_empty(&b->write_blocked));
BUG_ON((b->will_make_reachable != NULL) != !b->written);
@@ -1763,21 +1782,22 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
trace_btree_write(b, bytes_to_write, sectors_to_write);
- wbio = wbio_init(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->bio_write));
- wbio->cl = parent;
- wbio->failed.nr = 0;
- wbio->order = order;
- wbio->used_mempool = used_mempool;
- wbio->data = data;
- wbio->bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
- wbio->bio.bi_iter.bi_size = sectors_to_write << 9;
- wbio->bio.bi_end_io = btree_node_write_endio;
- wbio->bio.bi_private = b;
+ wbio = container_of(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->btree_bio),
+ struct btree_write_bio, wbio.bio);
+ wbio_init(&wbio->wbio.bio);
+ wbio->data = data;
+ wbio->cl = parent;
+ wbio->wbio.order = order;
+ wbio->wbio.used_mempool = used_mempool;
+ wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
+ wbio->wbio.bio.bi_iter.bi_size = sectors_to_write << 9;
+ wbio->wbio.bio.bi_end_io = btree_node_write_endio;
+ wbio->wbio.bio.bi_private = b;
if (parent)
closure_get(parent);
- bch2_bio_map(&wbio->bio, data);
+ bch2_bio_map(&wbio->wbio.bio, data);
/*
* If we're appending to a leaf node, we don't technically need FUA -
@@ -1802,7 +1822,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
b->written += sectors_to_write;
- bch2_submit_wbio_replicas(wbio, c, BCH_DATA_BTREE, &k.key);
+ bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
return;
err:
set_btree_node_noevict(b);
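Note: the write path now carves its bio out of the shared c->btree_bio bioset and immediately recovers the btree_write_bio container. The container's definition is not part of this diff, but from the fields touched here it presumably looks roughly like the sketch below; the real definition lives in the bcachefs headers, and only the constraint that the embedded bio sit at the offset the bioset's front padding was sized for is load-bearing:

	/* Inferred from usage in this function; not copied from the headers. */
	struct btree_write_bio {
		struct closure		*cl;	/* parent closure to signal */
		void			*data;	/* bounce buffer, freed in write_work */
		struct work_struct	work;	/* defers completion to process context */
		struct bch_write_bio	wbio;	/* embeds the struct bio itself */
	};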
@@ -1905,11 +1925,7 @@ void bch2_btree_verify_flushed(struct bch_fs *c)
unsigned i;
rcu_read_lock();
- tbl = rht_dereference_rcu(c->btree_cache.table.tbl,
- &c->btree_cache.table);
-
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_rcu(b, pos, tbl, i, hash)
- BUG_ON(btree_node_dirty(b));
+ for_each_cached_btree(b, c, tbl, i, pos)
+ BUG_ON(btree_node_dirty(b));
rcu_read_unlock();
}
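Note: the last hunk swaps the open-coded rhashtable walk for a for_each_cached_btree() iterator. Its definition is outside this diff, but judging from the removed lines it would expand to something close to:

	/* Plausible expansion, reconstructed from the lines removed above. */
	#define for_each_cached_btree(_b, _c, _tbl, _iter, _pos)		\
		for ((_tbl) = rht_dereference_rcu((_c)->btree_cache.table.tbl,	\
						  &(_c)->btree_cache.table),	\
		     (_iter) = 0;						\
		     (_iter) < (_tbl)->size;					\
		     (_iter)++)							\
			rht_for_each_entry_rcu(_b, _pos, _tbl, _iter, hash)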