Diffstat (limited to 'fs/bcachefs/io_read.c')
-rw-r--r--	fs/bcachefs/io_read.c	57
1 file changed, 32 insertions, 25 deletions
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index aecc645e6516..a4a61bce076a 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -103,14 +103,21 @@ static inline bool have_io_error(struct bch_io_failures *failed)
 	return failed && failed->nr;
 }
 
-static bool ptr_being_rewritten(struct bch_read_bio *orig,
-				unsigned dev,
-				unsigned flags)
+static inline struct data_update *rbio_data_update(struct bch_read_bio *rbio)
 {
-	if (!(flags & BCH_READ_data_update))
+	EBUG_ON(rbio->split);
+
+	return rbio->data_update
+		? container_of(rbio, struct data_update, rbio)
+		: NULL;
+}
+
+static bool ptr_being_rewritten(struct bch_read_bio *orig, unsigned dev)
+{
+	struct data_update *u = rbio_data_update(orig);
+	if (!u)
 		return false;
 
-	struct data_update *u = container_of(orig, struct data_update, rbio);
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(u->k.k));
 	unsigned i = 0;
 	bkey_for_each_ptr(ptrs, ptr) {
@@ -199,7 +206,6 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
 					    struct bpos pos,
 					    struct extent_ptr_decoded *pick,
 					    unsigned sectors,
-					    unsigned flags,
 					    struct bch_read_bio *orig,
 					    struct bch_io_failures *failed)
 {
@@ -220,7 +226,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
 		unsigned ptr_bit = 1;
 		bkey_for_each_ptr(ptrs, ptr) {
 			if (bch2_dev_io_failures(failed, ptr->dev) &&
-			    !ptr_being_rewritten(orig, ptr->dev, flags))
+			    !ptr_being_rewritten(orig, ptr->dev))
 				update_opts.rewrite_ptrs |= ptr_bit;
 			ptr_bit <<= 1;
 		}
@@ -314,7 +320,7 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
 				k.k->type == KEY_TYPE_reflink_v
 				? BTREE_ID_reflink
 				: BTREE_ID_extents,
-				k, pos, pick, sectors, flags, orig, failed);
+				k, pos, pick, sectors, orig, failed);
 	if (!promote)
 		return NULL;
 
@@ -342,7 +348,7 @@ static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *o
 	if (ret)
 		return ret;
 
-	if (rbio->flags & BCH_READ_data_update)
+	if (rbio->data_update)
 		prt_str(out, "(internal move) ");
 
 	return 0;
@@ -505,7 +511,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 	flags &= ~BCH_READ_last_fragment;
 	flags |= BCH_READ_must_clone;
 
-	int ret = flags & BCH_READ_data_update
+	int ret = rbio->data_update
 		? bch2_read_retry_nodecode(c, rbio, iter, &failed, flags)
 		: __bch2_read(c, rbio, iter, inum, &failed, flags);
 
@@ -519,7 +525,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 			bch2_inum_offset_err_msg_trans(trans, &buf,
 					(subvol_inum) { subvol, read_pos.inode },
 					read_pos.offset << 9));
-		if (rbio->flags & BCH_READ_data_update)
+		if (rbio->data_update)
 			prt_str(&buf, "(internal move) ");
 		prt_str(&buf, "successful retry");
 
@@ -712,9 +718,10 @@ static void __bch2_read_endio(struct work_struct *work)
 		container_of(work, struct bch_read_bio, work);
 	struct bch_fs *c	= rbio->c;
 	struct bch_dev *ca	= rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
-	struct bio *src		= &rbio->bio;
-	struct bio *dst		= &bch2_rbio_parent(rbio)->bio;
-	struct bvec_iter dst_iter = rbio->bvec_iter;
+	struct bch_read_bio *parent	= bch2_rbio_parent(rbio);
+	struct bio *src			= &rbio->bio;
+	struct bio *dst			= &parent->bio;
+	struct bvec_iter dst_iter	= rbio->bvec_iter;
 	struct bch_extent_crc_unpacked crc = rbio->pick.crc;
 	struct nonce nonce = extent_nonce(rbio->version, crc);
 	unsigned nofs_flags;
@@ -764,7 +771,7 @@ static void __bch2_read_endio(struct work_struct *work)
 	if (unlikely(rbio->narrow_crcs))
 		bch2_rbio_narrow_crcs(rbio);
 
-	if (likely(!(rbio->flags & BCH_READ_data_update))) {
+	if (likely(!parent->data_update)) {
 		/* Adjust crc to point to subset of data we want: */
 		crc.offset += rbio->offset_into_extent;
 		crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
@@ -934,6 +941,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 	struct bch_read_bio *rbio = NULL;
 	bool bounce = false, read_full = false, narrow_crcs = false;
 	struct bpos data_pos = bkey_start_pos(k.k);
+	struct data_update *u = rbio_data_update(orig);
 	int ret = 0;
 
 	if (bkey_extent_is_inline_data(k.k)) {
@@ -997,7 +1005,7 @@ retry_pick:
 		goto retry_pick;
 	}
 
-	if (!(flags & BCH_READ_data_update)) {
+	if (likely(!u)) {
 		if (!(flags & BCH_READ_last_fragment) ||
 		    bio_flagged(&orig->bio, BIO_CHAIN))
 			flags |= BCH_READ_must_clone;
@@ -1020,12 +1028,10 @@ retry_pick:
 			bounce = true;
 		}
 	} else {
-		read_full = true;
 		/*
 		 * can happen if we retry, and the extent we were going to read
 		 * has been merged in the meantime:
 		 */
-		struct data_update *u = container_of(orig, struct data_update, rbio);
 		if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
 			if (ca)
 				percpu_ref_put(&ca->io_ref);
@@ -1034,6 +1040,7 @@ retry_pick:
 		}
 
 		iter.bi_size = pick.crc.compressed_size << 9;
+		read_full = true;
 	}
 
 	if (orig->opts.promote_target || have_io_error(failed))
@@ -1127,7 +1134,7 @@ retry_pick:
 	if (rbio->bounce)
 		trace_and_count(c, io_read_bounce, &rbio->bio);
 
-	if (!(flags & BCH_READ_data_update))
+	if (!u)
 		this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
 	else
 		this_cpu_add(c->counters[BCH_COUNTER_io_move_read], bio_sectors(&rbio->bio));
@@ -1137,7 +1144,7 @@ retry_pick:
 	 * If it's being moved internally, we don't want to flag it as a cache
 	 * hit:
 	 */
-	if (ca && pick.ptr.cached && !(flags & BCH_READ_data_update))
+	if (ca && pick.ptr.cached && !u)
 		bch2_bucket_io_time_reset(trans, pick.ptr.dev,
 			PTR_BUCKET_NR(ca, &pick.ptr), READ);
 
@@ -1234,11 +1241,11 @@ hole:
 	this_cpu_add(c->counters[BCH_COUNTER_io_read_hole],
 		     bvec_iter_sectors(iter));
 	/*
-	 * won't normally happen in the BCH_READ_data_update
-	 * (bch2_move_extent()) path, but if we retry and the extent we wanted
-	 * to read no longer exists we have to signal that:
+	 * won't normally happen in the data update (bch2_move_extent()) path,
+	 * but if we retry and the extent we wanted to read no longer exists we
+	 * have to signal that:
 	 */
-	if (flags & BCH_READ_data_update)
+	if (u)
 		orig->ret = -BCH_ERR_data_read_key_overwritten;
 
 	zero_fill_bio_iter(&orig->bio, iter);
@@ -1259,7 +1266,7 @@ int __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
 	struct bkey_s_c k;
 	int ret;
 
-	BUG_ON(flags & BCH_READ_data_update);
+	EBUG_ON(rbio->data_update);
 
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
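
The recurring change above is mechanical: each "flags & BCH_READ_data_update" test becomes a NULL check on the pointer returned by the new rbio_data_update() helper, which recovers the enclosing struct data_update from the embedded rbio via container_of(). A minimal, self-contained userspace sketch of that pattern follows; the struct layouts and names are simplified stand-ins rather than the bcachefs definitions, and container_of() is redefined locally without the kernel's type checking.

/*
 * Illustrative sketch only (hypothetical types); the shape of
 * rbio_data_update() mirrors the helper added in the patch.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct read_bio {
	unsigned	data_update:1;	/* set when embedded in a data_update */
};

struct data_update {
	struct read_bio	rbio;		/* embedded read bio */
	unsigned	rewrite_ptrs;	/* which pointers are being rewritten */
};

/* Return the enclosing data_update, or NULL for a plain read. */
static struct data_update *rbio_data_update(struct read_bio *rbio)
{
	return rbio->data_update
		? container_of(rbio, struct data_update, rbio)
		: NULL;
}

int main(void)
{
	struct data_update u = { .rbio = { .data_update = 1 }, .rewrite_ptrs = 2 };
	struct read_bio plain = { 0 };

	/* prints "1 0": the embedded rbio resolves to its update, a plain one doesn't */
	printf("%d %d\n",
	       rbio_data_update(&u.rbio) != NULL,
	       rbio_data_update(&plain) != NULL);
	return 0;
}

Because the bit travels with the bio itself, callers no longer need a flags word plumbed through, which lines up with the removal of the flags argument from ptr_being_rewritten() and __promote_alloc() in the hunks above.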