author     Kent Overstreet <kent.overstreet@gmail.com>  2016-04-25 22:38:41 -0800
committer  Kent Overstreet <kent.overstreet@gmail.com>  2017-01-18 21:39:19 -0900
commit     1456d0ff4384eff7881e00b88b36c79e0f9892d7 (patch)
tree       f44aaf6d443162a3f822f0e37c99031c5bc384a3
parent     aaaf2a20127f050bb2f62d15a8b2a5d90f7a029a (diff)
bcache: kill skip parameter to bch_read_extent()
-rw-r--r--  drivers/md/bcache/fs-io.c    |  5
-rw-r--r--  drivers/md/bcache/io.c       | 18
-rw-r--r--  drivers/md/bcache/io.h       |  7
-rw-r--r--  drivers/md/bcache/move.c     |  3
-rw-r--r--  drivers/md/bcache/request.c  | 40
5 files changed, 30 insertions, 43 deletions
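
The effect of the change: the skip offset (how far into the extent the read starts, in sectors) is no longer passed by each caller of bch_read_extent(); bch_read_extent_iter() now computes it as iter.bi_sector - bkey_start_offset(k.k), and the new EBUG_ON() checks that the iterator's range falls entirely inside the extent. Callers only need a correctly positioned bi_sector, which is why __bch_data_move() now initializes io->rbio.bio.bi_iter.bi_sector before calling bch_read_extent(). A minimal standalone sketch of that calculation follows; the types and names (sketch_iter, sketch_extent, read_skip_sectors) are simplified stand-ins for illustration, not the kernel's bvec_iter and bkey structures.

    /*
     * Simplified stand-ins for struct bvec_iter / struct bkey: only the
     * fields needed to show how the skip offset is derived.
     */
    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long long u64;

    struct sketch_iter {
            u64 bi_sector;          /* first sector the read targets */
            u64 bi_size_sectors;    /* length of the read, in sectors */
    };

    struct sketch_extent {
            u64 start;              /* bkey_start_offset(k.k) */
            u64 end;                /* k.k->p.offset */
    };

    /* skip = iter.bi_sector - bkey_start_offset(k.k), as in bch_read_extent_iter() */
    static unsigned read_skip_sectors(struct sketch_iter iter, struct sketch_extent e)
    {
            /* mirrors the new EBUG_ON(): the read must lie inside the extent */
            assert(e.start <= iter.bi_sector);
            assert(iter.bi_sector + iter.bi_size_sectors <= e.end);

            return (unsigned) (iter.bi_sector - e.start);
    }

    int main(void)
    {
            struct sketch_extent e = { .start = 100, .end = 164 };  /* 64-sector extent */
            struct sketch_iter it = { .bi_sector = 108, .bi_size_sectors = 16 };

            /* reading sectors 108..123 of an extent starting at 100 -> skip = 8 */
            printf("skip = %u sectors\n", read_skip_sectors(it, e));
            return 0;
    }

Deriving skip at the point of use also removes the duplicated subtraction and the per-caller BUG_ON/EBUG_ON range checks that the patch deletes from request.c and io.c.
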
diff --git a/drivers/md/bcache/fs-io.c b/drivers/md/bcache/fs-io.c
index a8895481eec5..add3db305fa9 100644
--- a/drivers/md/bcache/fs-io.c
+++ b/drivers/md/bcache/fs-io.c
@@ -659,8 +659,7 @@ static void bchfs_read(struct cache_set *c, struct bch_read_bio *rbio, u64 inode
return;
}
- sectors = min_t(u64, k.k->p.offset,
- bio_end_sector(bio)) -
+ sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
bio->bi_iter.bi_sector;
bytes = sectors << 9;
is_last = bytes == bio->bi_iter.bi_size;
@@ -674,8 +673,6 @@ static void bchfs_read(struct cache_set *c, struct bch_read_bio *rbio, u64 inode
c->prio_clock[READ].hand;
bch_read_extent(c, rbio, k, &pick,
- bio->bi_iter.bi_sector -
- bkey_start_offset(k.k),
BCH_READ_RETRY_IF_STALE|
BCH_READ_PROMOTE|
(is_last ? BCH_READ_IS_LAST : 0));
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index c99d5ca0962d..31e6330dcf67 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -1746,14 +1746,17 @@ static void bch_read_endio(struct bio *bio)
void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
struct bvec_iter iter, struct bkey_s_c k,
- struct extent_pick_ptr *pick,
- unsigned skip, unsigned flags)
+ struct extent_pick_ptr *pick, unsigned flags)
{
struct bch_read_bio *rbio;
struct cache_promote_op *promote_op = NULL;
unsigned orig_sectors = bio_sectors(&orig->bio);
+ unsigned skip = iter.bi_sector - bkey_start_offset(k.k);
bool bounce = false, split, read_full = false;
+ EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
+ k.k->p.offset < bvec_iter_end_sector(iter));
+
/* only promote if we're not reading from the fastest tier: */
if ((flags & BCH_READ_PROMOTE) && pick->ca->mi.tier) {
promote_op = kmalloc(sizeof(*promote_op), GFP_NOIO);
@@ -1887,12 +1890,6 @@ static void bch_read_iter(struct cache_set *c, struct bch_read_bio *rbio,
unsigned bytes, sectors;
bool is_last;
- EBUG_ON(bkey_cmp(bkey_start_pos(k.k),
- POS(inode, bvec_iter.bi_sector)) > 0);
-
- EBUG_ON(bkey_cmp(k.k->p,
- POS(inode, bvec_iter.bi_sector)) <= 0);
-
bch_extent_pick_ptr(c, k, &pick);
/*
@@ -1921,9 +1918,8 @@ static void bch_read_iter(struct cache_set *c, struct bch_read_bio *rbio,
PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
c->prio_clock[READ].hand;
- bch_read_extent_iter(c, rbio, bvec_iter, k, &pick,
- bvec_iter.bi_sector -
- bkey_start_offset(k.k), flags);
+ bch_read_extent_iter(c, rbio, bvec_iter,
+ k, &pick, flags);
flags &= ~BCH_READ_MAY_REUSE_BIO;
} else {
diff --git a/drivers/md/bcache/io.h b/drivers/md/bcache/io.h
index 9cfdc0c4902e..a72fe01ac38e 100644
--- a/drivers/md/bcache/io.h
+++ b/drivers/md/bcache/io.h
@@ -50,17 +50,16 @@ struct extent_pick_ptr;
void bch_read_extent_iter(struct cache_set *, struct bch_read_bio *,
struct bvec_iter, struct bkey_s_c k,
- struct extent_pick_ptr *,
- unsigned, unsigned);
+ struct extent_pick_ptr *, unsigned);
static inline void bch_read_extent(struct cache_set *c,
struct bch_read_bio *orig,
struct bkey_s_c k,
struct extent_pick_ptr *pick,
- unsigned skip, unsigned flags)
+ unsigned flags)
{
bch_read_extent_iter(c, orig, orig->bio.bi_iter,
- k, pick, skip, flags);
+ k, pick, flags);
}
enum bch_read_flags {
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index 3ee928ecedd6..4632fb420899 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -481,11 +481,12 @@ static void __bch_data_move(struct closure *cl)
bch_ratelimit_increment(io->context->rate, size);
bio_set_op_attrs(&io->rbio.bio, REQ_OP_READ, 0);
+ io->rbio.bio.bi_iter.bi_sector = bkey_start_offset(&io->key.k);
io->rbio.bio.bi_end_io = read_moving_endio;
bch_read_extent(io->op.c, &io->rbio,
bkey_i_to_s_c(&io->key),
- &pick, 0, BCH_READ_IS_LAST);
+ &pick, BCH_READ_IS_LAST);
}
/*
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 066eb585ed94..5ad20f2d87dc 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -404,20 +404,9 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
POS(s->inode, bio->bi_iter.bi_sector), k) {
struct extent_pick_ptr pick;
unsigned sectors, bytes;
- bool done;
+ bool is_last;
retry:
- BUG_ON(bkey_cmp(bkey_start_pos(k.k),
- POS(s->inode, bio->bi_iter.bi_sector)) > 0);
- BUG_ON(bkey_cmp(k.k->p,
- POS(s->inode, bio->bi_iter.bi_sector)) <= 0);
-
- sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
- bio->bi_iter.bi_sector;
- bytes = sectors << 9;
- done = bytes == bio->bi_iter.bi_size;
-
- swap(bio->bi_iter.bi_size, bytes);
bch_extent_pick_ptr(s->iop.c, k, &pick);
if (IS_ERR(pick.ca)) {
@@ -427,13 +416,13 @@ retry:
goto out;
}
- if (!pick.ca) {
- /* not present (hole), or stale cached data */
- if (cached_dev_cache_miss(&iter, s, bio, sectors)) {
- k = bch_btree_iter_peek_with_holes(&iter);
- goto retry;
- }
- } else {
+ sectors = min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
+ bio->bi_iter.bi_sector;
+ bytes = sectors << 9;
+ is_last = bytes == bio->bi_iter.bi_size;
+ swap(bio->bi_iter.bi_size, bytes);
+
+ if (pick.ca) {
PTR_BUCKET(pick.ca, &pick.ptr)->read_prio =
s->iop.c->prio_clock[READ].hand;
@@ -441,17 +430,22 @@ retry:
s->read_dirty_data = true;
bch_read_extent(s->iop.c, &s->rbio, k, &pick,
- bio->bi_iter.bi_sector -
- bkey_start_offset(k.k),
BCH_READ_FORCE_BOUNCE|
BCH_READ_RETRY_IF_STALE|
- (!s->bypass ? BCH_READ_PROMOTE : 0));
+ (!s->bypass ? BCH_READ_PROMOTE : 0)|
+ (is_last ? BCH_READ_IS_LAST : 0));
+ } else {
+ /* not present (hole), or stale cached data */
+ if (cached_dev_cache_miss(&iter, s, bio, sectors)) {
+ k = bch_btree_iter_peek_with_holes(&iter);
+ goto retry;
+ }
}
swap(bio->bi_iter.bi_size, bytes);
bio_advance(bio, bytes);
- if (done) {
+ if (is_last) {
bch_btree_iter_unlock(&iter);
goto out;
}