author     Kent Overstreet <kent.overstreet@gmail.com>  2020-06-22 17:35:56 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>  2020-06-28 18:12:46 -0400
commit     8aa6e6e10ce712bcb10be9a206ec13d00ad0e7e6 (patch)
tree       347feb718da57de42ae6f2e5367df3e2e5ad6466 /drivers/md/bcache/request.c
parent     7b42d65c0607af212b9ef3aa834ea449e3f846b1 (diff)

Initial bcache/bcachefs integration (bcache2)
Diffstat (limited to 'drivers/md/bcache/request.c')
-rw-r--r--  drivers/md/bcache/request.c  110
1 file changed, 65 insertions(+), 45 deletions(-)
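
This patch starts wiring the legacy bcache request path up to bcache2/bcachefs:
request2.h is dropped in favour of bch2.h, the cache-set-specific half of
cached_dev_make_request() is split out into bch1_cached_dev_make_request(), and
the remaining wrapper routes each bio either to that legacy path or to
bch2_cached_dev_make_request(). The bypass/writeback helpers now take
block_size and gc in_use as explicit arguments rather than digging them out of
struct cache_set, presumably so the same helpers can serve both backends. A
minimal sketch of the resulting dispatch, reconstructed from the hunks below
(accounting, the bi_sector remap, and the io_disable check are elided; reading
d->c == NULL as "attached to a bcache2 cache set" is an inference from this
diff, not a documented contract):

    blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
    {
            struct bcache_device *d = bio->bi_disk->private_data;
            struct cached_dev *dc = container_of(d, struct cached_dev, disk);

            /* ... io accounting, remap bio to dc->bdev, io_disable check ... */

            if (cached_dev_get(dc)) {
                    if (d->c)       /* legacy bcache cache set attached */
                            bch1_cached_dev_make_request(dc, bio);
                    else            /* inferred: bcache2/bcachefs owns this device */
                            bch2_cached_dev_make_request(dc, bio);
            } else {
                    /* no cache attached: pass through to the backing device */
                    detached_dev_do_request(d, bio);
            }
            return BLK_QC_T_NONE;
    }
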
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 755bc6a448cb..ac4d1c545d61 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -8,11 +8,11 @@
*/
#include "bcache.h"
+#include "bch2.h"
#include "btree.h"
#include "debug.h"
#include "io.h"
#include "request.h"
-#include "request2.h"
#include "writeback.h"
#include <linux/module.h>
@@ -851,7 +851,8 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
if (should_writeback(dc, s->orig_bio,
cache_mode(dc),
- s->iop.bypass)) {
+ s->iop.bypass,
+ dc->disk.c->gc_stats.in_use)) {
s->iop.bypass = false;
s->iop.writeback = true;
}
@@ -1013,34 +1014,57 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
+static void bch1_cached_dev_make_request(struct cached_dev *dc, struct bio *bio)
{
+ struct bcache_device *d = &dc->disk;
+ struct cache_set *c = d->c;
struct search *s;
- struct bcache_device *d = bio->bi_disk->private_data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
- int rw = bio_data_dir(bio);
- if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
- dc->io_disable)) {
+ if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) {
+ cached_dev_put(dc);
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
+ }
+
+ s = search_alloc(bio, d);
+ trace_bcache_request_start(s->d, bio);
+
+ if (atomic_read(&c->idle_counter))
+ atomic_set(&c->idle_counter, 0);
+ /*
+ * If at_max_writeback_rate of cache set is true and new I/O
+ * comes, quit max writeback rate of all cached devices
+ * attached to this cache set, and set at_max_writeback_rate
+ * to false.
+ */
+ if (unlikely(atomic_read(&c->at_max_writeback_rate) == 1)) {
+ atomic_set(&c->at_max_writeback_rate, 0);
+ quit_max_writeback_rate(c, dc);
}
- if (likely(d->c)) {
- if (atomic_read(&d->c->idle_counter))
- atomic_set(&d->c->idle_counter, 0);
+ if (!bio->bi_iter.bi_size) {
/*
- * If at_max_writeback_rate of cache set is true and new I/O
- * comes, quit max writeback rate of all cached devices
- * attached to this cache set, and set at_max_writeback_rate
- * to false.
+ * can't call bch_journal_meta from under
+ * generic_make_request
*/
- if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
- atomic_set(&d->c->at_max_writeback_rate, 0);
- quit_max_writeback_rate(d->c, dc);
- }
+ continue_at_nobarrier(&s->cl, cached_dev_nodata, bcache_wq);
+ } else {
+ s->iop.bypass = bch_check_should_bypass(dc, bio,
+ c->sb.block_size,
+ c->gc_stats.in_use);
+
+ if (bio_data_dir(bio) == WRITE)
+ cached_dev_write(dc, s);
+ else
+ cached_dev_read(dc, s);
}
+}
+
+blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct bcache_device *d = bio->bi_disk->private_data;
+ struct cached_dev *dc = container_of(d, struct cached_dev, disk);
generic_start_io_acct(q,
bio_op(bio),
@@ -1050,29 +1074,21 @@ blk_qc_t cached_dev_make_request(struct request_queue *q, struct bio *bio)
bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset;
- if (cached_dev_get(dc)) {
- s = search_alloc(bio, d);
- trace_bcache_request_start(s->d, bio);
-
- if (!bio->bi_iter.bi_size) {
- /*
- * can't call bch_journal_meta from under
- * generic_make_request
- */
- continue_at_nobarrier(&s->cl,
- cached_dev_nodata,
- bcache_wq);
- } else {
- s->iop.bypass = check_should_bypass(dc, bio);
+ if (unlikely(dc->io_disable)) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ return BLK_QC_T_NONE;
+ }
- if (rw)
- cached_dev_write(dc, s);
- else
- cached_dev_read(dc, s);
- }
- } else
+ if (cached_dev_get(dc)) {
+ if (d->c)
+ bch1_cached_dev_make_request(dc, bio);
+ else
+ bch2_cached_dev_make_request(dc, bio);
+ } else {
/* I/O request sent to backing device */
detached_dev_do_request(d, bio);
+ }
return BLK_QC_T_NONE;
}
@@ -1099,12 +1115,16 @@ static int cached_dev_congested(void *data, int bits)
return 1;
if (cached_dev_get(dc)) {
- unsigned int i;
- struct cache *ca;
+ if (d->c) {
+ unsigned int i;
+ struct cache *ca;
- for_each_cache(ca, d->c, i) {
- q = bdev_get_queue(ca->bdev);
- ret |= bdi_congested(q->backing_dev_info, bits);
+ for_each_cache(ca, d->c, i) {
+ q = bdev_get_queue(ca->bdev);
+ ret |= bdi_congested(q->backing_dev_info, bits);
+ }
+ } else {
+ /* bcache2: */
}
cached_dev_put(dc);