summary refs log tree commit diff
path: root/drivers/md/bcache/backingdev.h
diff options
context:
space:
mode:
author Kent Overstreet <kent.overstreet@gmail.com> 2020-06-22 17:35:56 -0400
committer Kent Overstreet <kent.overstreet@gmail.com> 2020-06-28 18:12:46 -0400
commit 8aa6e6e10ce712bcb10be9a206ec13d00ad0e7e6 (patch)
tree 347feb718da57de42ae6f2e5367df3e2e5ad6466 /drivers/md/bcache/backingdev.h
parent 7b42d65c0607af212b9ef3aa834ea449e3f846b1 (diff)
Initial bcache/bcachefs integration (branch: bcache2)
Diffstat (limited to 'drivers/md/bcache/backingdev.h')
-rw-r--r-- drivers/md/bcache/backingdev.h | 39
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/drivers/md/bcache/backingdev.h b/drivers/md/bcache/backingdev.h
index 58362eb7902a..038b532e91d8 100644
--- a/drivers/md/bcache/backingdev.h
+++ b/drivers/md/bcache/backingdev.h
@@ -22,7 +22,11 @@ struct bcache_device {
struct kobject kobj;
struct cache_set *c;
- unsigned int id;
+ struct bch_fs *c2;
+
+ u64 id;
+ struct inode *inode;
+
#define BCACHEDEVNAME_SIZE 12
char name[BCACHEDEVNAME_SIZE];
@@ -49,6 +53,11 @@ struct bcache_device {
unsigned int cmd, unsigned long arg);
};
+static inline bool bcache_dev_is_attached(struct bcache_device *d)
+{
+ return d->c != NULL || d->c2 != NULL;
+}
+
enum stop_on_failure {
BCH_CACHED_DEV_STOP_AUTO = 0,
BCH_CACHED_DEV_STOP_ALWAYS,
@@ -87,6 +96,9 @@ struct cached_dev {
*/
atomic_t running;
+ struct bio_set bch2_bio_read;
+ mempool_t bch2_io_write;
+
/*
* Writes take a shared lock from start to finish; scanning for dirty
* data to refill the rb tree requires an exclusive lock.
@@ -225,6 +237,31 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
}
}
+static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ unsigned int cache_mode, bool would_skip,
+ unsigned int in_use)
+{
+ if (cache_mode != CACHE_MODE_WRITEBACK ||
+ test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
+ in_use > bch_cutoff_writeback_sync)
+ return false;
+
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ return false;
+
+ if (dc->partial_stripes_expensive &&
+ bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
+ bio_sectors(bio)))
+ return true;
+
+ if (would_skip)
+ return false;
+
+ return (op_is_sync(bio->bi_opf) ||
+ bio->bi_opf & (REQ_META|REQ_PRIO) ||
+ in_use <= bch_cutoff_writeback);
+}
+
static inline void bch_writeback_queue(struct cached_dev *dc)
{
if (!IS_ERR_OR_NULL(dc->writeback_thread))