summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  drivers/md/bcache/bcache.h     6
-rw-r--r--  drivers/md/bcache/move.c      15
-rw-r--r--  drivers/md/bcache/movinggc.c   3
-rw-r--r--  drivers/md/bcache/request.c   43
-rw-r--r--  drivers/md/bcache/request.h    3
-rw-r--r--  drivers/md/bcache/super.c     25
-rw-r--r--  drivers/md/bcache/tier.c       5
-rw-r--r--  drivers/md/bcache/writeback.c  2
8 files changed, 36 insertions(+), 66 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 486da65277df..223c52a82dfc 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -535,7 +535,6 @@ struct cache {
/* Moving GC: */
struct task_struct *moving_gc_thread;
- struct workqueue_struct *moving_gc_wq;
struct keybuf moving_gc_keys;
struct bch_pd_controller moving_gc_pd;
@@ -677,7 +676,7 @@ struct cache_set {
struct closure_waitlist mca_wait;
struct task_struct *btree_cache_alloc_lock;
- struct workqueue_struct *btree_insert_wq;
+ struct workqueue_struct *wq;
/* ALLOCATION */
struct cache_tier cache_by_alloc[CACHE_TIERS];
@@ -742,7 +741,6 @@ struct cache_set {
/* TIERING */
struct task_struct *tiering_thread;
- struct workqueue_struct *tiering_wq;
struct keybuf tiering_keys;
struct bch_pd_controller tiering_pd;
@@ -1020,7 +1018,7 @@ void bch_write_bdev_super(struct cached_dev *, struct closure *);
struct bcache_device *bch_dev_get_by_inode(struct cache_set *, u64);
-extern struct workqueue_struct *bcache_wq, *bcache_io_wq;
+extern struct workqueue_struct *bcache_io_wq;
extern const char * const bch_cache_modes[];
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index 90ce26466d26..183650a1cea6 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -32,20 +32,13 @@ static void moving_io_destructor(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
- bch_keybuf_del(io->keybuf, io->w);
- kfree(io);
-}
-
-static void write_moving_finish(struct closure *cl)
-{
- struct moving_io *io = container_of(cl, struct moving_io, cl);
-
bio_free_pages(&io->bio.bio);
if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
- closure_return_with_destructor(cl, moving_io_destructor);
+ bch_keybuf_del(io->keybuf, io->w);
+ kfree(io);
}
static void write_moving(struct closure *cl)
@@ -61,7 +54,7 @@ static void write_moving(struct closure *cl)
closure_call(&op->cl, bch_data_insert, NULL, cl);
}
- continue_at(cl, write_moving_finish, io->op.wq);
+ closure_return_with_destructor(cl, moving_io_destructor);
}
static void read_moving_endio(struct bio *bio)
@@ -100,5 +93,5 @@ void bch_data_move(struct closure *cl)
bch_submit_bbio(bio, io->op.c, &io->w->key, ptr);
- continue_at(cl, write_moving, io->op.wq);
+ continue_at(cl, write_moving, io->op.c->wq);
}
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index fa83cd68f840..4866e5365618 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -94,8 +94,7 @@ static void read_moving(struct cache *ca, struct moving_io_stats *stats)
io->keybuf = &ca->moving_gc_keys;
io->stats = stats;
- bch_data_insert_op_init(&io->op, c, ca->moving_gc_wq,
- &io->bio.bio, 0,
+ bch_data_insert_op_init(&io->op, c, &io->bio.bio, 0,
false, false, false,
&io->w->key, &io->w->key);
io->op.moving_gc = true;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index c57de46337fc..9e29425ca743 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -99,7 +99,7 @@ static void bch_data_insert_keys_done(struct closure *cl)
}
if (!op->insert_data_done)
- continue_at(cl, bch_data_insert_start, op->wq);
+ continue_at(cl, bch_data_insert_start, op->c->wq);
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -121,7 +121,7 @@ static void __bch_data_insert_keys(struct closure *cl)
}
if (ret == -EAGAIN)
- continue_at(cl, __bch_data_insert_keys, op->c->btree_insert_wq);
+ continue_at(cl, __bch_data_insert_keys, op->c->wq);
closure_return(cl);
}
@@ -139,7 +139,7 @@ static void bch_data_insert_keys(struct closure *cl)
__bch_btree_op_init(&op->op, id, reserve, 0);
closure_call(&op->op.cl, __bch_data_insert_keys, NULL, cl);
- continue_at(cl, bch_data_insert_keys_done, op->c->btree_insert_wq);
+ continue_at(cl, bch_data_insert_keys_done, op->c->wq);
}
/**
@@ -177,8 +177,7 @@ static void bch_data_invalidate(struct closure *cl)
op->insert_data_done = true;
bio_put(bio);
out:
- continue_at(cl, bch_data_insert_keys,
- op->c->btree_insert_wq);
+ continue_at(cl, bch_data_insert_keys, op->c->wq);
}
static void bch_data_insert_error(struct closure *cl)
@@ -222,7 +221,7 @@ static void bch_data_insert_endio(struct bio *bio)
op->error = bio->bi_error;
else if (!op->replace)
set_closure_fn(cl, bch_data_insert_error,
- op->c->btree_insert_wq);
+ op->c->wq);
else
set_closure_fn(cl, NULL, NULL);
}
@@ -255,15 +254,14 @@ static void bch_data_insert_start(struct closure *cl)
if (open_bucket_nr == ARRAY_SIZE(op->open_buckets))
continue_at(cl, bch_data_insert_keys,
- op->c->btree_insert_wq);
+ op->c->wq);
/* for the device pointers and 1 for the chksum */
if (bch_keylist_realloc(&op->insert_keys,
KEY_U64s(&op->insert_key) +
BKEY_PAD_PTRS +
(KEY_CSUM(&op->insert_key) ? 1 : 0)))
- continue_at(cl, bch_data_insert_keys,
- op->c->btree_insert_wq);
+ continue_at(cl, bch_data_insert_keys, op->c->wq);
memset(ptrs_to_write, 0, sizeof(ptrs_to_write));
@@ -281,10 +279,11 @@ static void bch_data_insert_start(struct closure *cl)
* before allocating another open bucket. We only hit
* this case if open_bucket_nr > 1. */
if (bch_keylist_empty(&op->insert_keys))
- continue_at(cl, bch_data_insert_start, op->wq);
+ continue_at(cl, bch_data_insert_start,
+ op->c->wq);
else
continue_at(cl, bch_data_insert_keys,
- op->c->btree_insert_wq);
+ op->c->wq);
} else if (IS_ERR(b))
goto err;
@@ -309,7 +308,7 @@ static void bch_data_insert_start(struct closure *cl)
} while (n != bio);
op->insert_data_done = true;
- continue_at(cl, bch_data_insert_keys, op->c->btree_insert_wq);
+ continue_at(cl, bch_data_insert_keys, op->c->wq);
err:
BUG_ON(op->wait);
@@ -337,8 +336,7 @@ err:
bio_put(bio);
if (!bch_keylist_empty(&op->insert_keys))
- continue_at(cl, bch_data_insert_keys,
- op->c->btree_insert_wq);
+ continue_at(cl, bch_data_insert_keys, op->c->wq);
else
closure_return(cl);
}
@@ -517,7 +515,6 @@ static void __cache_promote(struct cache_set *c, struct bio *orig_bio,
op->orig_bio = orig_bio;
bch_data_insert_op_init(&op->iop, c,
- bcache_wq,
bio,
hash_long((unsigned long) current, 16),
false,
@@ -534,7 +531,7 @@ static void __cache_promote(struct cache_set *c, struct bio *orig_bio,
bbio->submit_time_us = local_clock_us();
closure_bio_submit(bio, &op->cl);
- continue_at(&op->cl, cache_promote_write, bcache_wq);
+ continue_at(&op->cl, cache_promote_write, c->wq);
out_free:
kfree(op);
out_submit:
@@ -926,7 +923,7 @@ static void cache_lookup(struct closure *cl)
&KEY(s->inode, bio->bi_iter.bi_sector, 0),
cache_lookup_fn, MAP_HOLES | MAP_ASYNC);
if (ret == -EAGAIN)
- continue_at(cl, cache_lookup, bcache_wq);
+ continue_at(cl, cache_lookup, s->iop.c->wq);
else if (ret)
pr_err("error %i", ret);
@@ -1067,9 +1064,9 @@ static void cached_dev_read_done_bh(struct closure *cl)
trace_bcache_read(s->orig_bio, !s->cache_miss, s->bypass);
if (s->iop.error)
- continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
+ continue_at_nobarrier(cl, cached_dev_read_error, s->iop.c->wq);
else if (dc->verify)
- continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
+ continue_at_nobarrier(cl, cached_dev_read_done, s->iop.c->wq);
else
continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
@@ -1220,7 +1217,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
closure_bio_submit(bio, cl);
}
- bch_data_insert_op_init(&s->iop, dc->disk.c, bcache_wq, insert_bio,
+ bch_data_insert_op_init(&s->iop, dc->disk.c, insert_bio,
hash_long((unsigned long) current, 16),
!KEY_CACHED(&insert_key), bypass,
bio->bi_opf & (REQ_PREFLUSH|REQ_FUA),
@@ -1269,7 +1266,7 @@ static void __cached_dev_make_request(struct request_queue *q, struct bio *bio)
*/
continue_at_nobarrier(&s->cl,
cached_dev_nodata,
- bcache_wq);
+ d->c->wq);
} else {
s->bypass = check_should_bypass(dc, bio, rw);
@@ -1390,13 +1387,13 @@ static void __flash_dev_make_request(struct request_queue *q, struct bio *bio)
*/
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
- bcache_wq);
+ d->c->wq);
}
bch_increment_clock(d->c, bio->bi_iter.bi_size, rw);
if (rw) {
- bch_data_insert_op_init(&s->iop, d->c, bcache_wq, bio,
+ bch_data_insert_op_init(&s->iop, d->c, bio,
hash_long((unsigned long) current, 16),
true,
bio_op(bio) == REQ_OP_DISCARD,
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index c97cb0477d7e..913160680f2e 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -11,7 +11,6 @@ struct kmem_cache;
struct data_insert_op {
struct closure cl;
struct cache_set *c;
- struct workqueue_struct *wq;
struct bio *bio;
/* Used internally, do not touch */
@@ -56,7 +55,6 @@ struct data_insert_op {
static inline void bch_data_insert_op_init(struct data_insert_op *op,
struct cache_set *c,
- struct workqueue_struct *wq,
struct bio *bio,
unsigned write_point,
bool wait, bool discard, bool flush,
@@ -64,7 +62,6 @@ static inline void bch_data_insert_op_init(struct data_insert_op *op,
struct bkey *replace_key)
{
op->c = c;
- op->wq = wq;
op->bio = bio;
op->write_point = write_point;
op->error = 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 49dc4ebf598a..3cfb396fba42 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -61,7 +61,7 @@ static LIST_HEAD(uncached_devices);
static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
-struct workqueue_struct *bcache_wq, *bcache_io_wq;
+struct workqueue_struct *bcache_io_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
@@ -1275,10 +1275,8 @@ static void cache_set_free(struct closure *cl)
bch_bset_sort_state_free(&c->sort);
- if (c->tiering_wq)
- destroy_workqueue(c->tiering_wq);
- if (c->btree_insert_wq)
- destroy_workqueue(c->btree_insert_wq);
+ if (c->wq)
+ destroy_workqueue(c->wq);
if (c->bio_split)
bioset_free(c->bio_split);
mempool_destroy(c->fill_iter);
@@ -1482,10 +1480,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
bucket_pages(c))) ||
!(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
!(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(c->btree_insert_wq = alloc_workqueue("bcache_btree",
- WQ_MEM_RECLAIM, 0)) ||
- !(c->tiering_wq = alloc_workqueue("bcache_tier",
- WQ_MEM_RECLAIM, 0)) ||
+ !(c->wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
bch_journal_alloc(c) ||
bch_btree_cache_alloc(c) ||
bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
@@ -1793,9 +1788,6 @@ void bch_cache_release(struct kobject *kobj)
ca->set->cache[ca->sb.nr_this_dev] = NULL;
}
- if (ca->moving_gc_wq)
- destroy_workqueue(ca->moving_gc_wq);
-
if (ca->replica_set)
bioset_free(ca->replica_set);
@@ -1856,9 +1848,7 @@ static int cache_alloc(struct cache *ca)
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
- !(ca->replica_set = bioset_create(4, offsetof(struct bbio, bio))) ||
- !(ca->moving_gc_wq = alloc_workqueue("bcache_move",
- WQ_MEM_RECLAIM, 0)))
+ !(ca->replica_set = bioset_create(4, offsetof(struct bbio, bio))))
return -ENOMEM;
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
@@ -2128,8 +2118,6 @@ static void bcache_exit(void)
kobject_put(bcache_kobj);
if (bcache_io_wq)
destroy_workqueue(bcache_io_wq);
- if (bcache_wq)
- destroy_workqueue(bcache_wq);
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
@@ -2155,8 +2143,7 @@ static int __init bcache_init(void)
return bcache_major;
}
- if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
- !(bcache_io_wq = alloc_workqueue("bcache_io", WQ_MEM_RECLAIM, 0)) ||
+ if (!(bcache_io_wq = alloc_workqueue("bcache_io", WQ_MEM_RECLAIM, 0)) ||
!(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
sysfs_create_files(bcache_kobj, files) ||
bch_request_init() ||
diff --git a/drivers/md/bcache/tier.c b/drivers/md/bcache/tier.c
index 034974b61996..114efb36d6cd 100644
--- a/drivers/md/bcache/tier.c
+++ b/drivers/md/bcache/tier.c
@@ -103,9 +103,8 @@ static void read_tiering(struct cache_set *c)
io->keybuf = &c->tiering_keys;
io->stats = &stats;
- bch_data_insert_op_init(&io->op, c, c->tiering_wq,
- &io->bio.bio, write_point,
- false, false, false,
+ bch_data_insert_op_init(&io->op, c, &io->bio.bio,
+ write_point, false, false, false,
&io->w->key, &io->w->key);
io->op.tiering = 1;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index db6851d24487..58569d80139e 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -143,7 +143,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
}
- continue_at(cl, write_dirty_finish, io->dc->disk.c->btree_insert_wq);
+ continue_at(cl, write_dirty_finish, io->dc->disk.c->wq);
}
static void read_dirty_endio(struct bio *bio)