summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--drivers/md/bcache/alloc.c4
-rw-r--r--drivers/md/bcache/bcache.h7
-rw-r--r--drivers/md/bcache/move.c2
-rw-r--r--drivers/md/bcache/movinggc.c33
-rw-r--r--drivers/md/bcache/request.c13
-rw-r--r--drivers/md/bcache/request.h3
-rw-r--r--drivers/md/bcache/super.c9
-rw-r--r--drivers/md/bcache/sysfs.c8
-rw-r--r--drivers/md/bcache/tier.c18
9 files changed, 62 insertions(+), 35 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index a294251a9fe7..42da1ffd2b50 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -97,7 +97,7 @@ static void alloc_failed(struct cache *ca)
if (c->cache_by_alloc[i].nr_devices) {
c->tiering_pd.rate.rate = UINT_MAX;
bch_ratelimit_reset(&c->tiering_pd.rate);
- wake_up_process(c->tiering_thread);
+ wake_up_process(c->tiering_read);
trace_bcache_alloc_wake_tiering(ca);
goto wait;
}
@@ -105,7 +105,7 @@ static void alloc_failed(struct cache *ca)
/* If this is the highest tier cache, just do a btree GC */
ca->moving_gc_pd.rate.rate = UINT_MAX;
bch_ratelimit_reset(&ca->moving_gc_pd.rate);
- wake_up_process(ca->moving_gc_thread);
+ wake_up_process(ca->moving_gc_read);
trace_bcache_alloc_wake_moving(ca);
wait:
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 75ea0ce76281..3fbe452c203c 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -564,7 +564,8 @@ struct cache {
DECLARE_HEAP(struct bucket *, heap);
/* Moving GC: */
- struct task_struct *moving_gc_thread;
+ struct task_struct *moving_gc_read;
+ struct workqueue_struct *moving_gc_write;
struct keybuf moving_gc_keys;
struct bch_pd_controller moving_gc_pd;
@@ -779,7 +780,9 @@ struct cache_set {
spinlock_t read_race_lock;
/* TIERING */
- struct task_struct *tiering_thread;
+ struct task_struct *tiering_read;
+ struct workqueue_struct *tiering_write;
+
struct keybuf tiering_keys;
struct bch_pd_controller tiering_pd;
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index 11788d5c957e..a5341a9c29ef 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -93,5 +93,5 @@ void bch_data_move(struct closure *cl)
bch_submit_bbio(&io->bio, ca, &io->w->key, ptr, false);
- continue_at(cl, write_moving, io->op.c->wq);
+ continue_at(cl, write_moving, io->op.io_wq);
}
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 6697e468fca9..df7a8748e304 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -99,6 +99,7 @@ static void read_moving(struct cache *ca, struct moving_io_stats *stats)
bch_data_insert_op_init(&io->op, c, &io->bio.bio, 0,
false, false, false,
&io->w->key, &io->w->key);
+ io->op.io_wq = ca->moving_gc_write;
io->op.moving_gc = true;
trace_bcache_gc_copy(&w->key);
@@ -262,30 +263,34 @@ static int bch_moving_gc_thread(void *arg)
void bch_moving_gc_stop(struct cache *ca)
{
+ cancel_delayed_work_sync(&ca->moving_gc_pd.update);
+
ca->moving_gc_pd.rate.rate = UINT_MAX;
bch_ratelimit_reset(&ca->moving_gc_pd.rate);
- if (ca->moving_gc_thread)
- kthread_stop(ca->moving_gc_thread);
- ca->moving_gc_thread = NULL;
+ if (ca->moving_gc_read)
+ kthread_stop(ca->moving_gc_read);
+ ca->moving_gc_read = NULL;
- cancel_delayed_work_sync(&ca->moving_gc_pd.update);
+ if (ca->moving_gc_write)
+ destroy_workqueue(ca->moving_gc_write);
+ ca->moving_gc_write = NULL;
}
int bch_moving_gc_thread_start(struct cache *ca)
{
- char moving_gc_name[16];
-
- snprintf(moving_gc_name, sizeof(moving_gc_name),
- "bcache_mv/%s", ca->bdev->bd_disk->disk_name);
+ struct task_struct *t;
- BUG_ON(ca->moving_gc_thread);
- ca->moving_gc_thread = kthread_create(bch_moving_gc_thread, ca,
- moving_gc_name);
- if (IS_ERR(ca->moving_gc_thread))
- return PTR_ERR(ca->moving_gc_thread);
+ ca->moving_gc_write = alloc_workqueue("bch_copygc_write",
+ WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+ if (!ca->moving_gc_write)
+ return -ENOMEM;
- wake_up_process(ca->moving_gc_thread);
+ t = kthread_create(bch_moving_gc_thread, ca, "bch_copygc_read");
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ ca->moving_gc_read = t;
+ wake_up_process(ca->moving_gc_read);
bch_pd_controller_start(&ca->moving_gc_pd);
ca->moving_gc_pd.d_term = 0;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index a53aed582829..5a77c8ab86bc 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -154,7 +154,7 @@ static void bch_data_insert_keys_done(struct closure *cl)
}
if (!op->insert_data_done)
- continue_at(cl, bch_data_insert_start, op->c->wq);
+ continue_at(cl, bch_data_insert_start, op->io_wq);
bch_keylist_free(&op->insert_keys);
closure_return(cl);
@@ -335,7 +335,7 @@ static void bch_data_insert_start(struct closure *cl)
* this case if open_bucket_nr > 1. */
if (bch_keylist_empty(&op->insert_keys))
continue_at(cl, bch_data_insert_start,
- op->c->wq);
+ op->io_wq);
else
continue_at(cl, bch_data_insert_keys,
op->c->wq);
@@ -425,13 +425,18 @@ void bch_data_insert(struct closure *cl)
trace_bcache_write(c, inode, op->bio, !KEY_CACHED(&op->insert_key),
op->discard);
- memset(op->open_buckets, 0, sizeof(op->open_buckets));
-
if (!bio_sectors(op->bio)) {
WARN_ONCE(1, "bch_data_insert() called with empty bio");
closure_return(cl);
}
+ /*
+ * This ought to be initialized in bch_data_insert_op_init(), but struct
+ * cache_set isn't exported
+ */
+ if (!op->io_wq)
+ op->io_wq = op->c->wq;
+
if (!op->discard)
bch_increment_clock(c, bio_sectors(op->bio), WRITE);
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 5398771ab9e1..b159bee07b9b 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -11,6 +11,7 @@ struct kmem_cache;
struct data_insert_op {
struct closure cl;
struct cache_set *c;
+ struct workqueue_struct *io_wq;
struct bio *bio;
/* Used internally, do not touch */
@@ -62,6 +63,7 @@ static inline void bch_data_insert_op_init(struct data_insert_op *op,
struct bkey *replace_key)
{
op->c = c;
+ op->io_wq = NULL;
op->bio = bio;
op->write_point = write_point;
op->error = 0;
@@ -70,6 +72,7 @@ static inline void bch_data_insert_op_init(struct data_insert_op *op,
op->discard = discard;
op->flush = flush;
+ memset(op->open_buckets, 0, sizeof(op->open_buckets));
bch_keylist_init(&op->insert_keys);
bkey_copy(&op->insert_key, insert_key);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 817892fe746d..54823cdb2d97 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1304,12 +1304,15 @@ static void cache_set_flush(struct closure *cl)
kobject_put(&c->internal);
kobject_del(&c->kobj);
+ cancel_delayed_work_sync(&c->tiering_pd.update);
+
c->tiering_pd.rate.rate = UINT_MAX;
bch_ratelimit_reset(&c->tiering_pd.rate);
- if (!IS_ERR_OR_NULL(c->tiering_thread))
- kthread_stop(c->tiering_thread);
+ if (!IS_ERR_OR_NULL(c->tiering_read))
+ kthread_stop(c->tiering_read);
- cancel_delayed_work_sync(&c->tiering_pd.update);
+ if (c->tiering_write)
+ destroy_workqueue(c->tiering_write);
if (!IS_ERR_OR_NULL(c->gc_thread))
kthread_stop(c->gc_thread);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c5403223020f..f30362e03e6d 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -652,8 +652,8 @@ STORE(__bch_cache_set)
?: (ssize_t) size;
for_each_cache(ca, c, i)
- if (ca->moving_gc_thread)
- wake_up_process(ca->moving_gc_thread);
+ if (ca->moving_gc_read)
+ wake_up_process(ca->moving_gc_read);
return ret;
}
@@ -661,8 +661,8 @@ STORE(__bch_cache_set)
ssize_t ret = strtoul_safe(buf, c->tiering_enabled)
?: (ssize_t) size;
- if (c->tiering_thread)
- wake_up_process(c->tiering_thread);
+ if (c->tiering_read)
+ wake_up_process(c->tiering_read);
return ret;
}
diff --git a/drivers/md/bcache/tier.c b/drivers/md/bcache/tier.c
index 387fae49710b..5d6e239689b4 100644
--- a/drivers/md/bcache/tier.c
+++ b/drivers/md/bcache/tier.c
@@ -111,6 +111,7 @@ static void read_tiering(struct cache_set *c)
bch_data_insert_op_init(&io->op, c, &io->bio.bio,
write_point, false, false, false,
&io->w->key, &io->w->key);
+ io->op.io_wq = c->tiering_write;
io->op.tiering = 1;
io->op.tier = 1;
@@ -166,13 +167,20 @@ void bch_tiering_init_cache_set(struct cache_set *c)
int bch_tiering_thread_start(struct cache_set *c)
{
- c->tiering_thread = kthread_create(bch_tiering_thread, c,
- "bcache_tier");
- if (IS_ERR(c->tiering_thread))
- return PTR_ERR(c->tiering_thread);
+ struct task_struct *t;
+ c->tiering_write = alloc_workqueue("bch_tier_write",
+ WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+ if (!c->tiering_write)
+ return -ENOMEM;
+
+ t = kthread_create(bch_tiering_thread, c, "bch_tier_read");
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+
+ c->tiering_read = t;
bch_pd_controller_start(&c->tiering_pd);
- wake_up_process(c->tiering_thread);
+ wake_up_process(c->tiering_read);
return 0;
}