-rw-r--r--  drivers/md/bcache/migrate.c     |   9
-rw-r--r--  drivers/md/bcache/move.c        | 133
-rw-r--r--  drivers/md/bcache/move.h        |   2
-rw-r--r--  drivers/md/bcache/move_types.h  |   5
-rw-r--r--  drivers/md/bcache/movinggc.c    |   4
-rw-r--r--  drivers/md/bcache/super.c       |   9
-rw-r--r--  drivers/md/bcache/tier.c        |  10
-rw-r--r--  drivers/md/bcache/tier.h        |   2
8 files changed, 13 insertions, 161 deletions
diff --git a/drivers/md/bcache/migrate.c b/drivers/md/bcache/migrate.c
index 46d05e32b2f6..eb47d6620e1a 100644
--- a/drivers/md/bcache/migrate.c
+++ b/drivers/md/bcache/migrate.c
@@ -78,15 +78,6 @@ int bch_move_data_off_device(struct cache *ca)
BUG_ON(ca->moving_gc_read != NULL);
- /*
- * This may actually need to start the work queue because the
- * device may have always been read-only and never have had it
- * started (moving gc usually starts it but not for RO
- * devices).
- */
-
- bch_queue_start(queue);
-
queue_io_resize(queue, MIGRATE_NR, MIGRATE_READ_NR, MIGRATE_WRITE_NR);
BUG_ON(queue->wq == NULL);
diff --git a/drivers/md/bcache/move.c b/drivers/md/bcache/move.c
index dbe48b7e309c..52a82740304f 100644
--- a/drivers/md/bcache/move.c
+++ b/drivers/md/bcache/move.c
@@ -259,11 +259,6 @@ static void moving_io_destructor(struct closure *cl)
list_del_init(&io->list);
- if (!atomic_read(&q->count) && q->stop_waitcl) {
- closure_put(q->stop_waitcl);
- q->stop_waitcl = NULL;
- }
-
if (q->rotational && bch_queue_reads_pending(q))
kick_writes = false;
@@ -291,21 +286,9 @@ static void moving_io_after_write(struct closure *cl)
static void write_moving(struct moving_io *io)
{
- bool stopped;
struct bch_write_op *op = &io->write.op;
- spin_lock_irq(&io->q->lock);
- BUG_ON(!atomic_read(&io->q->count));
- stopped = io->q->stopped;
- spin_unlock_irq(&io->q->lock);
-
- /*
- * If the queue has been stopped, prevent the write from occurring.
- * This stops all writes on a device going read-only as quickly
- * as possible.
- */
-
- if (op->error || stopped)
+ if (op->error)
closure_return_with_destructor(&io->cl, moving_io_destructor);
else {
closure_call(&op->cl, bch_write, NULL, &io->cl);
@@ -326,8 +309,7 @@ static void bch_queue_write_work(struct work_struct *work)
return;
}
- while (!q->stopped &&
- atomic_read(&q->write_count) < q->max_write_count) {
+ while (atomic_read(&q->write_count) < q->max_write_count) {
io = list_first_entry_or_null(&q->pending,
struct moving_io, list);
/*
@@ -390,15 +372,6 @@ int bch_queue_init(struct moving_queue *q,
return 0;
}
-void bch_queue_start(struct moving_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- q->stopped = false;
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
void queue_io_resize(struct moving_queue *q,
unsigned max_io,
unsigned max_read,
@@ -420,65 +393,6 @@ void bch_queue_destroy(struct moving_queue *q)
q->wq = NULL;
}
-static void bch_queue_cancel_writes(struct moving_queue *q)
-{
- struct moving_io *io;
- unsigned long flags;
- bool read_issued, read_completed;
-
- spin_lock_irqsave(&q->lock, flags);
-
- while (1) {
- io = list_first_entry_or_null(&q->pending,
- struct moving_io,
- list);
- if (!io)
- break;
-
- BUG_ON(io->write_issued);
- list_del_init(&io->list);
- read_issued = io->read_issued;
- read_completed = io->read_completed;
- if (!read_issued && !read_completed && q->rotational) {
- rb_erase(&io->node, &q->tree);
- wake_up(&q->wait);
- }
-
- spin_unlock_irqrestore(&q->lock, flags);
- if (read_completed)
- closure_return_with_destructor_noreturn(&io->cl,
- moving_io_destructor);
- else if (!read_issued)
- moving_io_destructor(&io->cl);
- spin_lock_irqsave(&q->lock, flags);
- }
-
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
-void bch_queue_stop(struct moving_queue *q)
-{
- struct closure waitcl;
-
- closure_init_stack(&waitcl);
-
- spin_lock_irq(&q->lock);
- if (q->stopped)
- BUG_ON(q->stop_waitcl != NULL);
- else {
- q->stopped = true;
- if (atomic_read(&q->count)) {
- q->stop_waitcl = &waitcl;
- closure_get(&waitcl);
- }
- }
- spin_unlock_irq(&q->lock);
-
- bch_queue_cancel_writes(q);
-
- closure_sync(&waitcl);
-}
-
static void pending_recalc_oldest_gens(struct cache_set *c, struct list_head *l)
{
struct moving_io *io;
@@ -515,10 +429,11 @@ static void read_moving_endio(struct bio *bio)
struct closure *cl = bio->bi_private;
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_queue *q = io->q;
- bool stopped;
unsigned long flags;
+ trace_bcache_move_read_done(q, &io->write.key.k);
+
if (bio->bi_error) {
io->write.op.error = bio->bi_error;
moving_error(io->context, MOVING_FLAG_READ);
@@ -526,28 +441,20 @@ static void read_moving_endio(struct bio *bio)
bio_put(bio);
- spin_lock_irqsave(&q->lock, flags);
-
- trace_bcache_move_read_done(q, &io->write.key.k);
-
BUG_ON(!io->read_issued);
BUG_ON(io->read_completed);
+
+ spin_lock_irqsave(&q->lock, flags);
io->read_issued = 0;
io->read_completed = 1;
BUG_ON(!atomic_read(&q->read_count));
atomic_dec(&q->read_count);
- wake_up(&q->wait);
-
- stopped = q->stopped;
- if (stopped)
- list_del_init(&io->list);
spin_unlock_irqrestore(&q->lock, flags);
- if (stopped)
- closure_return_with_destructor(&io->cl,
- moving_io_destructor);
- else if (!q->rotational)
+ wake_up(&q->wait);
+
+ if (!q->rotational)
bch_queue_write(q);
}
@@ -594,7 +501,6 @@ void bch_data_move(struct moving_queue *q,
struct moving_io *io)
{
unsigned size = io->write.key.k.size;
- bool stopped = false;
ctxt->keys_moved++;
ctxt->sectors_moved += size;
@@ -606,11 +512,6 @@ void bch_data_move(struct moving_queue *q,
io->context = ctxt;
spin_lock_irq(&q->lock);
- if (q->stopped) {
- stopped = true;
- goto out;
- }
-
atomic_inc(&q->count);
list_add_tail(&io->list, &q->pending);
trace_bcache_move_read(q, &io->write.key.k);
@@ -623,12 +524,9 @@ void bch_data_move(struct moving_queue *q,
atomic_inc(&q->read_count);
}
-out:
spin_unlock_irq(&q->lock);
- if (stopped)
- moving_io_free(io);
- else if (!q->rotational)
+ if (!q->rotational)
closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl);
}
@@ -639,7 +537,6 @@ static bool bch_queue_read(struct moving_queue *q,
{
struct rb_node *node;
struct moving_io *io;
- bool stopped;
BUG_ON(!q->rotational);
@@ -656,16 +553,10 @@ static bool bch_queue_read(struct moving_queue *q,
io->read_issued = 1;
atomic_inc(&q->read_count);
- stopped = q->stopped;
spin_unlock_irq(&q->lock);
- if (stopped) {
- moving_io_destructor(&io->cl);
- return false;
- } else {
- closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl);
- return true;
- }
+ closure_call(&io->cl, __bch_data_move, NULL, &ctxt->cl);
+ return true;
}
void bch_queue_run(struct moving_queue *q, struct moving_context *ctxt)
diff --git a/drivers/md/bcache/move.h b/drivers/md/bcache/move.h
index 8f30f078f3f5..75f507535887 100644
--- a/drivers/md/bcache/move.h
+++ b/drivers/md/bcache/move.h
@@ -122,7 +122,6 @@ int bch_queue_init(struct moving_queue *,
unsigned max_writes,
bool rotational,
const char *);
-void bch_queue_start(struct moving_queue *);
/*
* bch_queue_full() - return if more reads can be queued with bch_data_move().
@@ -147,7 +146,6 @@ void queue_io_resize(struct moving_queue *,
unsigned,
unsigned);
void bch_queue_destroy(struct moving_queue *);
-void bch_queue_stop(struct moving_queue *);
void bch_queue_recalc_oldest_gens(struct cache_set *, struct moving_queue *);
diff --git a/drivers/md/bcache/move_types.h b/drivers/md/bcache/move_types.h
index 25f6d2fab592..294a26d0c7af 100644
--- a/drivers/md/bcache/move_types.h
+++ b/drivers/md/bcache/move_types.h
@@ -22,14 +22,9 @@ struct moving_queue {
*/
bool rotational;
- /* This can be examined without locking */
- bool stopped;
-
/* Protects everything below */
spinlock_t lock;
- struct closure *stop_waitcl;
-
/*
* Tree of struct moving_io, sorted by moving_io->sort_key.
* Contains reads which have not yet been issued; when a read is
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index e8221054a7a8..ceb85f12ba73 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -287,8 +287,6 @@ int bch_moving_gc_thread_start(struct cache *ca)
/* The moving gc read thread must be stopped */
BUG_ON(ca->moving_gc_read != NULL);
- bch_queue_start(&ca->moving_gc_queue);
-
if (cache_set_init_fault("moving_gc_start"))
return -ENOMEM;
@@ -307,8 +305,6 @@ void bch_moving_gc_stop(struct cache *ca)
ca->moving_gc_pd.rate.rate = UINT_MAX;
bch_ratelimit_reset(&ca->moving_gc_pd.rate);
- bch_queue_stop(&ca->moving_gc_queue);
-
if (ca->moving_gc_read)
kthread_stop(ca->moving_gc_read);
ca->moving_gc_read = NULL;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index ea4ced7b5de0..bf8dfed64939 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -691,10 +691,8 @@ static void __bch_cache_set_read_only(struct cache_set *c)
bch_ratelimit_reset(&c->tiering_pd.rate);
bch_tiering_read_stop(c);
- for_each_cache(ca, c, i) {
- bch_tiering_write_stop(ca);
+ for_each_cache(ca, c, i)
bch_moving_gc_stop(ca);
- }
bch_gc_thread_stop(c);
@@ -846,8 +844,6 @@ static const char *__bch_cache_set_read_write(struct cache_set *c)
percpu_ref_put(&ca->ref);
goto err;
}
-
- bch_tiering_write_start(ca);
}
err = "error starting tiering thread";
@@ -1541,7 +1537,6 @@ static void __bch_cache_read_only(struct cache *ca)
{
trace_bcache_cache_read_only(ca);
- bch_tiering_write_stop(ca);
bch_moving_gc_stop(ca);
/*
@@ -1598,8 +1593,6 @@ static const char *__bch_cache_read_write(struct cache *ca)
trace_bcache_cache_read_write(ca);
- bch_tiering_write_start(ca);
-
trace_bcache_cache_read_write_done(ca);
/* XXX wtf? */
diff --git a/drivers/md/bcache/tier.c b/drivers/md/bcache/tier.c
index 069e68a38a4a..b69456d9f95c 100644
--- a/drivers/md/bcache/tier.c
+++ b/drivers/md/bcache/tier.c
@@ -253,11 +253,6 @@ int bch_tiering_init_cache(struct cache *ca)
"bch_tier_write");
}
-void bch_tiering_write_start(struct cache *ca)
-{
- bch_queue_start(&ca->tiering_queue);
-}
-
int bch_tiering_read_start(struct cache_set *c)
{
struct task_struct *t;
@@ -277,11 +272,6 @@ void bch_tiering_write_destroy(struct cache *ca)
bch_queue_destroy(&ca->tiering_queue);
}
-void bch_tiering_write_stop(struct cache *ca)
-{
- bch_queue_stop(&ca->tiering_queue);
-}
-
void bch_tiering_read_stop(struct cache_set *c)
{
if (!IS_ERR_OR_NULL(c->tiering_read)) {
diff --git a/drivers/md/bcache/tier.h b/drivers/md/bcache/tier.h
index 57b4acf86fb5..94923c2e3d7f 100644
--- a/drivers/md/bcache/tier.h
+++ b/drivers/md/bcache/tier.h
@@ -4,9 +4,7 @@
void bch_tiering_init_cache_set(struct cache_set *);
int bch_tiering_init_cache(struct cache *);
int bch_tiering_read_start(struct cache_set *);
-void bch_tiering_write_start(struct cache *);
void bch_tiering_write_destroy(struct cache *);
-void bch_tiering_write_stop(struct cache *);
void bch_tiering_read_stop(struct cache_set *);
#endif