summaryrefslogtreecommitdiff
path: root/libbcache/io.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2017-03-01 01:45:15 -0900
committerKent Overstreet <kent.overstreet@gmail.com>2017-03-09 09:14:11 -0900
commita17f7bcec7ed810a247c24e56229af8f43a9a6ae (patch)
tree1b2d60b21661bd2991324e3efaa83b3cdd87a783 /libbcache/io.c
parent171ee48e57be78f4e95954c99851553fa523bf91 (diff)
cmd_migrate
Diffstat (limited to 'libbcache/io.c')
-rw-r--r--libbcache/io.c25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/libbcache/io.c b/libbcache/io.c
index be99a973..a3df3794 100644
--- a/libbcache/io.c
+++ b/libbcache/io.c
@@ -722,9 +722,7 @@ void bch_wake_delayed_writes(unsigned long data)
spin_lock_irqsave(&c->foreground_write_pd_lock, flags);
while ((op = c->write_wait_head)) {
- if (!test_bit(BCH_FS_RO, &c->flags) &&
- !test_bit(BCH_FS_STOPPING, &c->flags) &&
- time_after(op->expires, jiffies)) {
+ if (time_after(op->expires, jiffies)) {
mod_timer(&c->foreground_write_wakeup, op->expires);
break;
}
@@ -1068,9 +1066,7 @@ static void __bch_read_endio(struct cache_set *c, struct bch_read_bio *rbio)
return;
}
- if (rbio->promote &&
- !test_bit(BCH_FS_RO, &c->flags) &&
- !test_bit(BCH_FS_STOPPING, &c->flags)) {
+ if (rbio->promote) {
struct cache_promote_op *promote = rbio->promote;
struct closure *cl = &promote->cl;
@@ -1133,13 +1129,26 @@ static void bch_read_endio(struct bio *bio)
preempt_disable();
d = this_cpu_ptr(c->bio_decompress_worker);
llist_add(&rbio->list, &d->bio_list);
- queue_work(system_unbound_wq, &d->work);
+ queue_work(system_highpri_wq, &d->work);
preempt_enable();
} else {
__bch_read_endio(c, rbio);
}
}
+static bool should_promote(struct cache_set *c,
+ struct extent_pick_ptr *pick, unsigned flags)
+{
+ if (!(flags & BCH_READ_PROMOTE))
+ return false;
+
+ if (percpu_ref_is_dying(&c->writes))
+ return false;
+
+ return c->fastest_tier &&
+ c->fastest_tier < c->tiers + pick->ca->mi.tier;
+}
+
void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
struct bvec_iter iter, struct bkey_s_c k,
struct extent_pick_ptr *pick, unsigned flags)
@@ -1158,7 +1167,7 @@ void bch_read_extent_iter(struct cache_set *c, struct bch_read_bio *orig,
* XXX: multiple promotes can race with each other, wastefully. Keep a
* list of outstanding promotes?
*/
- if ((flags & BCH_READ_PROMOTE) && pick->ca->mi.tier) {
+ if (should_promote(c, pick, flags)) {
/*
* biovec needs to be big enough to hold decompressed data, if
* the bch_write_extent() has to decompress/recompress it: