summary refs log tree commit diff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2016-03-22 20:01:41 -0800
committerKent Overstreet <kent.overstreet@gmail.com>2016-10-07 12:35:29 -0800
commit680952bd8aad281497a5a339271d69f3575fbc0c (patch)
tree5d15b11ea0bf0c000af185cbca9a09d2cb86c23a
parent545395b2cc66d36c0b3056ea53474552cd3b1446 (diff)
bcache: Fix allocator waking up gc
-rw-r--r--drivers/md/bcache/alloc.c8
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/btree_gc.c36
-rw-r--r--drivers/md/bcache/clock.c31
-rw-r--r--drivers/md/bcache/clock.h12
-rw-r--r--drivers/md/bcache/util.h15
6 files changed, 85 insertions(+), 18 deletions(-)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 79060ca4eea7..cbd0948b3963 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -399,6 +399,7 @@ int bch_prio_read(struct cache *ca)
*/
static int wait_buckets_available(struct cache *ca)
{
+ struct cache_set *c = ca->set;
int ret = 0;
while (1) {
@@ -408,9 +409,10 @@ static int wait_buckets_available(struct cache *ca)
break;
}
- if (ca->inc_gen_needs_gc > ca->free_inc.size) {
- if (ca->set->gc_thread) {
+ if (ca->inc_gen_needs_gc >= fifo_free(&ca->free_inc)) {
+ if (c->gc_thread) {
trace_bcache_gc_cannot_inc_gens(ca->set);
+ atomic_inc(&c->kick_gc);
wake_up_process(ca->set->gc_thread);
}
@@ -705,6 +707,8 @@ static void invalidate_buckets_random(struct cache *ca)
static void invalidate_buckets(struct cache *ca)
{
+ ca->inc_gen_needs_gc = 0;
+
switch (ca->mi.replacement) {
case CACHE_REPLACEMENT_LRU:
invalidate_buckets_lru(ca);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 1fc053c76d1a..3b65a408ceaa 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -666,6 +666,7 @@ struct cache_set {
/* GARBAGE COLLECTION */
struct task_struct *gc_thread;
+ atomic_t kick_gc;
/* This is a list of scan_keylists for btree GC to scan */
struct list_head gc_scan_keylists;
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 709daaba52dd..b4bcd8a9b8a1 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -426,10 +426,8 @@ void bch_gc(struct cache_set *c)
bch_writeback_recalc_oldest_gens(c);
bch_mark_scan_keylists(c);
- for_each_cache(ca, c, i) {
+ for_each_cache(ca, c, i)
atomic_long_set(&ca->saturated_count, 0);
- ca->inc_gen_needs_gc = 0;
- }
/* Indicates that gc is no longer in progress: */
gc_pos_set(c, gc_phase(GC_PHASE_DONE));
@@ -786,25 +784,33 @@ static int bch_gc_thread(void *arg)
struct cache_set *c = arg;
struct io_clock *clock = &c->io_clock[WRITE];
unsigned long last = atomic_long_read(&clock->now);
+ unsigned last_kick = atomic_read(&c->kick_gc);
struct cache *ca;
unsigned i;
while (1) {
- bch_kthread_io_clock_wait(clock, last + c->capacity / 16);
+ unsigned long next = last + c->capacity / 16;
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
- break;
- }
+ while (atomic_long_read(&clock->now) < next) {
+ set_current_state(TASK_INTERRUPTIBLE);
- last = atomic_long_read(&clock->now);
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
- bch_gc(c);
- bch_coalesce(c);
+ if (atomic_read(&c->kick_gc) != last_kick) {
+ __set_current_state(TASK_RUNNING);
+ break;
+ }
- debug_check_no_locks_held();
+ bch_io_clock_schedule_timeout(clock, next);
+ try_to_freeze();
+ }
- set_current_state(TASK_INTERRUPTIBLE);
+ last = atomic_long_read(&clock->now);
+ last_kick = atomic_read(&c->kick_gc);
+ bch_gc(c);
/*
* Wake up allocator in case it was waiting for buckets
@@ -812,6 +818,10 @@ static int bch_gc_thread(void *arg)
*/
for_each_cache(ca, c, i)
bch_wake_allocator(ca);
+
+ bch_coalesce(c);
+
+ debug_check_no_locks_held();
}
return 0;
diff --git a/drivers/md/bcache/clock.c b/drivers/md/bcache/clock.c
index 1e453cd30b92..c60269919d34 100644
--- a/drivers/md/bcache/clock.c
+++ b/drivers/md/bcache/clock.c
@@ -16,6 +16,21 @@ void bch_io_timer_add(struct io_clock *clock, struct io_timer *timer)
spin_unlock(&clock->timer_lock);
}
+void bch_io_timer_del(struct io_clock *clock, struct io_timer *timer)
+{
+ size_t i;
+
+ spin_lock(&clock->timer_lock);
+
+ for (i = 0; i < clock->timers.used; i++)
+ if (clock->timers.data[i] == timer) {
+ heap_del(&clock->timers, i, io_timer_cmp);
+ break;
+ }
+
+ spin_unlock(&clock->timer_lock);
+}
+
struct io_clock_wait {
struct io_timer timer;
struct task_struct *task;
@@ -31,6 +46,22 @@ static void io_clock_wait_fn(struct io_timer *timer)
wake_up_process(wait->task);
}
+void bch_io_clock_schedule_timeout(struct io_clock *clock, unsigned long until)
+{
+ struct io_clock_wait wait;
+
+ /* XXX: calculate sleep time rigorously */
+ wait.timer.expire = until;
+ wait.timer.fn = io_clock_wait_fn;
+ wait.task = current;
+ wait.expired = 0;
+ bch_io_timer_add(clock, &wait.timer);
+
+ schedule();
+
+ bch_io_timer_del(clock, &wait.timer);
+}
+
/*
* _only_ to be used from a kthread
*/
diff --git a/drivers/md/bcache/clock.h b/drivers/md/bcache/clock.h
index c38679699a16..f59f0716f611 100644
--- a/drivers/md/bcache/clock.h
+++ b/drivers/md/bcache/clock.h
@@ -2,9 +2,21 @@
#define _BCACHE_CLOCK_H
void bch_io_timer_add(struct io_clock *, struct io_timer *);
+void bch_io_timer_del(struct io_clock *, struct io_timer *);
void bch_kthread_io_clock_wait(struct io_clock *, unsigned long);
void bch_increment_clock(struct cache_set *, unsigned, int);
+void bch_io_clock_schedule_timeout(struct io_clock *, unsigned long);
+
+#define bch_kthread_wait_event_ioclock_timeout(condition, clock, timeout)\
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_timeout(wq, condition, timeout); \
+ __ret; \
+})
+
void bch_io_clock_exit(struct io_clock *);
int bch_io_clock_init(struct io_clock *);
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 73e7777667da..188630b418ed 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -129,14 +129,23 @@ do { \
_r; \
})
+#define heap_del(h, i, cmp) \
+do { \
+ size_t _i = (i); \
+ \
+ BUG_ON(_i >= (h)->used); \
+ (h)->used--; \
+ heap_swap(h, _i, (h)->used); \
+ heap_sift_down(h, _i, cmp); \
+ heap_sift(h, _i, cmp); \
+} while (0)
+
#define heap_pop(h, d, cmp) \
({ \
bool _r = (h)->used; \
if (_r) { \
(d) = (h)->data[0]; \
- (h)->used--; \
- heap_swap(h, 0, (h)->used); \
- heap_sift(h, 0, cmp); \
+ heap_del(h, 0, cmp); \
} \
_r; \
})