author		Kent Overstreet <kent.overstreet@gmail.com>	2016-02-23 15:47:37 -0900
committer	Kent Overstreet <kent.overstreet@gmail.com>	2016-10-07 12:35:38 -0800
commit		891038a270f84280debeb8183fcdab01e5d8ad7f (patch)
tree		f1cea9b0fd0c35ca4956b8959ac7494eed6d2059
parent		1433598cc94755ed61fea909863e73142896ad42 (diff)
bcache: maintain bucket_stats for whole device too
-rw-r--r--	drivers/md/bcache/alloc.c		  2
-rw-r--r--	drivers/md/bcache/bcache.h		  7
-rw-r--r--	drivers/md/bcache/btree_gc.c		  4
-rw-r--r--	drivers/md/bcache/buckets.c		109
-rw-r--r--	drivers/md/bcache/buckets.h		 65
-rw-r--r--	drivers/md/bcache/buckets_types.h	  8
-rw-r--r--	drivers/md/bcache/super.c		  4
-rw-r--r--	drivers/md/bcache/sysfs.c		 19
8 files changed, 123 insertions, 95 deletions
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7d53fd71ca77..85b37142e7e6 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -126,7 +126,7 @@ static void pd_controllers_update(struct work_struct *work)
 	rcu_read_lock();
 	for (i = CACHE_TIERS - 1; i >= 0; --i)
 		group_for_each_cache_rcu(ca, &c->cache_tiers[i], iter) {
-			struct bucket_stats stats = bch_bucket_stats_read(ca);
+			struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
 			unsigned bucket_bits = ca->bucket_bits + 9;
 
 			/*
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0e0350c06133..01a3ff1eca06 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -407,8 +407,8 @@ struct cache {
 	 * second contains a saved copy of the stats from the beginning
 	 * of GC.
	 */
-	struct bucket_stats __percpu	*bucket_stats_percpu;
-	struct bucket_stats		bucket_stats_cached;
+	struct bucket_stats_cache __percpu *bucket_stats_percpu;
+	struct bucket_stats_cache	bucket_stats_cached;
 
 	atomic_long_t			saturated_count;
 	size_t				inc_gen_needs_gc;
@@ -628,6 +628,9 @@ struct cache_set {
 	atomic64_t		sectors_reserved;
 	atomic64_t		sectors_reserved_cache;
 
+	struct bucket_stats_cache_set __percpu *bucket_stats_percpu;
+	struct bucket_stats_cache_set	bucket_stats_cached;
+
 	struct mutex		bucket_lock;
 
 	struct closure_waitlist	freelist_wait;
diff --git a/drivers/md/bcache/btree_gc.c b/drivers/md/bcache/btree_gc.c
index 29bf17d25c90..bce666327c9f 100644
--- a/drivers/md/bcache/btree_gc.c
+++ b/drivers/md/bcache/btree_gc.c
@@ -384,7 +384,9 @@ void bch_gc(struct cache_set *c)
 
 	/* Save a copy of the existing bucket stats while we recompute them: */
 	for_each_cache(ca, c, i)
-		ca->bucket_stats_cached = __bucket_stats_read(ca);
+		ca->bucket_stats_cached = __bch_bucket_stats_read_cache(ca);
+
+	c->bucket_stats_cached = __bch_bucket_stats_read_cache_set(c);
 
 	/* Indicates to buckets code that gc is now in progress: */
 	__gc_pos_set(c, GC_POS_MIN);
diff --git a/drivers/md/bcache/buckets.c b/drivers/md/bcache/buckets.c
index 4b09103543c6..7f9fb8092e44 100644
--- a/drivers/md/bcache/buckets.c
+++ b/drivers/md/bcache/buckets.c
@@ -69,20 +69,63 @@
 
 #include <trace/events/bcache.h>
 
-struct bucket_stats bch_bucket_stats_read(struct cache *ca)
+#define bucket_stats_read_raw(_stats)					\
+({									\
+	typeof(*this_cpu_ptr(_stats)) _acc, *_s;			\
+	unsigned i;							\
+	int cpu;							\
+									\
+	memset(&_acc, 0, sizeof(_acc));					\
+									\
+	for_each_possible_cpu(cpu) {					\
+		_s = per_cpu_ptr((_stats), cpu);			\
+									\
+		for (i = 0; i < sizeof(_acc) / sizeof(u64); i++)	\
+			((u64 *) &_acc)[i] += ((u64 *) _s)[i];		\
+	}								\
+									\
+	_acc;								\
+})
+
+#define bucket_stats_read_cached(_c, _cached, _uncached)		\
+({									\
+	typeof(_cached) _ret;						\
+	unsigned _seq;							\
+									\
+	do {								\
+		_seq = read_seqcount_begin(&(_c)->gc_pos_lock);		\
+		_ret = (_c)->gc_pos.phase == GC_PHASE_DONE		\
+			? bucket_stats_read_raw(_uncached)		\
+			: (_cached);					\
+	} while (read_seqcount_retry(&(_c)->gc_pos_lock, _seq));	\
+									\
+	_ret;								\
+})
+
+struct bucket_stats_cache __bch_bucket_stats_read_cache(struct cache *ca)
 {
-	struct cache_set *c = ca->set;
-	struct bucket_stats ret;
-	unsigned seq;
+	return bucket_stats_read_raw(ca->bucket_stats_percpu);
+}
 
-	do {
-		seq = read_seqcount_begin(&c->gc_pos_lock);
-		ret = c->gc_pos.phase == GC_PHASE_DONE
-			? __bucket_stats_read(ca)
-			: ca->bucket_stats_cached;
-	} while (read_seqcount_retry(&c->gc_pos_lock, seq));
+struct bucket_stats_cache bch_bucket_stats_read_cache(struct cache *ca)
+{
+	return bucket_stats_read_cached(ca->set,
+					ca->bucket_stats_cached,
+					ca->bucket_stats_percpu);
+}
 
-	return ret;
+struct bucket_stats_cache_set
+__bch_bucket_stats_read_cache_set(struct cache_set *c)
+{
+	return bucket_stats_read_raw(c->bucket_stats_percpu);
+}
+
+struct bucket_stats_cache_set
+bch_bucket_stats_read_cache_set(struct cache_set *c)
+{
+	return bucket_stats_read_cached(c,
+					c->bucket_stats_cached,
+					c->bucket_stats_percpu);
 }
 
 static inline int is_meta_bucket(struct bucket_mark m)
@@ -105,36 +148,46 @@ static void bucket_stats_update(struct cache *ca,
 				struct bucket_mark new,
 				bool may_make_unavailable)
 {
-	struct bucket_stats *stats;
+	struct cache_set *c = ca->set;
+	struct bucket_stats_cache *cache_stats;
+	struct bucket_stats_cache_set *cache_set_stats;
 
 	BUG_ON(!may_make_unavailable &&
 	       is_available_bucket(old) &&
 	       !is_available_bucket(new) &&
-	       ca->set->gc_pos.phase == GC_PHASE_DONE);
+	       c->gc_pos.phase == GC_PHASE_DONE);
 
 	preempt_disable();
-	stats = this_cpu_ptr(ca->bucket_stats_percpu);
+	cache_stats = this_cpu_ptr(ca->bucket_stats_percpu);
+	cache_set_stats = this_cpu_ptr(c->bucket_stats_percpu);
 
-	stats->sectors_cached +=
+	cache_stats->sectors_cached +=
+		(int) new.cached_sectors - (int) old.cached_sectors;
+	cache_set_stats->sectors_cached +=
 		(int) new.cached_sectors - (int) old.cached_sectors;
 
-	if (old.is_metadata)
-		stats->sectors_meta -= old.dirty_sectors;
-	else
-		stats->sectors_dirty -= old.dirty_sectors;
+	if (old.is_metadata) {
+		cache_stats->sectors_meta -= old.dirty_sectors;
+		cache_set_stats->sectors_meta -= old.dirty_sectors;
+	} else {
+		cache_stats->sectors_dirty -= old.dirty_sectors;
+		cache_set_stats->sectors_dirty -= old.dirty_sectors;
+	}
 
-	if (new.is_metadata)
-		stats->sectors_meta += new.dirty_sectors;
-	else
-		stats->sectors_dirty += new.dirty_sectors;
+	if (new.is_metadata) {
+		cache_stats->sectors_meta += new.dirty_sectors;
+		cache_set_stats->sectors_meta += new.dirty_sectors;
+	} else {
+		cache_stats->sectors_dirty += new.dirty_sectors;
+		cache_set_stats->sectors_dirty += new.dirty_sectors;
+	}
 
-	stats->buckets_alloc +=
+	cache_stats->buckets_alloc +=
 		(int) new.owned_by_allocator - (int) old.owned_by_allocator;
 
-	stats->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
-	stats->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
-	stats->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
-
+	cache_stats->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
+	cache_stats->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
+	cache_stats->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
 	preempt_enable();
 
 	if (!is_available_bucket(old) && is_available_bucket(new))
diff --git a/drivers/md/bcache/buckets.h b/drivers/md/bcache/buckets.h
index 7ea15ec4f9c7..ac3f1b59d957 100644
--- a/drivers/md/bcache/buckets.h
+++ b/drivers/md/bcache/buckets.h
@@ -137,32 +137,6 @@ static inline void bucket_heap_push(struct cache *ca, struct bucket *g,
  * GC must be performed.
  */
 #define GC_MAX_SECTORS_USED ((1U << 15) - 1)
 
-static inline void __bucket_stats_add(struct bucket_stats *acc,
-				      struct bucket_stats *s)
-{
-	unsigned i;
-
-	for (i = 0; i < sizeof(*s) / sizeof(u64); i++)
-		((u64 *) acc)[i] += ((u64 *) s)[i];
-}
-
-static inline struct bucket_stats __bucket_stats_read(struct cache *ca)
-{
-	struct bucket_stats ret;
-	int cpu;
-
-	memset(&ret, 0, sizeof(ret));
-
-	for_each_possible_cpu(cpu)
-		__bucket_stats_add(&ret,
-				   per_cpu_ptr(ca->bucket_stats_percpu, cpu));
-
-	return ret;
-}
-
-struct bucket_stats bch_bucket_stats_read(struct cache *);
-
 static inline bool bucket_unused(struct bucket *g)
 {
 	return !g->mark.counter;
@@ -173,8 +147,13 @@ static inline unsigned bucket_sectors_used(struct bucket *g)
 	return g->mark.dirty_sectors + g->mark.cached_sectors;
 }
 
+/* Per device stats: */
+
+struct bucket_stats_cache __bch_bucket_stats_read_cache(struct cache *);
+struct bucket_stats_cache bch_bucket_stats_read_cache(struct cache *);
+
 static inline size_t __buckets_available_cache(struct cache *ca,
-					       struct bucket_stats stats)
+					       struct bucket_stats_cache stats)
 {
 	return max_t(s64, 0,
 		     ca->mi.nbuckets - ca->mi.first_bucket -
@@ -188,11 +167,11 @@ static inline size_t __buckets_available_cache(struct cache *ca,
  */
 static inline size_t buckets_available_cache(struct cache *ca)
 {
-	return __buckets_available_cache(ca, bch_bucket_stats_read(ca));
+	return __buckets_available_cache(ca, bch_bucket_stats_read_cache(ca));
 }
 
 static inline size_t __buckets_free_cache(struct cache *ca,
-					  struct bucket_stats stats,
+					  struct bucket_stats_cache stats,
 					  enum alloc_reserve reserve)
 {
 	size_t free = __buckets_available_cache(ca, stats) +
@@ -208,29 +187,21 @@ static inline size_t __buckets_free_cache(struct cache *ca,
 static inline size_t buckets_free_cache(struct cache *ca,
 					enum alloc_reserve reserve)
 {
-	return __buckets_free_cache(ca, bch_bucket_stats_read(ca), reserve);
+	return __buckets_free_cache(ca, bch_bucket_stats_read_cache(ca), reserve);
 }
 
+/* Cache set stats: */
+
+struct bucket_stats_cache_set __bch_bucket_stats_read_cache_set(struct cache_set *);
+struct bucket_stats_cache_set bch_bucket_stats_read_cache_set(struct cache_set *);
+
 static inline u64 __cache_set_sectors_used(struct cache_set *c)
 {
-	struct cache *ca;
-	unsigned i;
-	u64 used = 0;
-
-	rcu_read_lock();
-	for_each_cache_rcu(ca, c, i) {
-		struct bucket_stats stats = bch_bucket_stats_read(ca);
-
-		used += (((stats.buckets_alloc -
-			   fifo_used(&ca->free[RESERVE_NONE]) -
-			   fifo_used(&ca->free_inc)) +
-			  stats.buckets_meta) <<
-			 ca->bucket_bits) +
-			stats.sectors_dirty;
-	}
-	rcu_read_unlock();
+	struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
 
-	return used + atomic64_read(&c->sectors_reserved);
+	return stats.sectors_meta +
+		stats.sectors_dirty +
+		atomic64_read(&c->sectors_reserved);
 }
 
 static inline u64 cache_set_sectors_used(struct cache_set *c)
diff --git a/drivers/md/bcache/buckets_types.h b/drivers/md/bcache/buckets_types.h
index e5c5a0b810c1..2a2c4ef4dfce 100644
--- a/drivers/md/bcache/buckets_types.h
+++ b/drivers/md/bcache/buckets_types.h
@@ -32,7 +32,7 @@ struct bucket {
 	u8		copygc_gen;
 };
 
-struct bucket_stats {
+struct bucket_stats_cache {
 	u64		buckets_dirty;
 	u64		buckets_cached;
 	u64		buckets_meta;
@@ -43,6 +43,12 @@ struct bucket_stats {
 	u64		sectors_meta;
 };
 
+struct bucket_stats_cache_set {
+	u64		sectors_dirty;
+	u64		sectors_cached;
+	u64		sectors_meta;
+};
+
 struct bucket_heap_entry {
 	struct bucket *g;
 	unsigned long val;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index ef3d272baa18..c414ac48a70b 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -873,6 +873,7 @@ static void cache_set_free(struct cache_set *c)
 	bch_io_clock_exit(&c->io_clock[WRITE]);
 	bch_io_clock_exit(&c->io_clock[READ]);
 	bdi_destroy(&c->bdi);
+	free_percpu(c->bucket_stats_percpu);
 	free_percpu(c->bio_decompress_worker);
 	mempool_exit(&c->compression_workspace_pool);
 	mempool_exit(&c->bio_bounce_pages);
@@ -1133,6 +1134,7 @@ static struct cache_set *bch_cache_set_alloc(struct cache_sb *sb,
 	    mempool_init_page_pool(&c->compression_workspace_pool, 1,
				   get_order(COMPRESSION_WORKSPACE_SIZE)) ||
 	    !(c->bio_decompress_worker = alloc_percpu(*c->bio_decompress_worker)) ||
+	    !(c->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache_set)) ||
 	    bdi_setup_and_register(&c->bdi, "bcache") ||
 	    bch_io_clock_init(&c->io_clock[READ]) ||
 	    bch_io_clock_init(&c->io_clock[WRITE]) ||
@@ -1922,7 +1924,7 @@ static const char *cache_alloc(struct bcache_superblock *sb,
 	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->bucket_stats_percpu = alloc_percpu(struct bucket_stats)) ||
+	    !(ca->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache)) ||
 	    !(ca->journal.bucket_seq = kcalloc(bch_nr_journal_buckets(ca->disk_sb.sb),
					       sizeof(u64), GFP_KERNEL)) ||
 	    !(ca->bio_prio = bio_kmalloc(GFP_NOIO, bucket_pages(ca))) ||
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 278d431b6fe6..a684a04cc95c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -572,25 +572,16 @@ static unsigned bch_average_key_size(struct cache_set *c)
 
 static ssize_t show_cache_set_alloc_debug(struct cache_set *c, char *buf)
 {
-	struct cache *ca;
-	unsigned i;
-	u64 meta = 0, dirty = 0;
-
-	rcu_read_lock();
-	for_each_cache_rcu(ca, c, i) {
-		struct bucket_stats stats = bch_bucket_stats_read(ca);
-
-		meta += stats.buckets_meta << ca->bucket_bits;
-		dirty += stats.sectors_dirty;
-	}
-	rcu_read_unlock();
+	struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
 
 	return scnprintf(buf, PAGE_SIZE,
			 "capacity:\t\t%llu\n"
			 "meta sectors:\t\t%llu\n"
			 "dirty sectors:\t\t%llu\n"
			 "reserved sectors:\t%llu\n",
-			 c->capacity, meta, dirty,
+			 c->capacity,
+			 stats.sectors_meta,
+			 stats.sectors_dirty,
			 (u64) atomic64_read(&c->sectors_reserved));
 }
 
@@ -1163,7 +1154,7 @@ SHOW(bch_cache)
 {
 	struct cache *ca = container_of(kobj, struct cache, kobj);
 	struct cache_set *c = ca->set;
-	struct bucket_stats stats = bch_bucket_stats_read(ca);
+	struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
 
 	sysfs_printf(uuid, "%pU\n", ca->disk_sb.sb->disk_uuid.b);
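
For readers skimming the patch: the core trick in the new bucket_stats_read_raw() macro is that every field of a stats struct is a u64, so the per-CPU copies can be summed generically by treating each struct as a flat array of u64s, regardless of whether it is a bucket_stats_cache or a bucket_stats_cache_set. The sketch below models that pattern in plain userspace C; NR_FAKE_CPUS, fake_percpu_stats and stats_read_raw are invented stand-ins for the kernel's per-CPU machinery, so this is an illustration of the aggregation idea, not kernel code.

/* Userspace sketch of the per-CPU stats aggregation pattern. */
#include <stdint.h>
#include <stdio.h>

struct bucket_stats_cache_set {
	uint64_t sectors_dirty;
	uint64_t sectors_cached;
	uint64_t sectors_meta;
};

#define NR_FAKE_CPUS 4

/* Stand-in for alloc_percpu(): one private stats copy per "CPU". */
static struct bucket_stats_cache_set fake_percpu_stats[NR_FAKE_CPUS];

static struct bucket_stats_cache_set stats_read_raw(void)
{
	struct bucket_stats_cache_set acc = { 0 };
	unsigned i, cpu;

	/* Sum every copy field-by-field, treating each struct as u64[]. */
	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		for (i = 0; i < sizeof(acc) / sizeof(uint64_t); i++)
			((uint64_t *) &acc)[i] +=
				((uint64_t *) &fake_percpu_stats[cpu])[i];

	return acc;
}

int main(void)
{
	/* Writers only touch their own CPU's copy (no shared cacheline). */
	fake_percpu_stats[0].sectors_dirty = 8;
	fake_percpu_stats[2].sectors_dirty = 120;
	fake_percpu_stats[3].sectors_meta  = 16;

	struct bucket_stats_cache_set s = stats_read_raw();
	printf("dirty %llu meta %llu\n",
	       (unsigned long long) s.sectors_dirty,
	       (unsigned long long) s.sectors_meta);
	return 0;
}

The companion bucket_stats_read_cached() macro in the patch wraps this raw sum in a read_seqcount_begin()/read_seqcount_retry() loop on gc_pos_lock: while GC is rewriting the live counters, readers are served the snapshot saved at the start of GC instead, and retry if the GC phase changes under them.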