-rw-r--r--  drivers/md/bcache/bcache.h  | 11
-rw-r--r--  drivers/md/bcache/btree.c   |  2
-rw-r--r--  drivers/md/bcache/extents.c | 64
-rw-r--r--  drivers/md/bcache/extents.h | 21
-rw-r--r--  drivers/md/bcache/gc.c      | 34
-rw-r--r--  drivers/md/bcache/gc.h      |  2
-rw-r--r--  drivers/md/bcache/sysfs.c   |  7
7 files changed, 20 insertions(+), 121 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 55019efd6909..9dcc83142d4a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -350,15 +350,6 @@ struct cache {
atomic_long_t sectors_written;
};
-struct gc_stat {
- u64 nodes;
- u64 key_bytes;
- u64 nkeys;
-
- u64 data; /* sectors */
- u64 inodes;
-};
-
/*
* Flag bits for what phase of startup/shutdown the cache set is at, how we're
* shutting down, etc.:
@@ -575,8 +566,6 @@ struct cache_set {
* it's not while a gc is in progress.
*/
struct rw_semaphore gc_lock;
- struct gc_stat gc_stats;
-
/* IO PATH */
struct bio_list read_race_list;
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 00834bbf32a9..5c3b0c15aab9 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2451,7 +2451,7 @@ static void btree_split(struct btree *b, struct btree_iter *iter,
six_lock_write(&b->lock);
if (gc_will_visit_node(c, n2) &&
!gc_will_visit_node(c, n1))
- btree_gc_mark_node(c, n1, NULL);
+ btree_gc_mark_node(c, n1);
six_unlock_write(&b->lock);
} else {
trace_bcache_btree_node_compact(b, set1->u64s);
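
The btree.c hunk above only drops the now-unused stats argument; the
surrounding logic is why the call exists: after a split, a node the
in-progress gc has already swept past would otherwise be missed. A
minimal sketch of that check, assuming gc sweeps the keyspace in
ascending order (stand-in types, not the real bpos/six-lock machinery):

```c
#include <stdbool.h>

struct pos { unsigned long long v; };    /* stand-in for struct bpos */
struct gc_state { struct pos cur; };     /* gc's current sweep position */
struct node { struct pos max_key; };     /* btree node, keyed by its max key */

/* gc will still visit a node iff its max key is at or past the cursor */
static bool gc_will_visit(const struct gc_state *gc, const struct node *n)
{
	return n->max_key.v >= gc->cur.v;
}

static void mark_node(struct node *n)
{
	(void) n;                        /* recalc oldest gens, etc. */
}

/* after splitting into n1 (left) and n2 (right): if gc will visit n2
 * but has already passed n1, mark n1 by hand so it isn't missed */
static void fixup_after_split(struct gc_state *gc,
			      struct node *n1, struct node *n2)
{
	if (gc_will_visit(gc, n2) && !gc_will_visit(gc, n1))
		mark_node(n1);
}
```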
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index 55ee8043b9b8..2d5cf1fae0d6 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -19,14 +19,6 @@
#include <trace/events/bcache.h>
-#define bkey_extent_p(_f, _k) val_to_extent(bkeyp_val(_f, _k))
-
-static inline unsigned bkeyp_extent_ptrs(const struct bkey_format *f,
- const struct bkey_packed *k)
-{
- return bkeyp_val_u64s(f, k);
-}
-
static void sort_key_next(struct btree_node_iter *iter,
struct btree_keys *b,
struct btree_node_iter_set *i)
@@ -176,9 +168,8 @@ bool bch_extent_has_device(struct bkey_s_c_extent e, unsigned dev)
}
static bool should_drop_ptr(const struct cache_set *c,
- const struct bch_extent *e,
- const struct bch_extent_ptr *ptr,
- unsigned nr_ptrs)
+ struct bkey_s_c_extent e,
+ const struct bch_extent_ptr *ptr)
{
unsigned dev;
struct cache *ca;
@@ -196,50 +187,12 @@ static bool should_drop_ptr(const struct cache_set *c,
if (bch_is_zero(mi[dev].uuid.b, sizeof(uuid_le)))
return true;
- if (__bch_extent_ptr_is_dirty(c, e, ptr, nr_ptrs))
+ if (bch_extent_ptr_is_dirty(c, e, ptr))
return false;
return (ca = PTR_CACHE(c, ptr)) && ptr_stale(ca, ptr);
}
-unsigned bch_extent_nr_ptrs_after_normalize(struct cache_set *c,
- const struct btree *b,
- const struct bkey_packed *k)
-{
- const struct bkey_format *f = &b->keys.format;
- const struct bch_extent *e;
- unsigned ret = 0, ptr;
-
- switch (k->type) {
- case KEY_TYPE_DELETED:
- case KEY_TYPE_COOKIE:
- return 0;
-
- case KEY_TYPE_DISCARD:
- return bkey_unpack_key(f, k).version ? BKEY_U64s : 0;
-
- case KEY_TYPE_ERROR:
- return bkeyp_key_u64s(f, k);
-
- case BCH_EXTENT:
- e = bkey_p_c_extent_val(f, k);
-
- rcu_read_lock();
- for (ptr = 0; ptr < bkeyp_extent_ptrs(f, k); ptr++)
- if (!should_drop_ptr(c, e, &e->ptr[ptr],
- bkeyp_extent_ptrs(f, k)))
- ret++;
- rcu_read_unlock();
-
- if (ret)
- ret += bkeyp_key_u64s(f, k);
-
- return ret;
- default:
- BUG();
- }
-}
-
void bch_extent_drop_stale(struct cache_set *c, struct bkey_s k)
{
struct bkey_s_extent e = bkey_s_to_extent(k);
@@ -248,8 +201,7 @@ void bch_extent_drop_stale(struct cache_set *c, struct bkey_s k)
rcu_read_lock();
extent_for_each_ptr_backwards(e, ptr)
- if (should_drop_ptr(c, extent_s_to_s_c(e).v,
- ptr, bch_extent_ptrs(e)))
+ if (should_drop_ptr(c, extent_s_to_s_c(e), ptr))
bch_extent_drop_ptr(e, ptr - e.v->ptr);
rcu_read_unlock();
@@ -1569,14 +1521,6 @@ struct cache *bch_extent_pick_ptr_avoiding(struct cache_set *c,
}
}
-#if 0
-static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
-{
- return (l->val[bkeyp_extent_ptrs(l)] + r->val[bkeyp_extent_ptrs(r)]) &
- ~((uint64_t)1 << 63);
-}
-#endif
-
static enum merge_result bch_extent_merge(struct btree_keys *bk,
struct bkey_i *l, struct bkey_i *r)
{
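
In extents.c, should_drop_ptr() now takes the whole extent as a
bkey_s_c_extent instead of a raw bch_extent plus a pointer count, and
the normalize-time counting helper is gone. The drop-stale pass it
feeds is sketched below with simplified stand-in types; note the
backwards walk, which keeps the memmove in bch_extent_drop_ptr() from
shifting pointers that haven't been visited yet:

```c
#include <stdbool.h>
#include <string.h>

struct ptr { unsigned dev; unsigned gen; };

struct extent {
	unsigned nr_ptrs;
	struct ptr ptrs[4];
};

/* stand-in for should_drop_ptr(): drop if the device is gone or the
 * pointer's generation no longer matches (dirty-pointer check elided) */
static bool should_drop(const struct ptr *p,
			const unsigned *cur_gen, unsigned nr_devs)
{
	return p->dev >= nr_devs || p->gen != cur_gen[p->dev];
}

static void drop_stale(struct extent *e,
		       const unsigned *cur_gen, unsigned nr_devs)
{
	int i;

	/* walk backwards so dropping ptr i never moves an unvisited ptr */
	for (i = e->nr_ptrs - 1; i >= 0; i--)
		if (should_drop(&e->ptrs[i], cur_gen, nr_devs)) {
			memmove(&e->ptrs[i], &e->ptrs[i + 1],
				(e->nr_ptrs - i - 1) * sizeof(struct ptr));
			e->nr_ptrs--;
		}
}
```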
diff --git a/drivers/md/bcache/extents.h b/drivers/md/bcache/extents.h
index 0c30090dd594..4b75ed26ade9 100644
--- a/drivers/md/bcache/extents.h
+++ b/drivers/md/bcache/extents.h
@@ -48,9 +48,6 @@ bool bch_insert_fixup_extent(struct cache_set *, struct btree *,
struct bch_replace_info *, struct bpos *,
struct journal_res *, unsigned);
-unsigned bch_extent_nr_ptrs_after_normalize(struct cache_set *,
- const struct btree *,
- const struct bkey_packed *);
void bch_extent_drop_stale(struct cache_set *c, struct bkey_s);
bool bch_extent_normalize(struct cache_set *, struct bkey_s);
@@ -80,25 +77,17 @@ static inline void bch_extent_drop_ptr(struct bkey_s_extent e,
(bch_extent_ptrs(extent_s_to_s_c(e)) - ptr) * sizeof(u64));
}
-static inline bool __bch_extent_ptr_is_dirty(const struct cache_set *c,
- const struct bch_extent *e,
- const struct bch_extent_ptr *ptr,
- unsigned nr_ptrs)
+static inline bool bch_extent_ptr_is_dirty(const struct cache_set *c,
+ struct bkey_s_c_extent e,
+ const struct bch_extent_ptr *ptr)
{
/* Dirty pointers come last */
- if (EXTENT_CACHED(e))
+ if (EXTENT_CACHED(e.v))
return false;
return ptr + CACHE_SET_DATA_REPLICAS_WANT(&c->sb) >=
- e->ptr + nr_ptrs;
-}
-
-static inline bool bch_extent_ptr_is_dirty(const struct cache_set *c,
- struct bkey_s_c_extent e,
- const struct bch_extent_ptr *ptr)
-{
- return __bch_extent_ptr_is_dirty(c, e.v, ptr, bch_extent_ptrs(e));
+ e.v->ptr + bch_extent_ptrs(e);
}
#define extent_for_each_ptr(_extent, _ptr) \
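
The folded-in bch_extent_ptr_is_dirty() above leans on an ordering
invariant: within a non-cached extent, dirty pointers come last, so a
pointer is dirty iff it is among the final CACHE_SET_DATA_REPLICAS_WANT
entries. A standalone sketch of just that pointer arithmetic
(simplified types; EXTENT_CACHED and bch_extent_ptrs become plain
fields here):

```c
#include <stdbool.h>

struct ptr { unsigned dev; };            /* stand-in for bch_extent_ptr */

struct extent {
	bool cached;                     /* stand-in for EXTENT_CACHED() */
	unsigned nr_ptrs;                /* stand-in for bch_extent_ptrs() */
	struct ptr ptrs[4];
};

static bool ptr_is_dirty(const struct extent *e, const struct ptr *p,
			 unsigned replicas_want)
{
	/* cached extents hold no dirty pointers at all */
	if (e->cached)
		return false;

	/* dirty pointers come last: p is dirty iff its index is
	 * >= nr_ptrs - replicas_want */
	return p + replicas_want >= e->ptrs + e->nr_ptrs;
}
```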
diff --git a/drivers/md/bcache/gc.c b/drivers/md/bcache/gc.c
index 60edad482917..fb335f89a68b 100644
--- a/drivers/md/bcache/gc.c
+++ b/drivers/md/bcache/gc.c
@@ -81,8 +81,7 @@ static inline bool btree_node_has_ptrs(struct btree *b)
return b->btree_id == BTREE_ID_EXTENTS || b->level > 0;
}
-bool btree_gc_mark_node(struct cache_set *c, struct btree *b,
- struct gc_stat *stat)
+bool btree_gc_mark_node(struct cache_set *c, struct btree *b)
{
struct bkey_format *f = &b->keys.format;
struct bset_tree *t;
@@ -93,15 +92,11 @@ bool btree_gc_mark_node(struct cache_set *c, struct btree *b,
bkey_cmp_packed(f, &b->key.k, &t->end) < 0,
b, "found short btree key in gc");
- if (stat)
- stat->nodes++;
-
if (btree_node_has_ptrs(b)) {
struct btree_node_iter iter;
struct bkey_packed *k_p;
struct bkey_tup tup;
struct bkey_s_c k;
- unsigned keys = 0, good_keys = 0, u64s;
u8 stale = 0;
for_each_btree_node_key(&b->keys, k_p, &iter) {
@@ -114,16 +109,6 @@ bool btree_gc_mark_node(struct cache_set *c, struct btree *b,
stale = max(stale,
bch_btree_key_recalc_oldest_gen(c, k));
- keys++;
-
- u64s = bch_extent_nr_ptrs_after_normalize(c, b, k_p);
- if (stat && u64s) {
- good_keys++;
-
- stat->key_bytes += k_p->u64s;
- stat->nkeys++;
- stat->data += tup.k.size;
- }
}
if (c->gc_rewrite_disabled)
@@ -131,9 +116,6 @@ bool btree_gc_mark_node(struct cache_set *c, struct btree *b,
if (stale > 10)
return true;
-
- if ((keys - good_keys) * 2 > keys)
- return true;
}
if (c->gc_always_rewrite)
@@ -142,8 +124,7 @@ bool btree_gc_mark_node(struct cache_set *c, struct btree *b,
return false;
}
-static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id,
- struct gc_stat *stat)
+static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id)
{
struct btree_iter iter;
struct btree *b;
@@ -168,7 +149,7 @@ static int bch_gc_btree(struct cache_set *c, enum btree_id btree_id,
bch_verify_btree_nr_keys(&b->keys);
- should_rewrite = btree_gc_mark_node(c, b, stat);
+ should_rewrite = btree_gc_mark_node(c, b);
BUG_ON(bkey_cmp(c->gc_cur_pos, b->key.k.p) > 0);
BUG_ON(!gc_will_visit_node(c, b));
@@ -317,7 +298,6 @@ static void bch_gc_finish(struct cache_set *c)
*/
void bch_gc(struct cache_set *c)
{
- struct gc_stat stats;
u64 start_time = local_clock();
if (test_bit(CACHE_SET_GC_FAILURE, &c->flags))
@@ -325,14 +305,12 @@ void bch_gc(struct cache_set *c)
trace_bcache_gc_start(c);
- memset(&stats, 0, sizeof(struct gc_stat));
-
down_write(&c->gc_lock);
bch_gc_start(c);
while (c->gc_cur_btree < BTREE_ID_NR) {
int ret = c->btree_roots[c->gc_cur_btree]
- ? bch_gc_btree(c, c->gc_cur_btree, &stats)
+ ? bch_gc_btree(c, c->gc_cur_btree)
: 0;
if (ret) {
@@ -354,10 +332,6 @@ void bch_gc(struct cache_set *c)
bch_time_stats_update(&c->btree_gc_time, start_time);
- stats.key_bytes *= sizeof(u64);
- stats.data <<= 9;
- memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
-
trace_bcache_gc_end(c);
}
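
With the per-key statistics gone from gc.c, the rewrite decision in
btree_gc_mark_node() reduces to pointer staleness plus the two debug
knobs; the "(keys - good_keys) * 2 > keys" density heuristic is removed
along with bch_extent_nr_ptrs_after_normalize(). Condensed from the
hunks above:

```c
#include <stdbool.h>

typedef unsigned char u8;

/* post-patch policy: rewrite a node only when its pointers have gone
 * noticeably stale, unless a debug knob forces the matter either way */
static bool should_rewrite_node(u8 max_stale,
				bool rewrite_disabled, bool always_rewrite)
{
	if (rewrite_disabled)
		return false;

	if (max_stale > 10)              /* threshold kept from the diff */
		return true;

	return always_rewrite;
}
```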
diff --git a/drivers/md/bcache/gc.h b/drivers/md/bcache/gc.h
index 1b6556e47240..81e50be44af5 100644
--- a/drivers/md/bcache/gc.h
+++ b/drivers/md/bcache/gc.h
@@ -12,7 +12,7 @@ int bch_initial_gc(struct cache_set *, struct list_head *);
u8 bch_btree_key_recalc_oldest_gen(struct cache_set *, struct bkey_s_c);
void __bch_btree_mark_key(struct cache_set *, int, struct bkey_s_c);
-bool btree_gc_mark_node(struct cache_set *, struct btree *, struct gc_stat *);
+bool btree_gc_mark_node(struct cache_set *, struct btree *);
/**
* __gc_will_visit_node - for checking GC marks while holding a btree read lock
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 8641cf6b08f4..9099dcb8808f 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -548,6 +548,7 @@ static unsigned bch_cache_available_percent(struct cache_set *c)
c->capacity ?: 1);
}
+#if 0
static unsigned bch_btree_used(struct cache_set *c)
{
return div64_u64(c->gc_stats.key_bytes * 100,
@@ -560,6 +561,7 @@ static unsigned bch_average_key_size(struct cache_set *c)
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
: 0;
}
+#endif
static ssize_t show_cache_set_alloc_debug(struct cache_set *c, char *buf)
{
@@ -611,9 +613,12 @@ SHOW(bch_cache_set)
sysfs_print_time_stats(&c->btree_read_time, btree_read, ms, us);
sysfs_print_time_stats(&c->journal.full_time, journal_full, sec, ms);
+#if 0
+ /* XXX: reimplement */
sysfs_print(btree_used_percent, bch_btree_used(c));
sysfs_print(btree_nodes, c->gc_stats.nodes);
sysfs_hprint(average_key_size, bch_average_key_size(c));
+#endif
sysfs_print(cache_read_races,
atomic_long_read(&c->cache_read_races));
@@ -711,8 +716,6 @@ STORE(__bch_cache_set)
if (attr == &sysfs_clear_stats) {
atomic_long_set(&c->writeback_keys_done, 0);
atomic_long_set(&c->writeback_keys_failed, 0);
-
- memset(&c->gc_stats, 0, sizeof(struct gc_stat));
bch_cache_accounting_clear(&c->accounting);
return size;
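
For reference, the two helpers now under #if 0 computed their sysfs
numbers from the deleted gc_stats fields (key_bytes already scaled by
sizeof(u64), data already shifted from sectors to bytes at the end of
bch_gc()). Condensed into standalone form — plain division stands in
for div64_u64, and the denominator of bch_btree_used() is partly cut
off by the hunk above, so treat it as an assumption:

```c
#include <stdint.h>

typedef uint64_t u64;

/* percentage of total btree node space occupied by key bytes */
static unsigned btree_used_percent(u64 key_bytes, u64 nodes, u64 node_bytes)
{
	u64 total = nodes * node_bytes;

	return total ? (unsigned) (key_bytes * 100 / total) : 0;
}

/* mean data bytes per live key (what average_key_size hprint'ed) */
static u64 average_key_size(u64 data_bytes, u64 nkeys)
{
	return nkeys ? data_bytes / nkeys : 0;
}
```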