author     Kent Overstreet <kent.overstreet@gmail.com>   2015-08-21 00:06:50 -0800
committer  Kent Overstreet <kent.overstreet@gmail.com>   2016-10-07 09:00:24 -0800
commit     7749419c79b9d1c61d0c1d3c238313f0a9bf1d41 (patch)
tree       ddec1ecae54ad758158fa4fddd6109a81484d2bc
parent     6945fe6a72d331d2e670fbfdaa61af0b6d59c367 (diff)
bcache: More consistent tracepoints
We've ended up with a scheme where *_fail tracepoints are the slowpaths, so maintain that here.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
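The scheme referred to above: within one event class, the bare event name covers the common path, and the *_fail variant fires only on the slowpath (the branch that has to wait, retry, or cannibalize). A minimal sketch of what that convention looks like in a trace header, using standard DECLARE_EVENT_CLASS/DEFINE_EVENT macros; the foo_reserve names are hypothetical and not part of this patch:

/* Hypothetical event class illustrating the *_fail naming convention:
 * the bare event is the common path, the _fail event is the slowpath. */
DECLARE_EVENT_CLASS(foo_reserve_class,
	TP_PROTO(size_t free),
	TP_ARGS(free),

	TP_STRUCT__entry(
		__field(size_t,	free	)
	),

	TP_fast_assign(
		__entry->free = free;
	),

	TP_printk("free %zu", __entry->free)
);

DEFINE_EVENT(foo_reserve_class, foo_reserve,		/* common path */
	TP_PROTO(size_t free),
	TP_ARGS(free)
);

DEFINE_EVENT(foo_reserve_class, foo_reserve_fail,	/* slowpath only */
	TP_PROTO(size_t free),
	TP_ARGS(free)
);

A caller would emit trace_foo_reserve() on the ordinary path and trace_foo_reserve_fail() only on the branch that blocks, which is what the __btree_check_reserve() hunk below does with bcache_btree_check_reserve_fail.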
-rw-r--r--   drivers/md/bcache/btree.c          |  6
-rw-r--r--   include/trace/events/bcache.h      | 18
2 files changed, 17 insertions, 7 deletions
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 301d311e2d16..b56cac2149e5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -908,6 +908,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 	if (ret)
 		return ERR_PTR(ret);
 
+	trace_bcache_mca_cannibalize(c, cl);
+
 	list_for_each_entry_reverse(b, &c->btree_cache, list)
 		if (!mca_reap(b, btree_order(k), false))
 			goto out;
@@ -1182,9 +1184,9 @@ static int __btree_check_reserve(struct cache_set *c,
 	for_each_cache(ca, c, i) {
 		if (fifo_used(&ca->free[reserve]) < required) {
-			trace_bcache_btree_check_reserve(ca,
+			trace_bcache_btree_check_reserve_fail(ca,
 				fifo_used(&ca->free[reserve]),
-				reserve);
+				reserve, cl);
 
 			ret = bch_bucket_wait(c, reserve, cl);
 			mutex_unlock(&c->bucket_lock);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 6ac900e71628..1863242e0e90 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -338,6 +338,11 @@ DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
 	TP_ARGS(c, cl)
 );
 
+DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
+	TP_PROTO(struct cache_set *c, struct closure *cl),
+	TP_ARGS(c, cl)
+);
+
 DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock,
 	TP_PROTO(struct cache_set *c),
 	TP_ARGS(c)
@@ -468,24 +473,27 @@ TRACE_EVENT(bcache_alloc_batch,
 		  __entry->uuid, __entry->free, __entry->total)
 );
 
-TRACE_EVENT(bcache_btree_check_reserve,
-	TP_PROTO(struct cache *ca, enum btree_id id, size_t free),
-	TP_ARGS(ca, id, free),
+TRACE_EVENT(bcache_btree_check_reserve_fail,
+	TP_PROTO(struct cache *ca, enum btree_id id, size_t free,
+		 struct closure *cl),
+	TP_ARGS(ca, id, free, cl),
 
 	TP_STRUCT__entry(
 		__array(char,		uuid,	16	)
 		__field(enum btree_id,	id		)
 		__field(size_t,		free		)
+		__field(struct closure *, cl		)
 	),
 
 	TP_fast_assign(
 		memcpy(__entry->uuid, ca->sb.uuid.b, 16);
 		__entry->id	= id;
 		__entry->free	= free;
+		__entry->cl	= cl;
 	),
 
-	TP_printk("%pU id %u free %zu",
-		  __entry->uuid, __entry->id, __entry->free)
+	TP_printk("%pU id %u free %zu by %p",
+		  __entry->uuid, __entry->id, __entry->free, __entry->cl)
 );
 
 DEFINE_EVENT(cache, bcache_moving_gc_start,