author		Kent Overstreet <kent.overstreet@gmail.com>	2015-08-21 00:06:50 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2017-01-18 20:19:50 -0900
commit		75fc548134a46e3d216902c46d044bdfdc22a154
tree		ffd349576d571f6bb0a00bd2f2b29a99b3ae7dad
parent		fb4831a24c87ff3bb0bc217ca469655ce170e7e7
bcache: More consistent tracepoints
We've ended up with a scheme where *_fail tracepoints mark the slow paths,
so maintain that convention here.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
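
For reference, the *_fail convention the message describes looks roughly like the following in the kernel's tracepoint machinery. This is a minimal hypothetical sketch, not code from this commit: the "foo" trace system, the foo_alloc event class, and both event names are invented for illustration.

/*
 * Hypothetical trace header sketching the *_fail convention: one event
 * class, a fast-path event, and a _fail sibling fired only on the slow
 * path. The "foo" system and event names are illustrative only.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_FOO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_FOO_H

#include <linux/tracepoint.h>

DECLARE_EVENT_CLASS(foo_alloc,
	TP_PROTO(unsigned long nr),
	TP_ARGS(nr),

	TP_STRUCT__entry(
		__field(unsigned long,	nr	)
	),

	TP_fast_assign(
		__entry->nr = nr;
	),

	TP_printk("nr %lu", __entry->nr)
);

/* Common case: fires whenever an allocation is attempted. */
DEFINE_EVENT(foo_alloc, foo_alloc,
	TP_PROTO(unsigned long nr),
	TP_ARGS(nr)
);

/* Slow path: fires only when the allocation has to block or retry. */
DEFINE_EVENT(foo_alloc, foo_alloc_fail,
	TP_PROTO(unsigned long nr),
	TP_ARGS(nr)
);

#endif /* _TRACE_FOO_H */

/* This part must be outside the header guard. */
#include <trace/define_trace.h>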
-rw-r--r--	drivers/md/bcache/btree.c	6
-rw-r--r--	include/trace/events/bcache.h	18
2 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 8a66295aff7f..084defd1c27b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -904,6 +904,8 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct bkey *k,
 	if (ret)
 		return ERR_PTR(ret);
 
+	trace_bcache_mca_cannibalize(c, cl);
+
 	list_for_each_entry_reverse(b, &c->btree_cache, list)
 		if (!mca_reap(b, btree_order(k), false))
 			goto out;
@@ -1178,9 +1180,9 @@ static int __btree_check_reserve(struct cache_set *c,
 
 	for_each_cache(ca, c, i) {
 		if (fifo_used(&ca->free[reserve]) < required) {
-			trace_bcache_btree_check_reserve(ca,
+			trace_bcache_btree_check_reserve_fail(ca,
 					fifo_used(&ca->free[reserve]),
-					reserve);
+					reserve, cl);
 
 			ret = bch_bucket_wait(c, reserve, cl);
 			mutex_unlock(&c->bucket_lock);
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 6ac900e71628..1863242e0e90 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -338,6 +338,11 @@ DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize_lock,
 	TP_ARGS(c, cl)
 );
 
+DEFINE_EVENT(mca_cannibalize_lock, bcache_mca_cannibalize,
+	TP_PROTO(struct cache_set *c, struct closure *cl),
+	TP_ARGS(c, cl)
+);
+
 DEFINE_EVENT(cache_set, bcache_mca_cannibalize_unlock,
 	TP_PROTO(struct cache_set *c),
 	TP_ARGS(c)
@@ -468,24 +473,27 @@ TRACE_EVENT(bcache_alloc_batch,
 		  __entry->uuid, __entry->free, __entry->total)
 );
 
-TRACE_EVENT(bcache_btree_check_reserve,
-	TP_PROTO(struct cache *ca, enum btree_id id, size_t free),
-	TP_ARGS(ca, id, free),
+TRACE_EVENT(bcache_btree_check_reserve_fail,
+	TP_PROTO(struct cache *ca, enum btree_id id, size_t free,
+		 struct closure *cl),
+	TP_ARGS(ca, id, free, cl),
 
 	TP_STRUCT__entry(
 		__array(char,		uuid,	16	)
 		__field(enum btree_id,	id	)
 		__field(size_t,		free	)
+		__field(struct closure *, cl	)
 	),
 
 	TP_fast_assign(
 		memcpy(__entry->uuid, ca->sb.uuid.b, 16);
 		__entry->id = id;
 		__entry->free = free;
+		__entry->cl = cl;
 	),
 
-	TP_printk("%pU id %u free %zu",
-		  __entry->uuid, __entry->id, __entry->free)
+	TP_printk("%pU id %u free %zu by %p",
+		  __entry->uuid, __entry->id, __entry->free, __entry->cl)
 );
 
 DEFINE_EVENT(cache, bcache_moving_gc_start,
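
As a usage note: events defined through TRACE_EVENT/DEFINE_EVENT appear per-event under tracefs, so assuming a kernel with tracing enabled, writing 1 to events/bcache/bcache_btree_check_reserve_fail/enable under /sys/kernel/debug/tracing (or /sys/kernel/tracing on newer kernels) turns on just the slowpath event. When an event is disabled, its call site costs only a static-branch no-op, which is why the fast paths can carry tracepoints like bcache_mca_cannibalize without measurable overhead.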