diff options
author | Kent Overstreet <kent.overstreet@gmail.com> | 2020-06-02 16:36:11 -0400 |
---|---|---|
committer | Kent Overstreet <kent.overstreet@gmail.com> | 2020-11-05 12:45:07 -0500 |
commit | 70747877ef41ad95cac1d3c80a73889f7a2731ca (patch) | |
tree | 20c51e06259c9b5618656e49da0d4277e28eb19f | |
parent | 7f3f82e75b2a2a54e1c2bf192777dc7fe5da05d3 (diff) |
bcachefs: Add debug code to print btree transactions
Intended to help debug deadlocks, since we can't use lockdep to check
btree node lock ordering.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r-- | fs/bcachefs/bcachefs.h | 3 | ||||
-rw-r--r-- | fs/bcachefs/btree_iter.c | 62 | ||||
-rw-r--r-- | fs/bcachefs/btree_iter.h | 2 | ||||
-rw-r--r-- | fs/bcachefs/btree_locking.h | 12 | ||||
-rw-r--r-- | fs/bcachefs/btree_types.h | 4 | ||||
-rw-r--r-- | fs/bcachefs/clock.c | 2 | ||||
-rw-r--r-- | fs/bcachefs/journal.c | 4 | ||||
-rw-r--r-- | fs/bcachefs/sysfs.c | 8 |
8 files changed, 92 insertions, 5 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 4a28e46fbf71..68b2b073d1d9 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -627,6 +627,9 @@ struct bch_fs { struct workqueue_struct *btree_interior_update_worker; struct work_struct btree_interior_update_work; + /* btree_iter.c: */ + struct mutex btree_trans_lock; + struct list_head btree_trans_list; mempool_t btree_iters_pool; struct workqueue_struct *wq; diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index 1cded0540af5..29929298a1a9 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -1912,7 +1912,7 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans) struct btree_iter *iter; trans_for_each_iter(trans, iter) { - pr_err("iter: btree %s pos %llu:%llu%s%s%s %pf", + pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps", bch2_btree_ids[iter->btree_id], iter->pos.inode, iter->pos.offset, @@ -2192,12 +2192,24 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, if (expected_mem_bytes) bch2_trans_preload_mem(trans, expected_mem_bytes); + +#ifdef CONFIG_BCACHEFS_DEBUG + mutex_lock(&c->btree_trans_lock); + list_add(&trans->list, &c->btree_trans_list); + mutex_unlock(&c->btree_trans_lock); +#endif } int bch2_trans_exit(struct btree_trans *trans) { bch2_trans_unlock(trans); +#ifdef CONFIG_BCACHEFS_DEBUG + mutex_lock(&trans->c->btree_trans_lock); + list_del(&trans->list); + mutex_unlock(&trans->c->btree_trans_lock); +#endif + kfree(trans->fs_usage_deltas); kfree(trans->mem); if (trans->used_mempool) @@ -2210,6 +2222,51 @@ int bch2_trans_exit(struct btree_trans *trans) return trans->error ? 
-EIO : 0; } +void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c) +{ +#ifdef CONFIG_BCACHEFS_DEBUG + struct btree_trans *trans; + struct btree_iter *iter; + struct btree *b; + unsigned l; + + mutex_lock(&c->btree_trans_lock); + list_for_each_entry(trans, &c->btree_trans_list, list) { + pr_buf(out, "%ps\n", (void *) trans->ip); + + trans_for_each_iter(trans, iter) { + if (!iter->nodes_locked) + continue; + + pr_buf(out, " iter %s:", bch2_btree_ids[iter->btree_id]); + bch2_bpos_to_text(out, iter->pos); + pr_buf(out, "\n"); + + for (l = 0; l < BTREE_MAX_DEPTH; l++) { + if (btree_node_locked(iter, l)) { + b = iter->l[l].b; + + pr_buf(out, " %p l=%u %s ", + b, l, btree_node_intent_locked(iter, l) ? "i" : "r"); + bch2_bpos_to_text(out, b->key.k.p); + pr_buf(out, "\n"); + } + } + } + + b = READ_ONCE(trans->locking); + if (b) { + pr_buf(out, " locking %px l=%u %s:", + b, b->level, + bch2_btree_ids[b->btree_id]); + bch2_bpos_to_text(out, b->key.k.p); + pr_buf(out, "\n"); + } + } + mutex_unlock(&c->btree_trans_lock); +#endif +} + void bch2_fs_btree_iter_exit(struct bch_fs *c) { mempool_exit(&c->btree_iters_pool); @@ -2219,6 +2276,9 @@ int bch2_fs_btree_iter_init(struct bch_fs *c) { unsigned nr = BTREE_ITER_MAX; + INIT_LIST_HEAD(&c->btree_trans_list); + mutex_init(&c->btree_trans_lock); + return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1, sizeof(struct btree_iter) * nr + sizeof(struct btree_insert_entry) * nr + diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 6456787a8f77..841a5834f1a8 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -303,6 +303,8 @@ void *bch2_trans_kmalloc(struct btree_trans *, size_t); void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t); int bch2_trans_exit(struct btree_trans *); +void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *); + void bch2_fs_btree_iter_exit(struct bch_fs *); int bch2_fs_btree_iter_init(struct bch_fs *); diff --git 
a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index bb4f66646da2..730a9dc89de8 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -182,11 +182,21 @@ static inline bool btree_node_lock(struct btree *b, struct bpos pos, struct btree_iter *iter, enum six_lock_type type) { + bool ret; + EBUG_ON(level >= BTREE_MAX_DEPTH); +#ifdef CONFIG_BCACHEFS_DEBUG + iter->trans->locking = b; +#endif - return likely(six_trylock_type(&b->lock, type)) || + ret = likely(six_trylock_type(&b->lock, type)) || btree_node_lock_increment(iter, b, level, type) || __bch2_btree_node_lock(b, pos, level, iter, type); + +#ifdef CONFIG_BCACHEFS_DEBUG + iter->trans->locking = NULL; +#endif + return ret; } bool __bch2_btree_node_relock(struct btree_iter *, unsigned); diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 8357b5251a43..b86d7369eb2d 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -281,6 +281,10 @@ struct btree_insert_entry { struct btree_trans { struct bch_fs *c; +#ifdef CONFIG_BCACHEFS_DEBUG + struct list_head list; + struct btree *locking; +#endif unsigned long ip; u64 iters_linked; diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c index d9de0d1302e2..a9f5d5696622 100644 --- a/fs/bcachefs/clock.c +++ b/fs/bcachefs/clock.c @@ -162,7 +162,7 @@ ssize_t bch2_io_timers_show(struct io_clock *clock, char *buf) now = atomic_long_read(&clock->now); for (i = 0; i < clock->timers.used; i++) - pr_buf(&out, "%pf:\t%li\n", + pr_buf(&out, "%ps:\t%li\n", clock->timers.data[i]->fn, clock->timers.data[i]->expire - now); spin_unlock(&clock->timer_lock); diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 32999161bdd8..17dc60d98dc3 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -1235,14 +1235,14 @@ ssize_t bch2_journal_print_pins(struct journal *j, char *buf) i, atomic_read(&pin_list->count)); list_for_each_entry(pin, &pin_list->list, list) - pr_buf(&out, "\t%p %pf\n", + pr_buf(&out, 
"\t%px %ps\n", pin, pin->flush); if (!list_empty(&pin_list->flushed)) pr_buf(&out, "flushed:\n"); list_for_each_entry(pin, &pin_list->flushed, list) - pr_buf(&out, "\t%p %pf\n", + pr_buf(&out, "\t%px %ps\n", pin, pin->flush); } spin_unlock(&j->lock); diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c index d78ffcc0e8a4..5f2bc933b0e9 100644 --- a/fs/bcachefs/sysfs.c +++ b/fs/bcachefs/sysfs.c @@ -166,6 +166,7 @@ read_attribute(journal_debug); read_attribute(journal_pins); read_attribute(btree_updates); read_attribute(dirty_btree_nodes); +read_attribute(btree_transactions); read_attribute(internal_uuid); @@ -401,6 +402,12 @@ SHOW(bch2_fs) if (attr == &sysfs_dirty_btree_nodes) return bch2_dirty_btree_nodes_print(c, buf); + if (attr == &sysfs_btree_transactions) { + struct printbuf out = _PBUF(buf, PAGE_SIZE); + + bch2_btree_trans_to_text(&out, c); + return out.pos - buf; + } if (attr == &sysfs_compression_stats) return bch2_compression_stats(c, buf); @@ -571,6 +578,7 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_journal_pins, &sysfs_btree_updates, &sysfs_dirty_btree_nodes, + &sysfs_btree_transactions, &sysfs_read_realloc_races, &sysfs_extent_migrate_done, |