-rw-r--r--  drivers/md/bcache/btree_iter.c  32
-rw-r--r--  drivers/md/bcache/dirent.c      10
2 files changed, 31 insertions, 11 deletions
diff --git a/drivers/md/bcache/btree_iter.c b/drivers/md/bcache/btree_iter.c
index 113cb8cd54de..42ac601be585 100644
--- a/drivers/md/bcache/btree_iter.c
+++ b/drivers/md/bcache/btree_iter.c
@@ -399,15 +399,34 @@ static void btree_iter_up(struct btree_iter *iter)
btree_node_unlock(iter, iter->level++);
}
-static void verify_no_read_locks_held(struct btree_iter *iter)
+static void btree_iter_verify_locking(struct btree_iter *iter)
{
#ifdef CONFIG_BCACHE_DEBUG
struct btree_iter *linked;
unsigned level;
+ /*
+ * Can't hold _any_ read locks (including in linked iterators) when
+ * taking intent locks; that leads to a fun deadlock involving write
+ * locks and journal reservations.
+ *
+ * We could conceivably drop read locks, then retake them, and return
+ * -EINTR if retaking fails... but let's keep things simple for now:
+ */
+
for_each_linked_btree_iter(iter, linked)
for (level = 0; level < BTREE_MAX_DEPTH; level++)
BUG_ON(btree_node_read_locked(linked, level));
+
+ /*
+ * Also, we have to take intent locks on interior nodes before leaf
+ * nodes - verify that linked iterators don't hold intent locks at
+ * levels below the one we're currently at:
+ */
+ for_each_linked_btree_iter(iter, linked)
+ for (level = 0; level < iter->level; level++)
+ BUG_ON(btree_node_intent_locked(linked, level));
#endif
}
@@ -426,17 +445,8 @@ static int __must_check __bch_btree_iter_traverse(struct btree_iter *iter,
if (!iter->nodes[iter->level])
return 0;
- /*
- * Can't hold _any_ read locks (including in linked iterators) when
- * taking intent locks, that leads to a fun deadlock involving write
- * locks and journal reservations
- *
- * We could conceivably drop read locks, then retake them and if
- * retaking fails then return -EINTR... but, let's keep things simple
- * for now:
- */
if (iter->locks_want >= 0)
- verify_no_read_locks_held(iter);
+ btree_iter_verify_locking(iter);
retry:
/*
* If the current node isn't locked, go up until we have a locked node
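Not part of the patch: the new check can be read as a small invariant over the locking state of all linked iterators. Below is a minimal, self-contained sketch of that invariant, assuming toy stand-in types (struct toy_iter, MAX_DEPTH and the lock_type enum are illustrative, not the kernel's own definitions; the real code walks for_each_linked_btree_iter() and uses btree_node_read_locked()/btree_node_intent_locked()).

#include <assert.h>

#define MAX_DEPTH 4

enum lock_type { UNLOCKED, READ_LOCKED, INTENT_LOCKED };

struct toy_iter {
	unsigned		level;			/* level we're about to lock from (0 = leaf) */
	enum lock_type		locks[MAX_DEPTH];
	struct toy_iter		*next_linked;		/* other iterators in the same operation */
};

/*
 * Mirrors the two rules btree_iter_verify_locking() asserts: before taking
 * intent locks, no linked iterator may hold a read lock at any level, and
 * no linked iterator may hold an intent lock below the level we're locking
 * (interior nodes must be intent locked before leaves).
 */
static void toy_verify_locking(struct toy_iter *iter)
{
	struct toy_iter *linked;
	unsigned level;

	for (linked = iter->next_linked; linked; linked = linked->next_linked) {
		for (level = 0; level < MAX_DEPTH; level++)
			assert(linked->locks[level] != READ_LOCKED);

		for (level = 0; level < iter->level; level++)
			assert(linked->locks[level] != INTENT_LOCKED);
	}
}

int main(void)
{
	/* A linked iterator already holding an intent lock on an interior node: */
	struct toy_iter interior = {
		.level	= 1,
		.locks	= { [1] = INTENT_LOCKED },
	};

	/*
	 * We're about to take intent locks starting at the leaf (level 0):
	 * the linked lock sits at a level >= ours and no read locks are
	 * held, so both checks pass.
	 */
	struct toy_iter leaf = {
		.level		= 0,
		.next_linked	= &interior,
	};

	toy_verify_locking(&leaf);
	return 0;
}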
diff --git a/drivers/md/bcache/dirent.c b/drivers/md/bcache/dirent.c
index e5ecc073825c..5f3f031c6b40 100644
--- a/drivers/md/bcache/dirent.c
+++ b/drivers/md/bcache/dirent.c
@@ -287,6 +287,16 @@ int bch_dirent_rename(struct cache_set *c,
do {
/*
+ * When taking intent locks, we have to take interior node locks
+ * before leaf node locks; if the second iter we traverse has
+ * locks_want greater than the first's, we could end up taking an
+ * intent lock on an interior node after the first iterator's
+ * traversal only took an intent lock on a leaf.
+ */
+ src_iter.locks_want = dst_iter.locks_want =
+ max(src_iter.locks_want, dst_iter.locks_want);
+
+ /*
* Have to traverse lower btree nodes before higher - due to
* lock ordering.
*/
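Again not part of the patch: a companion sketch, with toy stand-ins rather than the real bch_dirent_rename() types, of the effect of the new locks_want adjustment. Bumping both iterators to the higher of the two values before either traversal means whichever iterator is traversed second never upgrades to interior intent locks after the first only intent locked a leaf.

#include <stdio.h>

/* Toy stand-in for struct btree_iter: only the field the fix touches. */
struct toy_iter {
	int	locks_want;	/* toy semantics: how far up the tree intent locks are wanted */
};

#define max(a, b)	((a) > (b) ? (a) : (b))

/*
 * The pattern the patch adds at the top of the rename loop: equalize
 * locks_want on both iterators before traversing either, so both
 * traversals take intent locks over the same depth and the
 * interior-before-leaf ordering can't be violated by the second one.
 */
static void toy_rename_prep(struct toy_iter *src, struct toy_iter *dst)
{
	src->locks_want = dst->locks_want =
		max(src->locks_want, dst->locks_want);
}

int main(void)
{
	struct toy_iter src = { .locks_want = 0 };	/* leaf only */
	struct toy_iter dst = { .locks_want = 1 };	/* leaf + parent */

	toy_rename_prep(&src, &dst);

	/* Both now want intent locks over the same depth: */
	printf("src->locks_want = %d, dst->locks_want = %d\n",
	       src.locks_want, dst.locks_want);
	return 0;
}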