summary refs log tree commit diff
diff options
context:
space:
mode:
author	Kent Overstreet <kent.overstreet@gmail.com>	2018-02-13 17:16:35 -0500
committer	Kent Overstreet <kent.overstreet@gmail.com>	2018-03-06 15:27:06 -0500
commit	54fb0e10e1317285fa71a59d3b182d082b318959 (patch)
tree	f7709f99ddd7340e2986a84af8fd7568219ee4dc
parent	aaf183b582b723c4a7a5d5098645b239303c95aa (diff)
lockdep thing
-rw-r--r--	fs/bcachefs/btree_cache.c	39
-rw-r--r--	fs/bcachefs/btree_cache.h	2
-rw-r--r--	fs/bcachefs/btree_iter.c	2
-rw-r--r--	fs/bcachefs/six.c	15
4 files changed, 37 insertions, 21 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 8295af589182..07629ee19dfe 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -93,6 +93,8 @@ err:
list_move(&b->list, &bc->freed);
}
+static struct lock_class_key btree_lock_key;
+
static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
struct btree *b = kzalloc(sizeof(struct btree), gfp);
@@ -100,7 +102,7 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
return NULL;
bkey_extent_init(&b->key);
- six_lock_init(&b->lock);
+ __six_lock_init(&b->lock, "b->lock", &btree_lock_key);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
@@ -108,6 +110,17 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
return b->data ? b : NULL;
}
+void bch2_verify_no_btree_locks_held(void)
+{
+ struct task_struct *curr = current;
+ struct held_lock *i;
+
+ for (i = curr->held_locks;
+ i < curr->held_locks + curr->lockdep_depth;
+ i++)
+ BUG_ON(i->instance->key == &btree_lock_key);
+}
+
/* Btree in memory cache - hash table */
void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
@@ -155,7 +168,7 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc,
* this version is for btree nodes that have already been freed (we're not
* reaping a real btree node)
*/
-static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
+static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
{
struct btree_cache *bc = &c->btree_cache;
int ret = 0;
@@ -209,16 +222,6 @@ out_unlock_intent:
goto out;
}
-static int btree_node_reclaim(struct bch_fs *c, struct btree *b)
-{
- return __btree_node_reclaim(c, b, false);
-}
-
-static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
-{
- return __btree_node_reclaim(c, b, true);
-}
-
static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -260,7 +263,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
break;
if (++i > 3 &&
- !btree_node_reclaim(c, b)) {
+ !btree_node_reclaim(c, b, false)) {
btree_node_data_free(c, b);
six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
@@ -279,7 +282,7 @@ restart:
}
if (!btree_node_accessed(b) &&
- !btree_node_reclaim(c, b)) {
+ !btree_node_reclaim(c, b, false)) {
/* can't call bch2_btree_node_hash_remove under lock */
freed++;
if (&t->list != &bc->live)
@@ -486,12 +489,12 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
struct btree *b;
list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_reclaim(c, b))
+ if (!btree_node_reclaim(c, b, false))
return b;
while (1) {
list_for_each_entry_reverse(b, &bc->live, list)
- if (!btree_node_write_and_reclaim(c, b))
+ if (!btree_node_reclaim(c, b, true))
return b;
/*
@@ -516,7 +519,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
* the list. Check if there's any freed nodes there:
*/
list_for_each_entry(b, &bc->freeable, list)
- if (!btree_node_reclaim(c, b))
+ if (!btree_node_reclaim(c, b, false))
goto out_unlock;
/*
@@ -524,7 +527,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &bc->freed, list)
- if (!btree_node_reclaim(c, b)) {
+ if (!btree_node_reclaim(c, b, false)) {
btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
if (b->data)
goto out_unlock;
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index bc5899ca09f1..390bd88c54cc 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -97,4 +97,6 @@ static inline unsigned btree_blocks(struct bch_fs *c)
int bch2_print_btree_node(struct bch_fs *, struct btree *,
char *, size_t);
+void bch2_verify_no_btree_locks_held(void);
+
#endif /* _BCACHEFS_BTREE_CACHE_H */
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 9725a02f6448..566b6b3ec6de 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -279,6 +279,8 @@ int bch2_btree_iter_unlock(struct btree_iter *iter)
__bch2_btree_iter_unlock(linked);
__bch2_btree_iter_unlock(iter);
+ bch2_verify_no_btree_locks_held();
+
return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}
diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c
index f0ff8d41923c..248cd910ad1d 100644
--- a/fs/bcachefs/six.c
+++ b/fs/bcachefs/six.c
@@ -55,6 +55,13 @@ struct six_lock_vals {
}, \
}
+static inline bool six_lock_held(union six_lock_state state, enum six_lock_type type)
+{
+ const struct six_lock_vals l[] = LOCK_VALS;
+
+ return (state.v & l[type].held_mask) != 0;
+}
+
static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
union six_lock_state old)
{
@@ -371,9 +378,9 @@ static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
const struct six_lock_vals l[] = LOCK_VALS;
union six_lock_state state;
- EBUG_ON(!(lock->state.v & l[type].held_mask));
+ EBUG_ON(!six_lock_held(lock->state, type));
EBUG_ON(type == SIX_LOCK_write &&
- !(lock->state.v & __SIX_LOCK_HELD_intent));
+ !six_lock_held(lock->state, SIX_LOCK_intent));
six_clear_owner(lock, type);
@@ -454,7 +461,7 @@ bool six_lock_tryupgrade(struct six_lock *lock)
do {
new.v = old.v = v;
- EBUG_ON(!(old.v & l[SIX_LOCK_read].held_mask));
+ EBUG_ON(!six_lock_held(old, SIX_LOCK_read));
new.v += l[SIX_LOCK_read].unlock_val;
@@ -495,8 +502,10 @@ bool six_trylock_convert(struct six_lock *lock,
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
const struct six_lock_vals l[] = LOCK_VALS;
+ u64 v;
EBUG_ON(type == SIX_LOCK_write);
+
six_acquire(&lock->dep_map, 0);
/* XXX: assert already locked, and that we don't overflow: */