summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Kent Overstreet <kent.overstreet@gmail.com>	2020-06-09 17:49:24 -0400
committer	Kent Overstreet <kent.overstreet@gmail.com>	2020-06-09 21:32:47 -0400
commit	d69504f98bb021f817c0282848434dbb120afb04 (patch)
tree	b48762a0c365918d5c461ec16f7a676699ce5729
parent	ef308610cfacdc594c5d52c9c3a6ed0b2a124c7a (diff)
bcachefs: Don't allocate memory under the btree cache lock
The btree cache lock is needed for reclaiming from the btree node cache, and memory allocation can potentially spin and sleep (for 100 ms at a time), so... don't do that.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
-rw-r--r--	fs/bcachefs/btree_cache.c	87
1 files changed, 58 insertions, 29 deletions
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index a6b8b0b58925..6cbb263576d3 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -72,24 +72,33 @@ static const struct rhashtable_params bch_btree_cache_params = {
.obj_cmpfn = bch2_btree_cache_cmp_fn,
};
-static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+static int __btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
- struct btree_cache *bc = &c->btree_cache;
+ BUG_ON(b->data || b->aux_data);
b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
- goto err;
+ return -ENOMEM;
- if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
- goto err;
+ if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp)) {
+ kvpfree(b->data, btree_bytes(c));
+ b->data = NULL;
+ return -ENOMEM;
+ }
- bc->used++;
- list_move(&b->list, &bc->freeable);
- return;
-err:
- kvpfree(b->data, btree_bytes(c));
- b->data = NULL;
- list_move(&b->list, &bc->freed);
+ return 0;
+}
+
+static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ if (!__btree_node_data_alloc(c, b, gfp)) {
+ bc->used++;
+ list_move(&b->list, &bc->freeable);
+ } else {
+ list_move(&b->list, &bc->freed);
+ }
}
static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
@@ -524,35 +533,47 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
*/
list_for_each_entry(b, &bc->freeable, list)
if (!btree_node_reclaim(c, b))
- goto out_unlock;
+ goto got_node;
/*
* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &bc->freed, list)
- if (!btree_node_reclaim(c, b)) {
- btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
- if (b->data)
- goto out_unlock;
+ if (!btree_node_reclaim(c, b))
+ goto got_node;
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ b = NULL;
+got_node:
+ if (b)
+ list_del_init(&b->list);
+ mutex_unlock(&bc->lock);
+
+ if (!b) {
+ b = kzalloc(sizeof(struct btree), GFP_KERNEL);
+ if (!b)
goto err;
- }
- b = btree_node_mem_alloc(c, __GFP_NOWARN|GFP_NOIO);
- if (!b)
- goto err;
+ bkey_btree_ptr_init(&b->key);
+ six_lock_init(&b->lock);
+ INIT_LIST_HEAD(&b->list);
+ INIT_LIST_HEAD(&b->write_blocked);
+
+ BUG_ON(!six_trylock_intent(&b->lock));
+ BUG_ON(!six_trylock_write(&b->lock));
+ }
+
+ if (!b->data) {
+ if (__btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
+ goto err;
+
+ mutex_lock(&bc->lock);
+ bc->used++;
+ mutex_unlock(&bc->lock);
+ }
- BUG_ON(!six_trylock_intent(&b->lock));
- BUG_ON(!six_trylock_write(&b->lock));
-out_unlock:
BUG_ON(btree_node_hashed(b));
BUG_ON(btree_node_write_in_flight(b));
-
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
out:
b->flags = 0;
b->written = 0;
@@ -568,6 +589,14 @@ out:
memalloc_nofs_restore(flags);
return b;
err:
+ mutex_lock(&bc->lock);
+
+ if (b) {
+ list_add(&b->list, &bc->freed);
+ six_unlock_write(&b->lock);
+ six_unlock_intent(&b->lock);
+ }
+
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
b = btree_node_cannibalize(c);