-rw-r--r--	fs/bcachefs/bcachefs.h        |   3
-rw-r--r--	fs/bcachefs/btree_key_cache.c | 161
-rw-r--r--	fs/bcachefs/btree_key_cache.h |   5
-rw-r--r--	fs/bcachefs/btree_types.h     |  19
4 files changed, 188 insertions, 0 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index d6dc3bd457d3..7d69cca34a32 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -615,6 +615,9 @@ struct bch_fs {
 
 	mempool_t		btree_iters_pool;
 
+	struct mutex		btree_key_cache_lock;
+	struct rhashtable	btree_key_cache[BTREE_ID_NR];
+
 	struct workqueue_struct	*wq;
 	/* copygc needs its own workqueue for index updates.. */
 	struct workqueue_struct	*copygc_wq;
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
new file mode 100644
index 000000000000..822e0484a27a
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.c
@@ -0,0 +1,161 @@
+
+#include "bcachefs.h"
+#include "btree_iter.h"
+#include "btree_key_cache.h"
+
+static const struct rhashtable_params bch_btree_key_cache_params = {
+	.head_offset	= offsetof(struct btree_key_cache, hash),
+	.key_offset	= offsetof(struct btree_key_cache, k.k.p),
+	.key_len	= sizeof(struct bpos),
+};
+
+__flatten
+static inline struct btree_key_cache *btree_key_cache_find(struct bch_fs *c,
+					enum btree_id btree_id,
+					struct bpos pos)
+{
+	return rhashtable_lookup_fast(&c->btree_key_cache[btree_id], &pos,
+				      bch_btree_key_cache_params);
+}
+
+static struct btree_key_cache *
+btree_key_cache_fill(struct bch_fs *c,
+		     enum btree_id btree_id,
+		     struct bpos pos)
+{
+	struct btree_key_cache *c_k;
+	unsigned u64s = 64;
+	int ret;
+
+	mutex_lock(&c->btree_key_cache_lock);
+
+	rcu_read_lock();
+	c_k = btree_key_cache_find(c, btree_id, pos);
+	if (c_k) {
+		atomic_inc(&c_k->ref);
+		rcu_read_unlock();
+		return c_k;
+	}
+	rcu_read_unlock();
+
+	c_k = kmalloc(offsetof(struct btree_key_cache, k) +
+		      u64s * sizeof(u64), GFP_NOFS);
+	if (!c_k) {
+		mutex_unlock(&c->btree_key_cache_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	memset(c_k, 0, offsetof(struct btree_key_cache, k));
+
+	mutex_init(&c_k->lock);
+	BUG_ON(!mutex_trylock(&c_k->lock));
+	atomic_set(&c_k->ref, 1);
+
+	c_k->allocated_u64s	= u64s;
+	c_k->btree_id		= btree_id;
+	c_k->k.k.p		= pos;
+
+	ret = rhashtable_lookup_insert_fast(&c->btree_key_cache[btree_id],
+					    &c_k->hash,
+					    bch_btree_key_cache_params);
+	BUG_ON(ret);
+
+	mutex_unlock(&c->btree_key_cache_lock);
+
+	return c_k;
+}
+
+static int btree_key_cache_read(struct btree_trans *trans,
+				struct btree_key_cache *c_k)
+{
+	struct btree_iter *iter;
+	struct bkey_s_c k;
+	int ret;
+
+	iter = bch2_trans_get_iter(trans, c_k->btree_id, c_k->k.k.p, 0);
+	if (IS_ERR(iter))
+		return PTR_ERR(iter);
+
+	k = bch2_btree_iter_peek_slot(iter);
+	ret = btree_iter_err(k);
+	if (ret)
+		return ret;
+
+	BUG_ON(k.k->u64s > c_k->allocated_u64s);
+	bkey_reassemble(&c_k->k, k);
+	c_k->read_done = true;
+
+	bch2_trans_iter_put(trans, iter);
+
+	return 0;
+}
+
+void bch2_btree_key_cache_put(struct bch_fs *c,
+			      struct btree_key_cache *c_k)
+{
+	if (atomic_dec_and_test(&c_k->ref)) {
+	}
+}
+
+struct btree_key_cache *
+bch2_btree_key_cache_get(struct btree_trans *trans,
+			 enum btree_id btree_id,
+			 struct bpos pos)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_key_cache *c_k;
+
+	rcu_read_lock();
+	c_k = btree_key_cache_find(c, btree_id, pos);
+	if (c_k) {
+		atomic_inc(&c_k->ref);
+		rcu_read_unlock();
+		goto out;
+	}
+
+	rcu_read_unlock();
+
+	c_k = btree_key_cache_fill(c, btree_id, pos);
+	if (IS_ERR(c_k))
+		return c_k;
+out:
+	if (!c_k->read_done) {
+		int ret = 0;
+
+		mutex_lock(&c_k->lock);
+		if (!c_k->read_done)
+			ret = btree_key_cache_read(trans, c_k);
+		mutex_unlock(&c_k->lock);
+
+		if (ret) {
+			bch2_btree_key_cache_put(c, c_k);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return c_k;
+}
+
+void bch2_btree_key_cache_exit(struct bch_fs *c)
+{
+	unsigned i;
+
+	for (i = 0; i < ARRAY_SIZE(c->btree_key_cache); i++) {
+		rhashtable_destroy(&c->btree_key_cache[i]);
+	}
+}
+
+int bch2_btree_key_cache_init(struct bch_fs *c)
+{
+	unsigned i;
+	int ret;
+
+	for (i = 0; i < ARRAY_SIZE(c->btree_key_cache); i++) {
+		ret = rhashtable_init(&c->btree_key_cache[i],
+				      &bch_btree_key_cache_params);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
new file mode 100644
index 000000000000..856c4e5374b3
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.h
@@ -0,0 +1,5 @@
+#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
+#define _BCACHEFS_BTREE_KEY_CACHE_H
+
+
+#endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 57ef50142ee1..031087e33aa6 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -252,6 +252,25 @@ struct deferred_update {
 	struct bkey_i		k;
 };
 
+struct btree_key_cache {
+	struct rhash_head	hash;
+	struct mutex		lock;
+	atomic_t		ref;
+	struct rcu_head		rcu;
+
+	struct journal_preres	res;
+	struct journal_entry_pin journal;
+
+	unsigned		read_done:1;
+	unsigned		dirty:1;
+
+	u8			allocated_u64s;
+	enum btree_id		btree_id;
+
+	/* must be last: */
+	struct bkey_i		k;
+};
+
 struct btree_insert_entry {
 	struct bkey_i		*k;
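
As a rough sketch of how a caller might use the interface added above (not part of the patch; note the new header does not yet declare these functions): a transaction looks a key up by (btree_id, pos), the first lookup allocates the cache entry and fills it from the btree, subsequent lookups take a reference on the same entry, and the caller drops its reference with bch2_btree_key_cache_put(). The function name, the choice of BTREE_ID_INODES, and the pr_debug() call below are illustrative assumptions only.

/*
 * Illustrative only -- not part of the patch.  Assumes the caller already
 * holds a btree_trans and that bch2_btree_key_cache_get()/_put() have been
 * declared somewhere visible (the new header is still empty).
 */
static int example_peek_cached_key(struct btree_trans *trans, u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *c_k;

	/* First call for this pos allocates + reads; later calls share it: */
	c_k = bch2_btree_key_cache_get(trans, BTREE_ID_INODES, POS(inum, 0));
	if (IS_ERR(c_k))
		return PTR_ERR(c_k);

	/* c_k->k holds the key copied out of the btree (read_done is set): */
	pr_debug("cached key at %llu:%llu, %u u64s",
		 c_k->k.k.p.inode, c_k->k.k.p.offset, c_k->k.k.u64s);

	/* Drop the reference taken by _get(): */
	bch2_btree_key_cache_put(c, c_k);
	return 0;
}

Note that bch2_btree_key_cache_get() performs the initial btree read lazily under the per-entry mutex, so concurrent callers for the same position block until read_done is set and then share the cached copy.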