author     Kent Overstreet <kent.overstreet@gmail.com>   2016-02-20 04:26:28 -0900
committer  Kent Overstreet <kent.overstreet@gmail.com>   2016-08-28 19:16:16 -0800
commit     8257a6962f0ce690672fc228d220c82126a54f35 (patch)
tree       f6951d0210d96b1b8f21369da14cffca1dbbe2df /fs/bcachefs/keylist.c
parent     41d25a23991ff1138cdf4018e52221568c130aab (diff)
bcache: new combined copygc/tiering
Diffstat (limited to 'fs/bcachefs/keylist.c')
-rw-r--r--  fs/bcachefs/keylist.c  |  190
1 file changed, 0 insertions, 190 deletions
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
index 638596300575..644734b1d4f2 100644
--- a/fs/bcachefs/keylist.c
+++ b/fs/bcachefs/keylist.c
@@ -117,193 +117,3 @@ void bch_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
BUG_ON(l->top_p > l->end_keys_p);
bkey_copy(where, insert);
}
-
-/* Scan keylists simple utilities */
-
-void bch_scan_keylist_init(struct scan_keylist *kl,
- struct cache_set *c,
- unsigned max_size)
-
-{
- kl->c = c;
- kl->owner = NULL;
-
- mutex_init(&kl->lock);
- kl->max_size = max_size;
- bch_keylist_init(&kl->list, NULL, 0);
-
- /*
- * Order of initialization is tricky, and this makes sure that
- * we have a valid cache set in case the order of
- * initialization changes and breaks things.
- */
- BUG_ON(c == NULL);
- mutex_lock(&c->gc_scan_keylist_lock);
- list_add_tail(&kl->mark_list, &c->gc_scan_keylists);
- mutex_unlock(&c->gc_scan_keylist_lock);
-}
-
-void bch_scan_keylist_destroy(struct scan_keylist *kl)
-{
- if (kl->c) {
- mutex_lock(&kl->c->gc_scan_keylist_lock);
- list_del(&kl->mark_list);
- mutex_unlock(&kl->c->gc_scan_keylist_lock);
- }
-
- mutex_lock(&kl->lock);
- bch_keylist_free(&kl->list);
- mutex_unlock(&kl->lock);
-}
-
-void bch_scan_keylist_reset(struct scan_keylist *kl)
-{
- mutex_lock(&kl->lock);
- kl->list.bot_p = kl->list.top_p = kl->list.start_keys_p;
- mutex_unlock(&kl->lock);
-}
-
-/*
- * This should only be called from sysfs, and holding a lock that prevents
- * re-entrancy.
- */
-void bch_scan_keylist_resize(struct scan_keylist *kl,
- unsigned max_size)
-{
- mutex_lock(&kl->lock);
- kl->max_size = max_size; /* May be smaller than current size */
- mutex_unlock(&kl->lock);
-}
-
-/**
- * bch_keylist_recalc_oldest_gens - update oldest_gen pointers from keylist keys
- *
- * This prevents us from wrapping around gens for a bucket only referenced from
- * the tiering or moving GC keylists. We don't actually care that the data in
- * those buckets is marked live, only that we don't wrap the gens.
- *
- * Note: This interlocks with insertions, but not all dequeues interlock.
- * The particular case in which dequeues don't interlock is when a
- * scan list used by the copy offload ioctls is used as a plain
- * keylist for btree insertion.
- * The btree insertion code doesn't go through
- * bch_scan_keylist_dequeue below, and instead uses plain
- * bch_keylist_dequeue. The other pointers (top, start, end) are
- * unchanged in this case.
- * A little care with the bot_p pointer suffices in this case.
- * Of course, we may end up marking stuff that we don't need to mark,
- * but was recently valid and we have likely just inserted in the tree
- * anyway.
- */
-void bch_keylist_recalc_oldest_gens(struct cache_set *c,
- struct scan_keylist *kl)
-{
- struct bkey_i *k;
-
- mutex_lock(&kl->lock);
-
- for_each_keylist_key(&kl->list, k)
- bch_btree_key_recalc_oldest_gen(c, bkey_i_to_s_c(k));
-
- mutex_unlock(&kl->lock);
-}
-
-int bch_scan_keylist_add(struct scan_keylist *kl, struct bkey_s_c k)
-{
- int ret;
-
- mutex_lock(&kl->lock);
- ret = bch_keylist_realloc_max(&kl->list,
- k.k->u64s,
- kl->max_size);
-
- if (!ret) {
- bkey_reassemble(kl->list.top, k);
- bch_keylist_enqueue(&kl->list);
- atomic64_add(k.k->size, &kl->sectors);
- }
- mutex_unlock(&kl->lock);
-
- return ret;
-}
-
-/* Actual scanning functionality of scan_keylists */
-
-static void bch_refill_scan_keylist(struct cache_set *c,
- struct scan_keylist *kl,
- struct bpos *last_scanned,
- struct bpos end,
- scan_keylist_pred_fn *pred)
-{
- struct bpos start = *last_scanned;
- struct btree_iter iter;
- struct bkey_s_c k;
- unsigned nr_found = 0;
-
- for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, *last_scanned, k) {
- if (bkey_cmp(k.k->p, end) >= 0) {
- *last_scanned = k.k->p;
- goto done;
- }
-
- if (pred(kl, k)) {
- if (bch_scan_keylist_add(kl, k))
- goto done;
-
- nr_found++;
- }
-
- *last_scanned = k.k->p;
- bch_btree_iter_cond_resched(&iter);
- }
-
- /* If we end up here, it means:
- * - the predicate didn't fill up the keylist
- * - we never saw a key at or past the end position
- * - there were no more keys to scan over
- * Therefore, we are at the end of the key space */
- *last_scanned = POS_MAX;
-done:
- bch_btree_iter_unlock(&iter);
-
- trace_bcache_keyscan(nr_found,
- start.inode, start.offset,
- last_scanned->inode,
- last_scanned->offset);
-}
-
-struct bkey_i *bch_scan_keylist_next(struct scan_keylist *kl)
-{
- if (bch_keylist_empty(&kl->list))
- return NULL;
-
- return bch_keylist_front(&kl->list);
-}
-
-struct bkey_i *bch_scan_keylist_next_rescan(struct cache_set *c,
- struct scan_keylist *kl,
- struct bpos *last_scanned,
- struct bpos end,
- scan_keylist_pred_fn *pred)
-{
- if (bch_keylist_empty(&kl->list)) {
- if (bkey_cmp(*last_scanned, end) >= 0)
- return NULL;
-
- bch_refill_scan_keylist(c, kl, last_scanned, end, pred);
- }
-
- return bch_scan_keylist_next(kl);
-}
-
-void bch_scan_keylist_dequeue(struct scan_keylist *kl)
-{
- u64 sectors;
-
- mutex_lock(&kl->lock);
- sectors = kl->list.bot->k.size;
- bch_keylist_dequeue(&kl->list);
- mutex_unlock(&kl->lock);
-
- BUG_ON(atomic64_sub_return(sectors, &kl->sectors) < 0);
-}
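
For reference, a minimal sketch of how the removed scan-keylist helpers fit together as a consumer loop: a predicate decides which extents get queued, bch_scan_keylist_next_rescan() refills the list on demand, and bch_scan_keylist_dequeue() drops each key (and its sector count) once it has been processed. The predicate, example_scan_pass() and the placeholder "move" step are illustrative stand-ins, not code from this commit; the scan_keylist_pred_fn signature is inferred from the call sites above, and the sketch assumes the declarations the removed code relied on.

/*
 * Illustrative predicate: queue every key the scan sees. The exact
 * scan_keylist_pred_fn typedef is inferred from pred(kl, k) above.
 */
static bool queue_any_extent(struct scan_keylist *kl, struct bkey_s_c k)
{
	return true;
}

/* Hypothetical pass over the whole key space, using only the removed helpers. */
static void example_scan_pass(struct cache_set *c, struct scan_keylist *kl)
{
	struct bpos last_scanned = { .inode = 0, .offset = 0 };
	struct bkey_i *k;

	while ((k = bch_scan_keylist_next_rescan(c, kl, &last_scanned,
						 POS_MAX, queue_any_extent))) {
		/* ... move/copy the extent pointed to by k here ... */

		/* Drop the key and subtract its sectors from kl->sectors */
		bch_scan_keylist_dequeue(kl);
	}
}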
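
Likewise, a sketch of the GC-side walk that the gc_scan_keylists registration in bch_scan_keylist_init() exists for: every registered scan keylist gets its keys' oldest_gen pointers refreshed so that buckets referenced only from these lists cannot have their gens wrapped. The function name here is hypothetical; the list head, lock and member names are taken from the code above.

static void example_recalc_scan_keylist_gens(struct cache_set *c)
{
	struct scan_keylist *kl;

	/*
	 * Same lock that bch_scan_keylist_init()/destroy() take around
	 * changes to gc_scan_keylists membership.
	 */
	mutex_lock(&c->gc_scan_keylist_lock);
	list_for_each_entry(kl, &c->gc_scan_keylists, mark_list)
		bch_keylist_recalc_oldest_gens(c, kl);
	mutex_unlock(&c->gc_scan_keylist_lock);
}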