author     Kent Overstreet <kent.overstreet@gmail.com>    2020-06-29 19:24:13 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>    2020-06-29 19:25:57 -0400
commit     217eb889b5c4ace879083617690d4b3d615de073 (patch)
tree       022151c655efd43e631e73a8f4e69ba7dc13a6ab
parent     ce0522135ae04886e05b24bb48cc163f33bd8206 (diff)
Merge with b7020ae929 bcachefs: Refactor dio write code to reinit bch_write_op
-rw-r--r--  fs/bcachefs/Makefile | 1
-rw-r--r--  fs/bcachefs/alloc_background.c | 362
-rw-r--r--  fs/bcachefs/alloc_background.h | 5
-rw-r--r--  fs/bcachefs/alloc_foreground.c | 4
-rw-r--r--  fs/bcachefs/alloc_types.h | 16
-rw-r--r--  fs/bcachefs/bcachefs.h | 33
-rw-r--r--  fs/bcachefs/bcachefs_format.h | 2
-rw-r--r--  fs/bcachefs/bcachefs_ioctl.h | 4
-rw-r--r--  fs/bcachefs/bkey_methods.c | 21
-rw-r--r--  fs/bcachefs/bset.c | 41
-rw-r--r--  fs/bcachefs/bset.h | 4
-rw-r--r--  fs/bcachefs/btree_cache.c | 185
-rw-r--r--  fs/bcachefs/btree_cache.h | 2
-rw-r--r--  fs/bcachefs/btree_gc.c | 145
-rw-r--r--  fs/bcachefs/btree_gc.h | 3
-rw-r--r--  fs/bcachefs/btree_io.c | 59
-rw-r--r--  fs/bcachefs/btree_io.h | 2
-rw-r--r--  fs/bcachefs/btree_iter.c | 466
-rw-r--r--  fs/bcachefs/btree_iter.h | 35
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 519
-rw-r--r--  fs/bcachefs/btree_key_cache.h | 25
-rw-r--r--  fs/bcachefs/btree_locking.h | 71
-rw-r--r--  fs/bcachefs/btree_types.h | 91
-rw-r--r--  fs/bcachefs/btree_update.h | 5
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 951
-rw-r--r--  fs/bcachefs/btree_update_interior.h | 73
-rw-r--r--  fs/bcachefs/btree_update_leaf.c | 186
-rw-r--r--  fs/bcachefs/buckets.c | 400
-rw-r--r--  fs/bcachefs/buckets.h | 2
-rw-r--r--  fs/bcachefs/buckets_types.h | 5
-rw-r--r--  fs/bcachefs/chardev.c | 9
-rw-r--r--  fs/bcachefs/checksum.c | 31
-rw-r--r--  fs/bcachefs/checksum.h | 6
-rw-r--r--  fs/bcachefs/clock.c | 2
-rw-r--r--  fs/bcachefs/compress.c | 10
-rw-r--r--  fs/bcachefs/debug.c | 10
-rw-r--r--  fs/bcachefs/dirent.c | 2
-rw-r--r--  fs/bcachefs/ec.c | 47
-rw-r--r--  fs/bcachefs/ec.h | 2
-rw-r--r--  fs/bcachefs/error.c | 6
-rw-r--r--  fs/bcachefs/error.h | 1
-rw-r--r--  fs/bcachefs/extent_update.c | 3
-rw-r--r--  fs/bcachefs/extents.c | 2
-rw-r--r--  fs/bcachefs/fs-io.c | 342
-rw-r--r--  fs/bcachefs/fs-io.h | 4
-rw-r--r--  fs/bcachefs/fs.c | 27
-rw-r--r--  fs/bcachefs/fsck.c | 2
-rw-r--r--  fs/bcachefs/io.c | 29
-rw-r--r--  fs/bcachefs/io.h | 18
-rw-r--r--  fs/bcachefs/journal.c | 28
-rw-r--r--  fs/bcachefs/journal.h | 54
-rw-r--r--  fs/bcachefs/journal_io.c | 46
-rw-r--r--  fs/bcachefs/journal_reclaim.c | 67
-rw-r--r--  fs/bcachefs/journal_reclaim.h | 12
-rw-r--r--  fs/bcachefs/journal_types.h | 1
-rw-r--r--  fs/bcachefs/keylist.c | 4
-rw-r--r--  fs/bcachefs/keylist.h | 4
-rw-r--r--  fs/bcachefs/migrate.c | 11
-rw-r--r--  fs/bcachefs/move.c | 19
-rw-r--r--  fs/bcachefs/move_types.h | 1
-rw-r--r--  fs/bcachefs/movinggc.c | 17
-rw-r--r--  fs/bcachefs/opts.h | 10
-rw-r--r--  fs/bcachefs/recovery.c | 238
-rw-r--r--  fs/bcachefs/recovery.h | 7
-rw-r--r--  fs/bcachefs/reflink.c | 3
-rw-r--r--  fs/bcachefs/super-io.c | 22
-rw-r--r--  fs/bcachefs/super.c | 194
-rw-r--r--  fs/bcachefs/sysfs.c | 53
-rw-r--r--  fs/bcachefs/util.h | 29
-rw-r--r--  include/linux/six.h | 13
-rw-r--r--  include/trace/events/bcachefs.h | 6
-rw-r--r--  kernel/locking/six.c | 69
72 files changed, 2998 insertions, 2181 deletions
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile
index c7727d05cf49..d85ced62c0dd 100644
--- a/fs/bcachefs/Makefile
+++ b/fs/bcachefs/Makefile
@@ -13,6 +13,7 @@ bcachefs-y := \
btree_gc.o \
btree_io.o \
btree_iter.o \
+ btree_key_cache.o \
btree_update_interior.o \
btree_update_leaf.o \
buckets.o \
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index c37945189c68..cb720ee04b86 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -4,6 +4,7 @@
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
+#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
@@ -208,29 +209,25 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
get_alloc_field(a.v, &d, i));
}
-int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
+static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_s_c k)
{
- struct btree_trans trans;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- struct bch_dev *ca;
- unsigned i;
- int ret = 0;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- bch2_btree_and_journal_iter_init(&iter, &trans, journal_keys,
- BTREE_ID_ALLOC, POS_MIN);
-
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ if (!level)
bch2_mark_key(c, k, 0, 0, NULL, 0,
BTREE_TRIGGER_ALLOC_READ|
BTREE_TRIGGER_NOATOMIC);
- bch2_btree_and_journal_iter_advance(&iter);
- }
+ return 0;
+}
- ret = bch2_trans_exit(&trans) ?: ret;
+int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret = 0;
+
+ ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
+ NULL, bch2_alloc_read_fn);
if (ret) {
bch_err(c, "error reading alloc info: %i", ret);
return ret;
@@ -280,6 +277,13 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bkey_i_alloc *a;
int ret;
retry:
+ bch2_trans_begin(trans);
+
+ ret = bch2_btree_key_cache_flush(trans,
+ BTREE_ID_ALLOC, iter->pos);
+ if (ret)
+ goto err;
+
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
@@ -334,7 +338,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
- bch2_trans_init(&trans, c, 0, 0);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
@@ -368,25 +372,6 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
return ret < 0 ? ret : 0;
}
-int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
-{
- struct btree_trans trans;
- struct btree_iter *iter;
- int ret;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-
- ret = bch2_alloc_write_key(&trans, iter,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW|
- BTREE_INSERT_JOURNAL_REPLAY);
- bch2_trans_exit(&trans);
- return ret < 0 ? ret : 0;
-}
-
/* Bucket IO clocks: */
static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
@@ -516,6 +501,7 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
unsigned long gc_count = c->gc_count;
+ u64 available;
int ret = 0;
ca->allocator_state = ALLOCATOR_BLOCKED;
@@ -531,9 +517,11 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
if (gc_count != c->gc_count)
ca->inc_gen_really_needs_gc = 0;
- if ((ssize_t) (dev_buckets_available(c, ca) -
- ca->inc_gen_really_needs_gc) >=
- (ssize_t) fifo_free(&ca->free_inc))
+ available = max_t(s64, 0, dev_buckets_available(c, ca) -
+ ca->inc_gen_really_needs_gc);
+
+ if (available > fifo_free(&ca->free_inc) ||
+ (available && !fifo_full(&ca->free[RESERVE_BTREE])))
break;
up_read(&c->gc_lock);
@@ -844,10 +832,9 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
struct bkey_alloc_unpacked u;
struct bucket *g;
struct bucket_mark m;
- struct bkey_s_c k;
bool invalidating_cached_data;
size_t b;
- int ret;
+ int ret = 0;
BUG_ON(!ca->alloc_heap.used ||
!ca->alloc_heap.data[0].nr);
@@ -861,32 +848,53 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
BUG_ON(!fifo_push(&ca->free_inc, b));
- bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
+ g = bucket(ca, b);
+ m = READ_ONCE(g->mark);
+
+ invalidating_cached_data = m.cached_sectors != 0;
+
+ /*
+ * If we're not invalidating cached data, we only increment the bucket
+ * gen in memory here, the incremented gen will be updated in the btree
+ * by bch2_trans_mark_pointer():
+ */
+
+ if (!invalidating_cached_data)
+ bch2_invalidate_bucket(c, ca, b, &m);
+ else
+ bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
spin_unlock(&c->freelist_lock);
percpu_up_read(&c->mark_lock);
+ if (!invalidating_cached_data)
+ goto out;
+
+ /*
+ * If the read-only path is trying to shut down, we can't be generating
+ * new btree updates:
+ */
+ if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
+ ret = 1;
+ goto out;
+ }
+
BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
- k = bch2_btree_iter_peek_slot(iter);
- ret = bkey_err(k);
+ ret = bch2_btree_iter_traverse(iter);
if (ret)
return ret;
- /*
- * The allocator has to start before journal replay is finished - thus,
- * we have to trust the in memory bucket @m, not the version in the
- * btree:
- */
percpu_down_read(&c->mark_lock);
- g = bucket(ca, b);
+ g = bucket(ca, iter->pos.offset);
m = READ_ONCE(g->mark);
u = alloc_mem_to_key(g, m);
+
percpu_up_read(&c->mark_lock);
- invalidating_cached_data = m.cached_sectors != 0;
+ invalidating_cached_data = u.cached_sectors != 0;
u.gen++;
u.data_type = 0;
@@ -919,7 +927,7 @@ retry:
flags);
if (ret == -EINTR)
goto retry;
-
+out:
if (!ret) {
/* remove from alloc_heap: */
struct alloc_heap_entry e, *top = ca->alloc_heap.data;
@@ -953,32 +961,7 @@ retry:
percpu_up_read(&c->mark_lock);
}
- return ret;
-}
-
-static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t bucket, u64 *flush_seq)
-{
- struct bucket_mark m;
-
- percpu_down_read(&c->mark_lock);
- spin_lock(&c->freelist_lock);
-
- bch2_invalidate_bucket(c, ca, bucket, &m);
-
- verify_not_on_freelist(c, ca, bucket);
- BUG_ON(!fifo_push(&ca->free_inc, bucket));
-
- spin_unlock(&c->freelist_lock);
-
- bucket_io_clock_reset(c, ca, bucket, READ);
- bucket_io_clock_reset(c, ca, bucket, WRITE);
-
- percpu_up_read(&c->mark_lock);
-
- *flush_seq = max(*flush_seq, bucket_journal_seq(c, m));
-
- return m.cached_sectors != 0;
+ return ret < 0 ? ret : 0;
}
/*
@@ -995,7 +978,9 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
POS(ca->dev_idx, 0),
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
/* Only use nowait if we've already invalidated at least one bucket: */
while (!ret &&
@@ -1119,6 +1104,8 @@ static int bch2_allocator_thread(void *arg)
while (1) {
cond_resched();
+ if (kthread_should_stop())
+ break;
pr_debug("discarding %zu invalidated buckets",
fifo_used(&ca->free_inc));
@@ -1436,221 +1423,6 @@ int bch2_dev_allocator_start(struct bch_dev *ca)
return 0;
}
-static bool flush_held_btree_writes(struct bch_fs *c)
-{
- struct bucket_table *tbl;
- struct rhash_head *pos;
- struct btree *b;
- bool nodes_unwritten;
- size_t i;
-again:
- cond_resched();
- nodes_unwritten = false;
-
- if (bch2_journal_error(&c->journal))
- return true;
-
- rcu_read_lock();
- for_each_cached_btree(b, c, tbl, i, pos)
- if (btree_node_need_write(b)) {
- if (btree_node_may_write(b)) {
- rcu_read_unlock();
- btree_node_lock_type(c, b, SIX_LOCK_read);
- bch2_btree_node_write(c, b, SIX_LOCK_read);
- six_unlock_read(&b->lock);
- goto again;
- } else {
- nodes_unwritten = true;
- }
- }
- rcu_read_unlock();
-
- if (c->btree_roots_dirty) {
- bch2_journal_meta(&c->journal);
- goto again;
- }
-
- return !nodes_unwritten &&
- !bch2_btree_interior_updates_nr_pending(c);
-}
-
-static void allocator_start_issue_discards(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- size_t bu;
-
- for_each_rw_member(ca, c, dev_iter)
- while (fifo_pop(&ca->free_inc, bu))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bu),
- ca->mi.bucket_size, GFP_NOIO, 0);
-}
-
-static int resize_free_inc(struct bch_dev *ca)
-{
- alloc_fifo free_inc;
-
- if (!fifo_full(&ca->free_inc))
- return 0;
-
- if (!init_fifo(&free_inc,
- ca->free_inc.size * 2,
- GFP_KERNEL))
- return -ENOMEM;
-
- fifo_move(&free_inc, &ca->free_inc);
- swap(free_inc, ca->free_inc);
- free_fifo(&free_inc);
- return 0;
-}
-
-static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- bool ret = true;
-
- if (test_alloc_startup(c))
- return false;
-
- down_read(&c->gc_lock);
-
- /* Scan for buckets that are already invalidated: */
- for_each_rw_member(ca, c, dev_iter) {
- struct bucket_array *buckets;
- struct bucket_mark m;
- long bu;
-
- down_read(&ca->bucket_lock);
- buckets = bucket_array(ca);
-
- for (bu = buckets->first_bucket;
- bu < buckets->nbuckets; bu++) {
- m = READ_ONCE(buckets->b[bu].mark);
-
- if (!buckets->b[bu].gen_valid ||
- !is_available_bucket(m) ||
- m.cached_sectors ||
- (ca->buckets_nouse &&
- test_bit(bu, ca->buckets_nouse)))
- continue;
-
- percpu_down_read(&c->mark_lock);
- bch2_mark_alloc_bucket(c, ca, bu, true,
- gc_pos_alloc(c, NULL), 0);
- percpu_up_read(&c->mark_lock);
-
- fifo_push(&ca->free_inc, bu);
-
- discard_invalidated_buckets(c, ca);
-
- if (fifo_full(&ca->free[RESERVE_BTREE]))
- break;
- }
- up_read(&ca->bucket_lock);
- }
-
- up_read(&c->gc_lock);
-
- /* did we find enough buckets? */
- for_each_rw_member(ca, c, dev_iter)
- if (!fifo_full(&ca->free[RESERVE_BTREE]))
- ret = false;
-
- return ret;
-}
-
-int bch2_fs_allocator_start(struct bch_fs *c)
-{
- struct bch_dev *ca;
- unsigned dev_iter;
- u64 journal_seq = 0;
- bool wrote;
- long bu;
- int ret = 0;
-
- if (!test_alloc_startup(c) &&
- bch2_fs_allocator_start_fast(c))
- return 0;
-
- pr_debug("not enough empty buckets; scanning for reclaimable buckets");
-
- /*
- * We're moving buckets to freelists _before_ they've been marked as
- * invalidated on disk - we have to so that we can allocate new btree
- * nodes to mark them as invalidated on disk.
- *
- * However, we can't _write_ to any of these buckets yet - they might
- * have cached data in them, which is live until they're marked as
- * invalidated on disk:
- */
- set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
-
- down_read(&c->gc_lock);
- do {
- wrote = false;
-
- for_each_rw_member(ca, c, dev_iter) {
- find_reclaimable_buckets(c, ca);
-
- while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
- (bu = next_alloc_bucket(ca)) >= 0) {
- ret = resize_free_inc(ca);
- if (ret) {
- percpu_ref_put(&ca->io_ref);
- up_read(&c->gc_lock);
- goto err;
- }
-
- bch2_invalidate_one_bucket(c, ca, bu,
- &journal_seq);
-
- fifo_push(&ca->free[RESERVE_BTREE], bu);
- }
- }
-
- pr_debug("done scanning for reclaimable buckets");
-
- /*
- * XXX: it's possible for this to deadlock waiting on journal reclaim,
- * since we're holding btree writes. What then?
- */
- ret = bch2_alloc_write(c,
- BTREE_INSERT_NOCHECK_RW|
- BTREE_INSERT_USE_ALLOC_RESERVE|
- BTREE_INSERT_NOWAIT, &wrote);
-
- /*
- * If bch2_alloc_write() did anything, it may have used some
- * buckets, and we need the RESERVE_BTREE freelist full - so we
- * need to loop and scan again.
- * And if it errored, it may have been because there weren't
- * enough buckets, so just scan and loop again as long as it
- * made some progress:
- */
- } while (wrote);
- up_read(&c->gc_lock);
-
- if (ret)
- goto err;
-
- pr_debug("flushing journal");
-
- ret = bch2_journal_flush(&c->journal);
- if (ret)
- goto err;
-
- pr_debug("issuing discards");
- allocator_start_issue_discards(c);
-err:
- clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
- closure_wait_event(&c->btree_interior_update_wait,
- flush_held_btree_writes(c));
-
- return ret;
-}
-
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
spin_lock_init(&c->freelist_lock);
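Aside: the bch2_alloc_read() hunk above replaces an open-coded btree-and-journal iterator loop with a per-key callback passed to bch2_btree_and_journal_walk(). Below is a minimal, self-contained sketch of that callback-walk shape only; the struct, the walker body and main() are simplified stand-ins for illustration, not the bcachefs implementation.

/* Illustrative sketch only: simplified stand-ins for the bcachefs types. */
#include <stdio.h>
#include <stddef.h>

struct bkey { unsigned btree_id, level; long long offset; };

/* Per-key callback, mirroring the shape of bch2_alloc_read_fn() above. */
typedef int (*walk_fn)(void *ctx, unsigned btree_id, unsigned level,
                       const struct bkey *k);

/* Hypothetical walker: visits every key in a flat array, stops on error. */
static int btree_and_journal_walk(const struct bkey *keys, size_t nr,
                                  void *ctx, walk_fn fn)
{
        size_t i;
        int ret;

        for (i = 0; i < nr; i++) {
                ret = fn(ctx, keys[i].btree_id, keys[i].level, &keys[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

/* Leaf-only callback, like bch2_alloc_read_fn(): ignore interior nodes. */
static int mark_leaf_key(void *ctx, unsigned btree_id, unsigned level,
                         const struct bkey *k)
{
        if (!level)
                (*(unsigned *)ctx)++;   /* "mark" the key */
        return 0;
}

int main(void)
{
        struct bkey keys[] = { { 0, 0, 1 }, { 0, 1, 2 }, { 0, 0, 3 } };
        unsigned marked = 0;

        btree_and_journal_walk(keys, 3, &marked, mark_leaf_key);
        printf("marked %u leaf keys\n", marked);        /* prints 2 */
        return 0;
}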
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 501c444353fb..f6b9f27f0713 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -54,7 +54,6 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
struct journal_keys;
int bch2_alloc_read(struct bch_fs *, struct journal_keys *);
-int bch2_alloc_replay_key(struct bch_fs *, struct bkey_i *);
static inline void bch2_wake_allocator(struct bch_dev *ca)
{
@@ -70,8 +69,7 @@ static inline void bch2_wake_allocator(struct bch_dev *ca)
static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
size_t bucket)
{
- if (expensive_debug_checks(c) &&
- test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
+ if (expensive_debug_checks(c)) {
size_t iter;
long i;
unsigned j;
@@ -94,7 +92,6 @@ void bch2_dev_allocator_stop(struct bch_dev *);
int bch2_dev_allocator_start(struct bch_dev *);
int bch2_alloc_write(struct bch_fs *, unsigned, bool *);
-int bch2_fs_allocator_start(struct bch_fs *);
void bch2_fs_allocator_background_init(struct bch_fs *);
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 697d576802b6..979aba30bc9d 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -212,9 +212,9 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
case RESERVE_ALLOC:
return 0;
case RESERVE_BTREE:
- return BTREE_NODE_OPEN_BUCKET_RESERVE;
+ return OPEN_BUCKETS_COUNT / 4;
default:
- return BTREE_NODE_OPEN_BUCKET_RESERVE * 2;
+ return OPEN_BUCKETS_COUNT / 2;
}
}
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index 832568dc9551..4f1465077994 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -46,16 +46,22 @@ enum alloc_reserve {
typedef FIFO(long) alloc_fifo;
-/* Enough for 16 cache devices, 2 tiers and some left over for pipelining */
-#define OPEN_BUCKETS_COUNT 256
+#define OPEN_BUCKETS_COUNT 1024
#define WRITE_POINT_HASH_NR 32
#define WRITE_POINT_MAX 32
+typedef u16 open_bucket_idx_t;
+
struct open_bucket {
spinlock_t lock;
atomic_t pin;
- u8 freelist;
+ open_bucket_idx_t freelist;
+
+ /*
+ * When an open bucket has an ec_stripe attached, this is the index of
+ * the block in the stripe this open_bucket corresponds to:
+ */
u8 ec_idx;
u8 type;
unsigned valid:1;
@@ -68,8 +74,8 @@ struct open_bucket {
#define OPEN_BUCKET_LIST_MAX 15
struct open_buckets {
- u8 nr;
- u8 v[OPEN_BUCKET_LIST_MAX];
+ open_bucket_idx_t nr;
+ open_bucket_idx_t v[OPEN_BUCKET_LIST_MAX];
};
struct dev_stripe_state {
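Aside: in the alloc_types.h hunk above, OPEN_BUCKETS_COUNT grows from 256 to 1024, so fields that index open buckets are widened from u8 to the new open_bucket_idx_t (u16), since a u8 can only hold indices 0..255. A standalone illustration of the truncation a u8 index would suffer (not bcachefs code):

#include <stdio.h>
#include <stdint.h>

#define OPEN_BUCKETS_COUNT 1024

int main(void)
{
        uint8_t  old_idx = OPEN_BUCKETS_COUNT - 1; /* truncates: 1023 becomes 255 */
        uint16_t new_idx = OPEN_BUCKETS_COUNT - 1; /* holds 1023 as intended */

        printf("u8 index: %u, u16 index: %u\n", old_idx, new_idx);
        return 0;
}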
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 289d7ae4e98c..893c89dbee60 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -190,6 +190,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/rhashtable.h>
#include <linux/rwsem.h>
+#include <linux/semaphore.h>
#include <linux/seqlock.h>
#include <linux/shrinker.h>
#include <linux/types.h>
@@ -338,7 +339,7 @@ enum bch_time_stats {
#define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
/* Size of the freelist we allocate btree nodes from: */
-#define BTREE_NODE_RESERVE BTREE_RESERVE_MAX
+#define BTREE_NODE_RESERVE (BTREE_RESERVE_MAX * 4)
#define BTREE_NODE_OPEN_BUCKET_RESERVE (BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
@@ -426,8 +427,8 @@ struct bch_dev {
alloc_fifo free[RESERVE_NR];
alloc_fifo free_inc;
- u8 open_buckets_partial[OPEN_BUCKETS_COUNT];
- unsigned open_buckets_partial_nr;
+ open_bucket_idx_t open_buckets_partial[OPEN_BUCKETS_COUNT];
+ open_bucket_idx_t open_buckets_partial_nr;
size_t fifo_last_bucket;
@@ -477,9 +478,11 @@ struct bch_dev {
enum {
/* startup: */
BCH_FS_ALLOC_READ_DONE,
- BCH_FS_ALLOCATOR_STARTED,
+ BCH_FS_ALLOC_CLEAN,
BCH_FS_ALLOCATOR_RUNNING,
+ BCH_FS_ALLOCATOR_STOPPING,
BCH_FS_INITIAL_GC_DONE,
+ BCH_FS_BTREE_INTERIOR_REPLAY_DONE,
BCH_FS_FSCK_DONE,
BCH_FS_STARTED,
BCH_FS_RW,
@@ -548,8 +551,8 @@ struct bch_fs {
struct super_block *vfs_sb;
char name[40];
- /* ro/rw, add/remove devices: */
- struct mutex state_lock;
+ /* ro/rw, add/remove/resize devices: */
+ struct rw_semaphore state_lock;
/* Counts outstanding writes, for clean transition to read-only */
struct percpu_ref writes;
@@ -600,13 +603,10 @@ struct bch_fs {
struct bio_set btree_bio;
struct btree_root btree_roots[BTREE_ID_NR];
- bool btree_roots_dirty;
struct mutex btree_root_lock;
struct btree_cache btree_cache;
- mempool_t btree_reserve_pool;
-
/*
* Cache of allocated btree nodes - if we allocate a btree node and
* don't use it, if we free it that space can't be reused until going
@@ -624,8 +624,16 @@ struct bch_fs {
struct mutex btree_interior_update_lock;
struct closure_waitlist btree_interior_update_wait;
+ struct workqueue_struct *btree_interior_update_worker;
+ struct work_struct btree_interior_update_work;
+
+ /* btree_iter.c: */
+ struct mutex btree_trans_lock;
+ struct list_head btree_trans_list;
mempool_t btree_iters_pool;
+ struct btree_key_cache btree_key_cache;
+
struct workqueue_struct *wq;
/* copygc needs its own workqueue for index updates.. */
struct workqueue_struct *copygc_wq;
@@ -682,8 +690,8 @@ struct bch_fs {
struct closure_waitlist freelist_wait;
u64 blocked_allocate;
u64 blocked_allocate_open_bucket;
- u8 open_buckets_freelist;
- u8 open_buckets_nr_free;
+ open_bucket_idx_t open_buckets_freelist;
+ open_bucket_idx_t open_buckets_nr_free;
struct closure_waitlist open_buckets_wait;
struct open_bucket open_buckets[OPEN_BUCKETS_COUNT];
@@ -719,6 +727,7 @@ struct bch_fs {
struct rw_semaphore gc_lock;
/* IO PATH */
+ struct semaphore io_in_flight;
struct bio_set bio_read;
struct bio_set bio_read_split;
struct bio_set bio_write;
@@ -732,7 +741,7 @@ struct bch_fs {
ZSTD_parameters zstd_params;
struct crypto_shash *sha256;
- struct crypto_skcipher *chacha20;
+ struct crypto_sync_skcipher *chacha20;
struct crypto_shash *poly1305;
atomic64_t key_version;
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 616863ef77d4..f808e63a713d 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1262,6 +1262,8 @@ LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
+LE64_BITMASK(BCH_SB_REFLINK, struct bch_sb, flags[0], 61, 62);
+
/* 61-64 unused */
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
diff --git a/fs/bcachefs/bcachefs_ioctl.h b/fs/bcachefs/bcachefs_ioctl.h
index ba8c75706bf1..d71157a3e073 100644
--- a/fs/bcachefs/bcachefs_ioctl.h
+++ b/fs/bcachefs/bcachefs_ioctl.h
@@ -275,9 +275,13 @@ struct bch_ioctl_dev_usage {
__u32 bucket_size;
__u64 nr_buckets;
+ __u64 available_buckets;
__u64 buckets[BCH_DATA_NR];
__u64 sectors[BCH_DATA_NR];
+
+ __u64 ec_buckets;
+ __u64 ec_sectors;
};
/*
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 55ef4032b37c..36e0c5152b47 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -176,13 +176,17 @@ void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
{
- pr_buf(out, "u64s %u type %s ", k->u64s,
- bch2_bkey_types[k->type]);
+ if (k) {
+ pr_buf(out, "u64s %u type %s ", k->u64s,
+ bch2_bkey_types[k->type]);
- bch2_bpos_to_text(out, k->p);
+ bch2_bpos_to_text(out, k->p);
- pr_buf(out, " snap %u len %u ver %llu",
- k->p.snapshot, k->size, k->version.lo);
+ pr_buf(out, " snap %u len %u ver %llu",
+ k->p.snapshot, k->size, k->version.lo);
+ } else {
+ pr_buf(out, "(null)");
+ }
}
void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
@@ -198,8 +202,11 @@ void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
bch2_bkey_to_text(out, k.k);
- pr_buf(out, ": ");
- bch2_val_to_text(out, c, k);
+
+ if (k.k) {
+ pr_buf(out, ": ");
+ bch2_val_to_text(out, c, k);
+ }
}
void bch2_bkey_swab_val(struct bkey_s k)
diff --git a/fs/bcachefs/bset.c b/fs/bcachefs/bset.c
index 6360b2e8cf73..6fc91e6a35e8 100644
--- a/fs/bcachefs/bset.c
+++ b/fs/bcachefs/bset.c
@@ -64,21 +64,27 @@ struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
* by the time we actually do the insert will all be deleted.
*/
-void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
+void bch2_dump_bset(struct bch_fs *c, struct btree *b,
+ struct bset *i, unsigned set)
{
struct bkey_packed *_k, *_n;
- struct bkey k, n;
- char buf[120];
+ struct bkey uk, n;
+ struct bkey_s_c k;
+ char buf[200];
if (!i->u64s)
return;
- for (_k = i->start, k = bkey_unpack_key(b, _k);
+ for (_k = i->start;
_k < vstruct_last(i);
- _k = _n, k = n) {
+ _k = _n) {
_n = bkey_next_skip_noops(_k, vstruct_last(i));
- bch2_bkey_to_text(&PBUF(buf), &k);
+ k = bkey_disassemble(b, _k, &uk);
+ if (c)
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ else
+ bch2_bkey_to_text(&PBUF(buf), k.k);
printk(KERN_ERR "block %u key %5zu: %s\n", set,
_k->_data - i->_data, buf);
@@ -87,31 +93,24 @@ void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
n = bkey_unpack_key(b, _n);
- if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
+ if (bkey_cmp(bkey_start_pos(&n), k.k->p) < 0) {
printk(KERN_ERR "Key skipped backwards\n");
continue;
}
- /*
- * Weird check for duplicate non extent keys: extents are
- * deleted iff they have 0 size, so if it has zero size and it's
- * not deleted these aren't extents:
- */
- if (((!k.size && !bkey_deleted(&k)) ||
- (!n.size && !bkey_deleted(&n))) &&
- !bkey_deleted(&k) &&
- !bkey_cmp(n.p, k.p))
+ if (!bkey_deleted(k.k) &&
+ !bkey_cmp(n.p, k.k->p))
printk(KERN_ERR "Duplicate keys\n");
}
}
-void bch2_dump_btree_node(struct btree *b)
+void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
struct bset_tree *t;
console_lock();
for_each_bset(b, t)
- bch2_dump_bset(b, bset(b, t), t - b->set);
+ bch2_dump_bset(c, b, bset(b, t), t - b->set);
console_unlock();
}
@@ -170,7 +169,7 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
struct bkey nu = bkey_unpack_key(b, n);
char buf1[80], buf2[80];
- bch2_dump_btree_node(b);
+ bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &ku);
bch2_bkey_to_text(&PBUF(buf2), &nu);
printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
@@ -248,7 +247,7 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
char buf1[100];
char buf2[100];
- bch2_dump_btree_node(b);
+ bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &k1);
bch2_bkey_to_text(&PBUF(buf2), &k2);
@@ -269,7 +268,7 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
char buf1[100];
char buf2[100];
- bch2_dump_btree_node(b);
+ bch2_dump_btree_node(NULL, b);
bch2_bkey_to_text(&PBUF(buf1), &k1);
bch2_bkey_to_text(&PBUF(buf2), &k2);
diff --git a/fs/bcachefs/bset.h b/fs/bcachefs/bset.h
index 7338ccbc8cbd..652ffed4adfb 100644
--- a/fs/bcachefs/bset.h
+++ b/fs/bcachefs/bset.h
@@ -600,8 +600,8 @@ void bch2_bfloat_to_text(struct printbuf *, struct btree *,
/* Debug stuff */
-void bch2_dump_bset(struct btree *, struct bset *, unsigned);
-void bch2_dump_btree_node(struct btree *);
+void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
+void bch2_dump_btree_node(struct bch_fs *, struct btree *);
void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
#ifdef CONFIG_BCACHEFS_DEBUG
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index c12f8a6b5205..d3addd3a8964 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -28,7 +28,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++)
if (c->btree_roots[i].b)
reserve += min_t(unsigned, 1,
- c->btree_roots[i].b->level) * 8;
+ c->btree_roots[i].b->c.level) * 8;
c->btree_cache.reserve = reserve;
}
@@ -72,24 +72,33 @@ static const struct rhashtable_params bch_btree_cache_params = {
.obj_cmpfn = bch2_btree_cache_cmp_fn,
};
-static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+static int __btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
- struct btree_cache *bc = &c->btree_cache;
+ BUG_ON(b->data || b->aux_data);
b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
- goto err;
+ return -ENOMEM;
- if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
- goto err;
+ if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp)) {
+ kvpfree(b->data, btree_bytes(c));
+ b->data = NULL;
+ return -ENOMEM;
+ }
- bc->used++;
- list_move(&b->list, &bc->freeable);
- return;
-err:
- kvpfree(b->data, btree_bytes(c));
- b->data = NULL;
- list_move(&b->list, &bc->freed);
+ return 0;
+}
+
+static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+{
+ struct btree_cache *bc = &c->btree_cache;
+
+ if (!__btree_node_data_alloc(c, b, gfp)) {
+ bc->used++;
+ list_move(&b->list, &bc->freeable);
+ } else {
+ list_move(&b->list, &bc->freed);
+ }
}
static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
@@ -99,7 +108,7 @@ static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
return NULL;
bkey_btree_ptr_init(&b->key);
- six_lock_init(&b->lock);
+ six_lock_init(&b->c.lock);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
@@ -131,8 +140,8 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
{
int ret;
- b->level = level;
- b->btree_id = id;
+ b->c.level = level;
+ b->c.btree_id = id;
mutex_lock(&bc->lock);
ret = __bch2_btree_node_hash_insert(bc, b);
@@ -163,10 +172,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
lockdep_assert_held(&bc->lock);
- if (!six_trylock_intent(&b->lock))
+ if (!six_trylock_intent(&b->c.lock))
return -ENOMEM;
- if (!six_trylock_write(&b->lock))
+ if (!six_trylock_write(&b->c.lock))
goto out_unlock_intent;
if (btree_node_noevict(b))
@@ -207,9 +216,9 @@ out:
trace_btree_node_reap(c, b);
return ret;
out_unlock:
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
out_unlock_intent:
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
ret = -ENOMEM;
goto out;
}
@@ -241,7 +250,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
- if (sc->gfp_mask & __GFP_IO)
+ if (sc->gfp_mask & __GFP_FS)
mutex_lock(&bc->lock);
else if (!mutex_trylock(&bc->lock))
return -1;
@@ -267,8 +276,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
if (++i > 3 &&
!btree_node_reclaim(c, b)) {
btree_node_data_free(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
freed++;
}
}
@@ -294,13 +303,13 @@ restart:
mutex_unlock(&bc->lock);
bch2_btree_node_hash_remove(bc, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
if (freed >= nr)
goto out;
- if (sc->gfp_mask & __GFP_IO)
+ if (sc->gfp_mask & __GFP_FS)
mutex_lock(&bc->lock);
else if (!mutex_trylock(&bc->lock))
goto out;
@@ -524,36 +533,47 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
*/
list_for_each_entry(b, &bc->freeable, list)
if (!btree_node_reclaim(c, b))
- goto out_unlock;
+ goto got_node;
/*
* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
list_for_each_entry(b, &bc->freed, list)
- if (!btree_node_reclaim(c, b)) {
- btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
- if (b->data)
- goto out_unlock;
+ if (!btree_node_reclaim(c, b))
+ goto got_node;
+
+ b = NULL;
+got_node:
+ if (b)
+ list_del_init(&b->list);
+ mutex_unlock(&bc->lock);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ if (!b) {
+ b = kzalloc(sizeof(struct btree), GFP_KERNEL);
+ if (!b)
goto err;
- }
- b = btree_node_mem_alloc(c, __GFP_NOWARN|GFP_NOIO);
- if (!b)
- goto err;
+ bkey_btree_ptr_init(&b->key);
+ six_lock_init(&b->c.lock);
+ INIT_LIST_HEAD(&b->list);
+ INIT_LIST_HEAD(&b->write_blocked);
+
+ BUG_ON(!six_trylock_intent(&b->c.lock));
+ BUG_ON(!six_trylock_write(&b->c.lock));
+ }
+
+ if (!b->data) {
+ if (__btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
+ goto err;
+
+ mutex_lock(&bc->lock);
+ bc->used++;
+ mutex_unlock(&bc->lock);
+ }
- BUG_ON(!six_trylock_intent(&b->lock));
- BUG_ON(!six_trylock_write(&b->lock));
-out_unlock:
BUG_ON(btree_node_hashed(b));
BUG_ON(btree_node_write_in_flight(b));
-
- list_del_init(&b->list);
- mutex_unlock(&bc->lock);
- memalloc_nofs_restore(flags);
out:
b->flags = 0;
b->written = 0;
@@ -566,8 +586,17 @@ out:
bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
start_time);
+ memalloc_nofs_restore(flags);
return b;
err:
+ mutex_lock(&bc->lock);
+
+ if (b) {
+ list_add(&b->list, &bc->freed);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
+ }
+
/* Try to cannibalize another cached btree node: */
if (bc->alloc_lock == current) {
b = btree_node_cannibalize(c);
@@ -581,6 +610,7 @@ err:
}
mutex_unlock(&bc->lock);
+ memalloc_nofs_restore(flags);
return ERR_PTR(-ENOMEM);
}
@@ -619,8 +649,8 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
list_add(&b->list, &bc->freeable);
mutex_unlock(&bc->lock);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return NULL;
}
@@ -634,19 +664,27 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
bch2_btree_node_read(c, b, sync);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
if (!sync) {
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
return NULL;
}
if (lock_type == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
return b;
}
+static int lock_node_check_fn(struct six_lock *lock, void *p)
+{
+ struct btree *b = container_of(lock, struct btree, c.lock);
+ const struct bkey_i *k = p;
+
+ return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
+}
+
/**
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
@@ -719,13 +757,17 @@ lock_node:
if (btree_node_read_locked(iter, level + 1))
btree_node_unlock(iter, level + 1);
- if (!btree_node_lock(b, k->k.p, level, iter, lock_type))
+ if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+ lock_node_check_fn, (void *) k)) {
+ if (b->hash_val != btree_ptr_hash_val(k))
+ goto retry;
return ERR_PTR(-EINTR);
+ }
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->level != level ||
+ b->c.level != level ||
race_fault())) {
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
if (bch2_btree_node_relock(iter, level + 1))
goto retry;
@@ -753,11 +795,11 @@ lock_node:
set_btree_node_accessed(b);
if (unlikely(btree_node_read_error(b))) {
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
return ERR_PTR(-EIO);
}
- EBUG_ON(b->btree_id != iter->btree_id ||
+ EBUG_ON(b->c.btree_id != iter->btree_id ||
BTREE_NODE_LEVEL(b->data) != level ||
bkey_cmp(b->data->max_key, k->k.p));
@@ -772,6 +814,7 @@ struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
+ int ret;
EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -792,12 +835,14 @@ retry:
return b;
} else {
lock_node:
- six_lock_read(&b->lock);
+ ret = six_lock_read(&b->c.lock, lock_node_check_fn, (void *) k);
+ if (ret)
+ goto retry;
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
- b->btree_id != btree_id ||
- b->level != level)) {
- six_unlock_read(&b->lock);
+ b->c.btree_id != btree_id ||
+ b->c.level != level)) {
+ six_unlock_read(&b->c.lock);
goto retry;
}
}
@@ -821,11 +866,11 @@ lock_node:
set_btree_node_accessed(b);
if (unlikely(btree_node_read_error(b))) {
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
return ERR_PTR(-EIO);
}
- EBUG_ON(b->btree_id != btree_id ||
+ EBUG_ON(b->c.btree_id != btree_id ||
BTREE_NODE_LEVEL(b->data) != level ||
bkey_cmp(b->data->max_key, k->k.p));
@@ -843,18 +888,30 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
struct bkey_packed *k;
BKEY_PADDED(k) tmp;
struct btree *ret = NULL;
- unsigned level = b->level;
+ unsigned level = b->c.level;
parent = btree_iter_node(iter, level + 1);
if (!parent)
return NULL;
+ /*
+ * There's a corner case where a btree_iter might have a node locked
+ * that is just outside its current pos - when
+ * bch2_btree_iter_set_pos_same_leaf() gets to the end of the node.
+ *
+ * But the lock ordering checks in __bch2_btree_node_lock() go off of
+ * iter->pos, not the node's key: so if the iterator is marked as
+ * needing to be traversed, we risk deadlock if we don't bail out here:
+ */
+ if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
+ return ERR_PTR(-EINTR);
+
if (!bch2_btree_node_relock(iter, level + 1)) {
ret = ERR_PTR(-EINTR);
goto out;
}
- node_iter = iter->l[parent->level].iter;
+ node_iter = iter->l[parent->c.level].iter;
k = bch2_btree_node_iter_peek_all(&node_iter, parent);
BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
@@ -901,7 +958,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
if (!IS_ERR(ret)) {
- six_unlock_intent(&ret->lock);
+ six_unlock_intent(&ret->c.lock);
ret = ERR_PTR(-EINTR);
}
}
@@ -962,7 +1019,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
pr_buf(out,
"l %u %llu:%llu - %llu:%llu:\n"
" ptrs: ",
- b->level,
+ b->c.level,
b->data->min_key.inode,
b->data->min_key.offset,
b->data->max_key.inode,
diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h
index 98cca30778ea..2160012c734f 100644
--- a/fs/bcachefs/btree_cache.h
+++ b/fs/bcachefs/btree_cache.h
@@ -101,7 +101,7 @@ static inline unsigned btree_blocks(struct bch_fs *c)
(BTREE_FOREGROUND_MERGE_THRESHOLD(c) + \
(BTREE_FOREGROUND_MERGE_THRESHOLD(c) << 2))
-#define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->btree_id].b)
+#define btree_node_root(_c, _b) ((_c)->btree_roots[(_b)->c.btree_id].b)
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
struct btree *);
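Aside: the btree_cache.c hunks above take b->c.lock through a check callback (lock_node_check_fn) and re-verify b->hash_val once the lock is held, retrying the lookup on mismatch, because the node can be freed and reused while the caller sleeps on the lock. A simplified, self-contained sketch of that lock-then-revalidate pattern, using a pthread mutex and stand-in types rather than the six-lock API:

/* Illustrative sketch only; types and hash function are stand-ins. */
#include <pthread.h>
#include <stdio.h>

struct node {
        pthread_mutex_t lock;
        unsigned long   hash_val;       /* identity of the cached node */
};

static unsigned long key_hash(unsigned long key)
{
        return key * 2654435761UL;
}

/* Returns 0 with the lock held; the caller retries the hash-table lookup
 * on -1, because the node may have been reused while we waited. */
static int lock_node_checked(struct node *b, unsigned long key)
{
        pthread_mutex_lock(&b->lock);
        if (b->hash_val != key_hash(key)) {
                pthread_mutex_unlock(&b->lock);
                return -1;      /* raced with eviction/reuse: retry lookup */
        }
        return 0;               /* locked, and still the node we wanted */
}

int main(void)
{
        struct node b = { PTHREAD_MUTEX_INITIALIZER, key_hash(42) };

        if (!lock_node_checked(&b, 42)) {
                printf("locked the right node\n");
                pthread_mutex_unlock(&b.lock);
        }
        return 0;
}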
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 146f2428fe04..8771ef1f07cc 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -186,7 +186,7 @@ static int btree_gc_mark_node(struct bch_fs *c, struct btree *b, u8 *max_stale,
bch2_btree_node_iter_advance(&iter, b);
- if (b->level) {
+ if (b->c.level) {
ret = bch2_gc_check_topology(c, k,
&next_node_start,
b->data->max_key,
@@ -252,7 +252,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
if (!btree_node_fake(b))
ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
&max_stale, initial);
- gc_pos_set(c, gc_pos_btree_root(b->btree_id));
+ gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
mutex_unlock(&c->btree_root_lock);
return ret;
@@ -280,7 +280,7 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
if (ret)
break;
- if (b->level) {
+ if (b->c.level) {
struct btree *child;
BKEY_PADDED(k) tmp;
@@ -296,16 +296,16 @@ static int bch2_gc_btree_init_recurse(struct bch_fs *c, struct btree *b,
if (ret)
break;
- if (b->level > target_depth) {
+ if (b->c.level > target_depth) {
child = bch2_btree_node_get_noiter(c, &tmp.k,
- b->btree_id, b->level - 1);
+ b->c.btree_id, b->c.level - 1);
ret = PTR_ERR_OR_ZERO(child);
if (ret)
break;
ret = bch2_gc_btree_init_recurse(c, child,
journal_keys, target_depth);
- six_unlock_read(&child->lock);
+ six_unlock_read(&child->c.lock);
if (ret)
break;
@@ -336,7 +336,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
if (btree_node_fake(b))
return 0;
- six_lock_read(&b->lock);
+ six_lock_read(&b->c.lock, NULL, NULL);
if (fsck_err_on(bkey_cmp(b->data->min_key, POS_MIN), c,
"btree root with incorrect min_key: %llu:%llu",
b->data->min_key.inode,
@@ -351,7 +351,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
BUG();
}
- if (b->level >= target_depth)
+ if (b->c.level >= target_depth)
ret = bch2_gc_btree_init_recurse(c, b,
journal_keys, target_depth);
@@ -359,7 +359,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
ret = bch2_gc_mark_key(c, bkey_i_to_s_c(&b->key),
&max_stale, true);
fsck_err:
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
return ret;
}
@@ -464,6 +464,7 @@ static void bch2_mark_superblocks(struct bch_fs *c)
mutex_unlock(&c->sb_lock);
}
+#if 0
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
@@ -481,6 +482,7 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
mutex_unlock(&c->btree_interior_update_lock);
}
+#endif
static void bch2_mark_allocator_buckets(struct bch_fs *c)
{
@@ -579,8 +581,10 @@ static int bch2_gc_done(struct bch_fs *c,
#define copy_bucket_field(_f) \
if (dst->b[b].mark._f != src->b[b].mark._f) { \
if (verify) \
- fsck_err(c, "dev %u bucket %zu has wrong " #_f \
+ fsck_err(c, "bucket %u:%zu gen %u data type %s has wrong " #_f \
": got %u, should be %u", i, b, \
+ dst->b[b].mark.gen, \
+ bch2_data_types[dst->b[b].mark.data_type],\
dst->b[b].mark._f, src->b[b].mark._f); \
dst->b[b]._mark._f = src->b[b].mark._f; \
}
@@ -794,9 +798,14 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
unsigned i, iter = 0;
int ret;
+ lockdep_assert_held(&c->state_lock);
trace_gc_start(c);
down_write(&c->gc_lock);
+
+ /* flush interior btree updates: */
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
again:
ret = bch2_gc_start(c, metadata_only);
if (ret)
@@ -808,7 +817,9 @@ again:
if (ret)
goto out;
+#if 0
bch2_mark_pending_btree_node_frees(c);
+#endif
bch2_mark_allocator_buckets(c);
c->gc_count++;
@@ -874,6 +885,87 @@ out:
return ret;
}
+/*
+ * For recalculating oldest gen, we only need to walk keys in leaf nodes; btree
+ * node pointers currently never have cached pointers that can become stale:
+ */
+static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id id)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for_each_btree_key(&trans, iter, id, POS_MIN, BTREE_ITER_PREFETCH, k, ret) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+ const struct bch_extent_ptr *ptr;
+
+ percpu_down_read(&c->mark_lock);
+ bkey_for_each_ptr(ptrs, ptr) {
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr, false);
+
+ if (gen_after(g->gc_gen, ptr->gen))
+ g->gc_gen = ptr->gen;
+
+ if (gen_after(g->mark.gen, ptr->gen) > 32) {
+ /* rewrite btree node */
+
+ }
+ }
+ percpu_up_read(&c->mark_lock);
+ }
+
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
+int bch2_gc_gens(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ struct bucket_array *buckets;
+ struct bucket *g;
+ unsigned i;
+ int ret;
+
+ /*
+ * Ideally we would be using state_lock and not gc_lock here, but that
+ * introduces a deadlock in the RO path - we currently take the state
+ * lock at the start of going RO, thus the gc thread may get stuck:
+ */
+ down_read(&c->gc_lock);
+
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->gc_gen = g->mark.gen;
+ up_read(&ca->bucket_lock);
+ }
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if (btree_node_type_needs_gc(i)) {
+ ret = bch2_gc_btree_gens(c, i);
+ if (ret)
+ goto err;
+ }
+
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ g->oldest_gen = g->gc_gen;
+ up_read(&ca->bucket_lock);
+ }
+err:
+ up_read(&c->gc_lock);
+ return ret;
+}
+
/* Btree coalescing */
static void recalc_packed_keys(struct btree *b)
@@ -997,9 +1089,9 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
set_btree_bset_end(n1, n1->set);
- six_unlock_write(&n2->lock);
+ six_unlock_write(&n2->c.lock);
bch2_btree_node_free_never_inserted(c, n2);
- six_unlock_intent(&n2->lock);
+ six_unlock_intent(&n2->c.lock);
memmove(new_nodes + i - 1,
new_nodes + i,
@@ -1033,7 +1125,9 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
btree_node_reset_sib_u64s(n);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+
+ bch2_btree_update_add_new_node(as, n);
+ six_unlock_write(&n->c.lock);
bch2_btree_node_write(c, n, SIX_LOCK_intent);
}
@@ -1076,12 +1170,12 @@ next:
BUG_ON(!bch2_keylist_empty(&keylist));
- BUG_ON(iter->l[old_nodes[0]->level].b != old_nodes[0]);
+ BUG_ON(iter->l[old_nodes[0]->c.level].b != old_nodes[0]);
bch2_btree_iter_node_replace(iter, new_nodes[0]);
for (i = 0; i < nr_new_nodes; i++)
- bch2_open_buckets_put(c, &new_nodes[i]->ob);
+ bch2_btree_update_get_open_buckets(as, new_nodes[i]);
/* Free the old nodes and update our sliding window */
for (i = 0; i < nr_old_nodes; i++) {
@@ -1101,7 +1195,7 @@ next:
}
for (i = 0; i < nr_new_nodes; i++)
- six_unlock_intent(&new_nodes[i]->lock);
+ six_unlock_intent(&new_nodes[i]->c.lock);
bch2_btree_update_done(as);
bch2_keylist_free(&keylist, NULL);
@@ -1142,11 +1236,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
for (i = 1; i < GC_MERGE_NODES; i++) {
if (!merge[i] ||
- !six_relock_intent(&merge[i]->lock, lock_seq[i]))
+ !six_relock_intent(&merge[i]->c.lock, lock_seq[i]))
break;
- if (merge[i]->level != merge[0]->level) {
- six_unlock_intent(&merge[i]->lock);
+ if (merge[i]->c.level != merge[0]->c.level) {
+ six_unlock_intent(&merge[i]->c.lock);
break;
}
}
@@ -1155,11 +1249,11 @@ static int bch2_coalesce_btree(struct bch_fs *c, enum btree_id btree_id)
bch2_coalesce_nodes(c, iter, merge);
for (i = 1; i < GC_MERGE_NODES && merge[i]; i++) {
- lock_seq[i] = merge[i]->lock.state.seq;
- six_unlock_intent(&merge[i]->lock);
+ lock_seq[i] = merge[i]->c.lock.state.seq;
+ six_unlock_intent(&merge[i]->c.lock);
}
- lock_seq[0] = merge[0]->lock.state.seq;
+ lock_seq[0] = merge[0]->c.lock.state.seq;
if (kthread && kthread_should_stop()) {
bch2_trans_exit(&trans);
@@ -1247,7 +1341,14 @@ static int bch2_gc_thread(void *arg)
last = atomic_long_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
ret = bch2_gc(c, NULL, false, false);
+#else
+ ret = bch2_gc_gens(c);
+#endif
if (ret)
bch_err(c, "btree gc failed: %i", ret);
diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h
index bd5f2752954f..3694a3df62a8 100644
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ -8,6 +8,7 @@ void bch2_coalesce(struct bch_fs *);
struct journal_keys;
int bch2_gc(struct bch_fs *, struct journal_keys *, bool, bool);
+int bch2_gc_gens(struct bch_fs *);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
@@ -81,7 +82,7 @@ static inline struct gc_pos gc_pos_btree(enum btree_id id,
*/
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
{
- return gc_pos_btree(b->btree_id, b->key.k.p, b->level);
+ return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
}
/*
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 01a0ae0007c9..bb3aeccef67e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -584,8 +584,8 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
struct btree_node_entry *bne;
bool did_sort;
- EBUG_ON(!(b->lock.state.seq & 1));
- EBUG_ON(iter && iter->l[b->level].b != b);
+ EBUG_ON(!(b->c.lock.state.seq & 1));
+ EBUG_ON(iter && iter->l[b->c.level].b != b);
did_sort = btree_node_compact(c, b, iter);
@@ -620,7 +620,7 @@ static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
bytes);
- nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
+ nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
}
bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
@@ -631,14 +631,14 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
struct btree *b, struct bset *i,
unsigned offset, int write)
{
- pr_buf(out, "error validating btree node %s"
- "at btree %u level %u/%u\n"
- "pos %llu:%llu node offset %u",
+ pr_buf(out, "error validating btree node %sat btree %u level %u/%u\n"
+ "pos ",
write ? "before write " : "",
- b->btree_id, b->level,
- c->btree_roots[b->btree_id].level,
- b->key.k.p.inode, b->key.k.p.offset,
- b->written);
+ b->c.btree_id, b->c.level,
+ c->btree_roots[b->c.btree_id].level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+
+ pr_buf(out, " node offset %u", b->written);
if (i)
pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}
@@ -747,11 +747,11 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
"incorrect sequence number (wrong btree node)");
}
- btree_err_on(BTREE_NODE_ID(bn) != b->btree_id,
+ btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect btree id");
- btree_err_on(BTREE_NODE_LEVEL(bn) != b->level,
+ btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
BTREE_ERR_MUST_RETRY, c, b, i,
"incorrect level");
@@ -762,7 +762,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
}
if (!write)
- compat_btree_node(b->level, b->btree_id, version,
+ compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
@@ -783,7 +783,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
"incorrect max key");
if (write)
- compat_btree_node(b->level, b->btree_id, version,
+ compat_btree_node(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write, bn);
/* XXX: ideally we would be validating min_key too */
@@ -805,7 +805,7 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
BTREE_ERR_FATAL, c, b, i,
"invalid bkey format: %s", err);
- compat_bformat(b->level, b->btree_id, version,
+ compat_bformat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&bn->format);
}
@@ -851,7 +851,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
/* XXX: validate k->u64s */
if (!write)
- bch2_bkey_compat(b->level, b->btree_id, version,
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
@@ -874,7 +874,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
}
if (write)
- bch2_bkey_compat(b->level, b->btree_id, version,
+ bch2_bkey_compat(b->c.level, b->c.btree_id, version,
BSET_BIG_ENDIAN(i), write,
&b->format, k);
@@ -897,7 +897,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
bch2_bkey_to_text(&PBUF(buf1), &up);
bch2_bkey_to_text(&PBUF(buf2), u.k);
- bch2_dump_bset(b, i, 0);
+ bch2_dump_bset(c, b, i, 0);
btree_err(BTREE_ERR_FATAL, c, b, i,
"keys out of order: %s > %s",
buf1, buf2);
@@ -944,7 +944,8 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry
btree_err_on(b->data->keys.seq != bp->seq,
BTREE_ERR_MUST_RETRY, c, b, NULL,
- "got wrong btree node");
+ "got wrong btree node (seq %llx want %llx)",
+ b->data->keys.seq, bp->seq);
}
while (b->written < c->opts.btree_node_size) {
@@ -1279,8 +1280,8 @@ int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
bch2_btree_set_root_for_read(c, b);
err:
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
return ret;
}
@@ -1324,15 +1325,15 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
bch2_trans_init(&trans, c, 0, 0);
- iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
- BTREE_MAX_DEPTH, b->level, 0);
+ iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
+ BTREE_MAX_DEPTH, b->c.level, 0);
retry:
ret = bch2_btree_iter_traverse(iter);
if (ret)
goto err;
/* has node been freed? */
- if (iter->l[b->level].b != b) {
+ if (iter->l[b->c.level].b != b) {
/* node has been freed: */
BUG_ON(!btree_node_dying(b));
goto out;
@@ -1763,18 +1764,18 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
BUG_ON(lock_type_held == SIX_LOCK_write);
if (lock_type_held == SIX_LOCK_intent ||
- six_lock_tryupgrade(&b->lock)) {
+ six_lock_tryupgrade(&b->c.lock)) {
__bch2_btree_node_write(c, b, SIX_LOCK_intent);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
- six_trylock_write(&b->lock)) {
+ six_trylock_write(&b->c.lock)) {
bch2_btree_post_write_cleanup(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
if (lock_type_held == SIX_LOCK_read)
- six_lock_downgrade(&b->lock);
+ six_lock_downgrade(&b->c.lock);
} else {
__bch2_btree_node_write(c, b, SIX_LOCK_read);
}
@@ -1844,7 +1845,7 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
b,
(flags & (1 << BTREE_NODE_dirty)) != 0,
(flags & (1 << BTREE_NODE_need_write)) != 0,
- b->level,
+ b->c.level,
b->written,
!list_empty_careful(&b->write_blocked),
b->will_make_reachable != 0,
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 337d2bdd29e8..f3d7ec749b61 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -114,7 +114,7 @@ static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
break;
}
- six_unlock_type(&b->lock, lock_held);
+ six_unlock_type(&b->c.lock, lock_held);
btree_node_wait_on_io(b);
btree_node_lock_type(c, b, lock_held);
}
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 5528ba0f1d44..6fab76c3220c 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -4,22 +4,16 @@
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "extents.h"
+#include "journal.h"
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
-#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1)
-#define BTREE_ITER_NO_NODE_DROP ((struct btree *) 2)
-#define BTREE_ITER_NO_NODE_LOCK_ROOT ((struct btree *) 3)
-#define BTREE_ITER_NO_NODE_UP ((struct btree *) 4)
-#define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5)
-#define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6)
-#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7)
-
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
return l < BTREE_MAX_DEPTH &&
@@ -51,7 +45,7 @@ static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
struct btree *b)
{
- return iter->btree_id == b->btree_id &&
+ return iter->btree_id == b->c.btree_id &&
!btree_iter_pos_before_node(iter, b) &&
!btree_iter_pos_after_node(iter, b);
}
@@ -68,11 +62,11 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
struct btree_iter *linked;
unsigned readers = 0;
- EBUG_ON(!btree_node_intent_locked(iter, b->level));
+ EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
trans_for_each_iter(iter->trans, linked)
- if (linked->l[b->level].b == b &&
- btree_node_read_locked(linked, b->level))
+ if (linked->l[b->c.level].b == b &&
+ btree_node_read_locked(linked, b->c.level))
readers++;
/*
@@ -82,10 +76,10 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
* locked:
*/
atomic64_sub(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
atomic64_add(__SIX_VAL(read_lock, readers),
- &b->lock.state.counter);
+ &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
@@ -99,9 +93,9 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
if (race_fault())
return false;
- if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) ||
+ if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
(btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter, b, level, want))) {
+ btree_node_lock_increment(iter->trans, b, level, want))) {
mark_btree_node_locked(iter, level, want);
return true;
} else {
@@ -125,12 +119,12 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
return false;
if (btree_node_locked(iter, level)
- ? six_lock_tryupgrade(&b->lock)
- : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+ ? six_lock_tryupgrade(&b->c.lock)
+ : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
goto success;
if (btree_node_lock_seq_matches(iter, b, level) &&
- btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
+ btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
btree_node_unlock(iter, level);
goto success;
}
@@ -162,7 +156,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
? 0
: (unsigned long) iter->l[l].b,
is_btree_node(iter, l)
- ? iter->l[l].b->lock.state.seq
+ ? iter->l[l].b->c.lock.state.seq
: 0);
fail_idx = l;
@@ -191,24 +185,31 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
+static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
+ enum btree_iter_type type)
+{
+ return type != BTREE_ITER_CACHED
+ ? container_of(_b, struct btree, c)->key.k.p
+ : container_of(_b, struct bkey_cached, c)->key.pos;
+}
+
/* Slowpath: */
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level,
- struct btree_iter *iter,
- enum six_lock_type type)
+ unsigned level, struct btree_iter *iter,
+ enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn,
+ void *p)
{
+ struct btree_trans *trans = iter->trans;
struct btree_iter *linked;
+ u64 start_time = local_clock();
bool ret = true;
/* Check if it's safe to block: */
- trans_for_each_iter(iter->trans, linked) {
+ trans_for_each_iter(trans, linked) {
if (!linked->nodes_locked)
continue;
- /* * Must lock btree nodes in key order: */
- if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
- ret = false;
-
/*
* Can't block taking an intent lock if we have _any_ nodes read
* locked:
@@ -223,13 +224,15 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
*/
if (type == SIX_LOCK_intent &&
linked->nodes_locked != linked->nodes_intent_locked) {
- if (!(iter->trans->nounlock)) {
+ if (!(trans->nounlock)) {
linked->locks_want = max_t(unsigned,
linked->locks_want,
__fls(linked->nodes_locked) + 1);
- btree_iter_get_locks(linked, true, false);
+ if (!btree_iter_get_locks(linked, true, false))
+ ret = false;
+ } else {
+ ret = false;
}
- ret = false;
}
/*
@@ -239,14 +242,38 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
*/
if (linked->btree_id == iter->btree_id &&
level > __fls(linked->nodes_locked)) {
- if (!(iter->trans->nounlock)) {
+ if (!(trans->nounlock)) {
linked->locks_want =
max(level + 1, max_t(unsigned,
linked->locks_want,
iter->locks_want));
- btree_iter_get_locks(linked, true, false);
+ if (!btree_iter_get_locks(linked, true, false))
+ ret = false;
+ } else {
+ ret = false;
}
+ }
+
+ /* Must lock btree nodes in key order: */
+ if ((cmp_int(iter->btree_id, linked->btree_id) ?:
+ -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0)
+ ret = false;
+
+ if (iter->btree_id == linked->btree_id &&
+ btree_node_locked(linked, level) &&
+ bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+ btree_iter_type(linked))) <= 0)
ret = false;
+
+ /*
+ * Recheck if this is a node we already have locked - since one
+ * of the get_locks() calls might've successfully
+ * upgraded/relocked it:
+ */
+ if (linked->l[level].b == b &&
+ btree_node_locked_type(linked, level) >= type) {
+ six_lock_increment(&b->c.lock, type);
+ return true;
}
}
@@ -255,7 +282,14 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
return false;
}
- __btree_node_lock_type(iter->trans->c, b, type);
+ if (six_trylock_type(&b->c.lock, type))
+ return true;
+
+ if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
+ return false;
+
+ bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
+ start_time);
return true;
}
@@ -266,7 +300,12 @@ static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
unsigned l;
- for (l = 0; btree_iter_node(iter, l); l++) {
+ if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+ BUG_ON(iter->nodes_locked);
+ return;
+ }
+
+ for (l = 0; is_btree_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
continue;
@@ -280,7 +319,7 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
struct btree_iter *iter;
- trans_for_each_iter(trans, iter)
+ trans_for_each_iter_all(trans, iter)
bch2_btree_iter_verify_locks(iter);
}
#else
@@ -288,7 +327,7 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
__flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
{
return btree_iter_get_locks(iter, false, trace);
}
@@ -348,31 +387,20 @@ bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
unsigned downgrade_to)
{
- struct btree_iter *linked;
- unsigned l;
-
- /*
- * We downgrade linked iterators as well because btree_iter_upgrade
- * might have had to modify locks_want on linked iterators due to lock
- * ordering:
- */
- trans_for_each_iter(iter->trans, linked) {
- unsigned new_locks_want = downgrade_to ?:
- (linked->flags & BTREE_ITER_INTENT ? 1 : 0);
+ unsigned l, new_locks_want = downgrade_to ?:
+ (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
- if (linked->locks_want <= new_locks_want)
- continue;
-
- linked->locks_want = new_locks_want;
+ if (iter->locks_want < downgrade_to) {
+ iter->locks_want = new_locks_want;
- while (linked->nodes_locked &&
- (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
- if (l > linked->level) {
- btree_node_unlock(linked, l);
+ while (iter->nodes_locked &&
+ (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+ if (l > iter->level) {
+ btree_node_unlock(iter, l);
} else {
- if (btree_node_intent_locked(linked, l)) {
- six_lock_downgrade(&linked->l[l].b->lock);
- linked->nodes_intent_locked ^= 1 << l;
+ if (btree_node_intent_locked(iter, l)) {
+ six_lock_downgrade(&iter->l[l].b->c.lock);
+ iter->nodes_intent_locked ^= 1 << l;
}
break;
}
@@ -382,6 +410,14 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
bch2_btree_trans_verify_locks(iter->trans);
}
+void bch2_trans_downgrade(struct btree_trans *trans)
+{
+ struct btree_iter *iter;
+
+ trans_for_each_iter(trans, iter)
+ bch2_btree_iter_downgrade(iter);
+}
+
/* Btree transaction locking: */
bool bch2_trans_relock(struct btree_trans *trans)
@@ -408,6 +444,22 @@ void bch2_trans_unlock(struct btree_trans *trans)
#ifdef CONFIG_BCACHEFS_DEBUG
+static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
+{
+ struct bkey_cached *ck;
+ bool locked = btree_node_locked(iter, 0);
+
+ if (!bch2_btree_node_relock(iter, 0))
+ return;
+
+ ck = (void *) iter->l[0].b;
+ BUG_ON(ck->key.btree_id != iter->btree_id ||
+ bkey_cmp(ck->key.pos, iter->pos));
+
+ if (!locked)
+ btree_node_unlock(iter, 0);
+}
+
static void bch2_btree_iter_verify_level(struct btree_iter *iter,
unsigned level)
{
@@ -422,6 +474,12 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
if (!debug_check_iterators(iter->trans->c))
return;
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+ if (!level)
+ bch2_btree_iter_verify_cached(iter);
+ return;
+ }
+
BUG_ON(iter->level < iter->min_depth);
if (!btree_iter_node(iter, level))
@@ -513,7 +571,7 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
return;
trans_for_each_iter_with_node(trans, b, iter)
- bch2_btree_iter_verify_level(iter, b->level);
+ bch2_btree_iter_verify_level(iter, b->c.level);
}
#else
@@ -544,7 +602,7 @@ static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
struct btree *b,
struct bkey_packed *where)
{
- struct btree_iter_level *l = &iter->l[b->level];
+ struct btree_iter_level *l = &iter->l[b->c.level];
struct bpos pos = btree_iter_search_key(iter);
if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
@@ -564,7 +622,7 @@ void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_iter_fix_key_modified(linked, b, where);
- bch2_btree_iter_verify_level(linked, b->level);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
@@ -634,7 +692,7 @@ fixup_done:
*/
if (!bch2_btree_node_iter_end(node_iter) &&
iter_current_key_modified &&
- (b->level ||
+ (b->c.level ||
btree_node_type_is_extents(iter->btree_id))) {
struct bset_tree *t;
struct bkey_packed *k, *k2, *p;
@@ -661,7 +719,7 @@ fixup_done:
}
}
- if (!b->level &&
+ if (!b->c.level &&
node_iter == &iter->l[0].iter &&
iter_current_key_modified)
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
@@ -677,7 +735,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
struct bset_tree *t = bch2_bkey_to_bset(b, where);
struct btree_iter *linked;
- if (node_iter != &iter->l[b->level].iter) {
+ if (node_iter != &iter->l[b->c.level].iter) {
__bch2_btree_node_iter_fix(iter, b, node_iter, t,
where, clobber_u64s, new_u64s);
@@ -687,9 +745,9 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
trans_for_each_iter_with_node(iter->trans, b, linked) {
__bch2_btree_node_iter_fix(linked, b,
- &linked->l[b->level].iter, t,
+ &linked->l[b->c.level].iter, t,
where, clobber_u64s, new_u64s);
- bch2_btree_iter_verify_level(linked, b->level);
+ bch2_btree_iter_verify_level(linked, b->c.level);
}
}
@@ -773,7 +831,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
return;
- plevel = b->level + 1;
+ plevel = b->c.level + 1;
if (!btree_iter_node(iter, plevel))
return;
@@ -796,7 +854,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
}
if (!parent_locked)
- btree_node_unlock(iter, b->level + 1);
+ btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
@@ -813,14 +871,16 @@ static inline void __btree_iter_init(struct btree_iter *iter,
static inline void btree_iter_node_set(struct btree_iter *iter,
struct btree *b)
{
+ BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+
btree_iter_verify_new_node(iter, b);
EBUG_ON(!btree_iter_pos_in_node(iter, b));
- EBUG_ON(b->lock.state.seq & 1);
+ EBUG_ON(b->c.lock.state.seq & 1);
- iter->l[b->level].lock_seq = b->lock.state.seq;
- iter->l[b->level].b = b;
- __btree_iter_init(iter, b->level);
+ iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+ iter->l[b->c.level].b = b;
+ __btree_iter_init(iter, b->c.level);
}
/*
@@ -833,18 +893,19 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked;
trans_for_each_iter(iter->trans, linked)
- if (btree_iter_pos_in_node(linked, b)) {
+ if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
+ btree_iter_pos_in_node(linked, b)) {
/*
* bch2_btree_iter_node_drop() has already been called -
* the old node we're replacing has already been
* unlocked and the pointer invalidated
*/
- BUG_ON(btree_node_locked(linked, b->level));
+ BUG_ON(btree_node_locked(linked, b->c.level));
- t = btree_lock_want(linked, b->level);
+ t = btree_lock_want(linked, b->c.level);
if (t != BTREE_NODE_UNLOCKED) {
- six_lock_increment(&b->lock, t);
- mark_btree_node_locked(linked, b->level, t);
+ six_lock_increment(&b->c.lock, t);
+ mark_btree_node_locked(linked, b->c.level, t);
}
btree_iter_node_set(linked, b);
@@ -854,7 +915,7 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
struct btree_iter *linked;
- unsigned level = b->level;
+ unsigned level = b->c.level;
trans_for_each_iter(iter->trans, linked)
if (linked->l[level].b == b) {
@@ -872,22 +933,30 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
struct btree_iter *linked;
trans_for_each_iter_with_node(iter->trans, b, linked)
- __btree_iter_init(linked, b->level);
+ __btree_iter_init(linked, b->c.level);
+}
+
+static int lock_root_check_fn(struct six_lock *lock, void *p)
+{
+ struct btree *b = container_of(lock, struct btree, c.lock);
+ struct btree **rootp = p;
+
+ return b == *rootp ? 0 : -1;
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
unsigned depth_want)
{
struct bch_fs *c = iter->trans->c;
- struct btree *b;
+ struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
enum six_lock_type lock_type;
unsigned i;
EBUG_ON(iter->nodes_locked);
while (1) {
- b = READ_ONCE(c->btree_roots[iter->btree_id].b);
- iter->level = READ_ONCE(b->level);
+ b = READ_ONCE(*rootp);
+ iter->level = READ_ONCE(b->c.level);
if (unlikely(iter->level < depth_want)) {
/*
@@ -904,11 +973,12 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
lock_type = __btree_lock_want(iter, iter->level);
if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
- iter, lock_type)))
+ iter, lock_type,
+ lock_root_check_fn, rootp)))
return -EINTR;
- if (likely(b == c->btree_roots[iter->btree_id].b &&
- b->level == iter->level &&
+ if (likely(b == READ_ONCE(*rootp) &&
+ b->c.level == iter->level &&
!race_fault())) {
for (i = 0; i < iter->level; i++)
iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
@@ -921,7 +991,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
return 0;
}
- six_unlock_type(&b->lock, lock_type);
+ six_unlock_type(&b->c.lock, lock_type);
}
}
@@ -1016,24 +1086,28 @@ static void btree_iter_up(struct btree_iter *iter)
static int btree_iter_traverse_one(struct btree_iter *);
-static int __btree_iter_traverse_all(struct btree_trans *trans,
- struct btree_iter *orig_iter, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
{
struct bch_fs *c = trans->c;
struct btree_iter *iter;
u8 sorted[BTREE_ITER_MAX];
unsigned i, nr_sorted = 0;
+ if (trans->in_traverse_all)
+ return -EINTR;
+
+ trans->in_traverse_all = true;
+retry_all:
+ nr_sorted = 0;
+
trans_for_each_iter(trans, iter)
- sorted[nr_sorted++] = iter - trans->iters;
+ sorted[nr_sorted++] = iter->idx;
#define btree_iter_cmp_by_idx(_l, _r) \
btree_iter_cmp(&trans->iters[_l], &trans->iters[_r])
bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx
-
-retry_all:
bch2_trans_unlock(trans);
if (unlikely(ret == -ENOMEM)) {
@@ -1049,11 +1123,6 @@ retry_all:
if (unlikely(ret == -EIO)) {
trans->error = true;
- if (orig_iter) {
- orig_iter->flags |= BTREE_ITER_ERROR;
- orig_iter->l[orig_iter->level].b =
- BTREE_ITER_NO_NODE_ERROR;
- }
goto out;
}
@@ -1061,9 +1130,16 @@ retry_all:
/* Now, redo traversals in correct order: */
for (i = 0; i < nr_sorted; i++) {
- iter = &trans->iters[sorted[i]];
+ unsigned idx = sorted[i];
+
+ /*
+ * successfully traversing one iterator can cause another to be
+ * unlinked, in btree_key_cache_fill()
+ */
+ if (!(trans->iters_linked & (1ULL << idx)))
+ continue;
- ret = btree_iter_traverse_one(iter);
+ ret = btree_iter_traverse_one(&trans->iters[idx]);
if (ret)
goto retry_all;
}
@@ -1078,12 +1154,14 @@ retry_all:
}
out:
bch2_btree_cache_cannibalize_unlock(c);
+
+ trans->in_traverse_all = false;
return ret;
}
int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
- return __btree_iter_traverse_all(trans, NULL, 0);
+ return __btree_iter_traverse_all(trans, 0);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
@@ -1128,9 +1206,6 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
{
unsigned depth_want = iter->level;
- if (unlikely(iter->level >= BTREE_MAX_DEPTH))
- return 0;
-
/*
* if we need interior nodes locked, call btree_iter_relock() to make
* sure we walk back up enough that we lock them:
@@ -1139,9 +1214,15 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
iter->locks_want > 1)
bch2_btree_iter_relock(iter, false);
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+ return bch2_btree_iter_traverse_cached(iter);
+
if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
return 0;
+ if (unlikely(iter->level >= BTREE_MAX_DEPTH))
+ return 0;
+
/*
* XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
* here unnecessary
@@ -1154,7 +1235,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
*
* XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
*/
- if (btree_iter_node(iter, iter->level)) {
+ if (is_btree_node(iter, iter->level)) {
BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));
btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
@@ -1175,7 +1256,15 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
return 0;
iter->level = depth_want;
- iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN;
+
+ if (ret == -EIO) {
+ iter->flags |= BTREE_ITER_ERROR;
+ iter->l[iter->level].b =
+ BTREE_ITER_NO_NODE_ERROR;
+ } else {
+ iter->l[iter->level].b =
+ BTREE_ITER_NO_NODE_DOWN;
+ }
return ret;
}
}
@@ -1188,23 +1277,25 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
int ret;
- ret = bch2_trans_cond_resched(iter->trans) ?:
+ ret = bch2_trans_cond_resched(trans) ?:
btree_iter_traverse_one(iter);
if (unlikely(ret))
- ret = __btree_iter_traverse_all(iter->trans, iter, ret);
+ ret = __btree_iter_traverse_all(trans, ret);
return ret;
}
-static inline void bch2_btree_iter_checks(struct btree_iter *iter,
- enum btree_iter_type type)
+static inline void bch2_btree_iter_checks(struct btree_iter *iter)
{
+ enum btree_iter_type type = btree_iter_type(iter);
+
EBUG_ON(iter->btree_id >= BTREE_ID_NR);
- EBUG_ON(btree_iter_type(iter) != type);
- BUG_ON(type == BTREE_ITER_KEYS &&
+ BUG_ON((type == BTREE_ITER_KEYS ||
+ type == BTREE_ITER_CACHED) &&
(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
bkey_cmp(iter->pos, iter->k.p) > 0));
@@ -1219,7 +1310,8 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
struct btree *b;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+ bch2_btree_iter_checks(iter);
if (iter->uptodate == BTREE_ITER_UPTODATE)
return iter->l[iter->level].b;
@@ -1247,7 +1339,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
struct btree *b;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+ bch2_btree_iter_checks(iter);
/* already got to end? */
if (!btree_iter_node(iter, iter->level))
@@ -1320,6 +1413,16 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_
btree_iter_advance_to_pos(iter, l, -1);
+ /*
+ * XXX:
+ * keeping a node locked that's outside (even just outside) iter->pos
+ * breaks __bch2_btree_node_lock(). This seems to only affect
+ * bch2_btree_node_get_sibling so for now it's fixed there, but we
+ * should try to get rid of this corner case.
+ *
+ * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
+ */
+
if (bch2_btree_node_iter_end(&l->iter) &&
btree_iter_pos_after_node(iter, l->b))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
@@ -1332,6 +1435,13 @@ static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
if (!cmp)
goto out;
+ if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
+ btree_node_unlock(iter, 0);
+ iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+ btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+ return;
+ }
+
l = btree_iter_up_until_good_node(iter, cmp);
if (btree_iter_node(iter, l)) {
@@ -1458,7 +1568,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
struct bkey_s_c k;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ bch2_btree_iter_checks(iter);
if (iter->uptodate == BTREE_ITER_UPTODATE &&
!bkey_deleted(&iter->k))
@@ -1545,7 +1656,8 @@ struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
struct bkey_s_c k;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ bch2_btree_iter_checks(iter);
while (1) {
ret = bch2_btree_iter_traverse(iter);
@@ -1605,7 +1717,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
struct bkey_s_c k;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ bch2_btree_iter_checks(iter);
if (iter->uptodate == BTREE_ITER_UPTODATE &&
!bkey_deleted(&iter->k))
@@ -1641,7 +1754,8 @@ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ bch2_btree_iter_checks(iter);
if (unlikely(!bkey_cmp(pos, POS_MIN)))
return bkey_s_c_null;
@@ -1722,7 +1836,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bkey_s_c k;
int ret;
- bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+ bch2_btree_iter_checks(iter);
if (iter->uptodate == BTREE_ITER_UPTODATE)
return btree_iter_peek_uptodate(iter);
@@ -1763,6 +1878,27 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
return bch2_btree_iter_peek_slot(iter);
}
+struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
+{
+ struct bkey_cached *ck;
+ int ret;
+
+ EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
+ bch2_btree_iter_checks(iter);
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (unlikely(ret))
+ return bkey_s_c_err(ret);
+
+ ck = (void *) iter->l[0].b;
+
+ EBUG_ON(iter->btree_id != ck->key.btree_id ||
+ bkey_cmp(iter->pos, ck->key.pos));
+ BUG_ON(!ck->valid);
+
+ return bkey_i_to_s_c(ck->k);
+}
+
static inline void bch2_btree_iter_init(struct btree_trans *trans,
struct btree_iter *iter, enum btree_id btree_id,
struct bpos pos, unsigned flags)
@@ -1912,7 +2048,7 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
struct btree_iter *iter;
trans_for_each_iter(trans, iter) {
- pr_err("iter: btree %s pos %llu:%llu%s%s%s %pf",
+ pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
bch2_btree_ids[iter->btree_id],
iter->pos.inode,
iter->pos.offset,
@@ -1948,10 +2084,11 @@ static inline void btree_iter_copy(struct btree_iter *dst,
*dst = *src;
dst->idx = idx;
+ dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
for (i = 0; i < BTREE_MAX_DEPTH; i++)
if (btree_node_locked(dst, i))
- six_lock_increment(&dst->l[i].b->lock,
+ six_lock_increment(&dst->l[i].b->c.lock,
__btree_lock_want(dst, i));
dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
@@ -2006,8 +2143,9 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
iter = best;
}
- iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
- iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+ iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+ iter->flags &= ~BTREE_ITER_USER_FLAGS;
+ iter->flags |= flags & BTREE_ITER_USER_FLAGS;
if (iter->flags & BTREE_ITER_INTENT)
bch2_btree_iter_upgrade(iter, 1);
@@ -2153,6 +2291,9 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
trans->nr_updates2 = 0;
trans->mem_top = 0;
+ trans->extra_journal_entries = NULL;
+ trans->extra_journal_entry_u64s = 0;
+
if (trans->fs_usage_deltas) {
trans->fs_usage_deltas->used = 0;
memset(&trans->fs_usage_deltas->memset_start, 0,
@@ -2189,12 +2330,27 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
if (expected_mem_bytes)
bch2_trans_preload_mem(trans, expected_mem_bytes);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->pid = current->pid;
+ mutex_lock(&c->btree_trans_lock);
+ list_add(&trans->list, &c->btree_trans_list);
+ mutex_unlock(&c->btree_trans_lock);
+#endif
}
int bch2_trans_exit(struct btree_trans *trans)
{
bch2_trans_unlock(trans);
+#ifdef CONFIG_BCACHEFS_DEBUG
+ mutex_lock(&trans->c->btree_trans_lock);
+ list_del(&trans->list);
+ mutex_unlock(&trans->c->btree_trans_lock);
+#endif
+
+ bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
+
kfree(trans->fs_usage_deltas);
kfree(trans->mem);
if (trans->used_mempool)
@@ -2207,6 +2363,69 @@ int bch2_trans_exit(struct btree_trans *trans)
return trans->error ? -EIO : 0;
}
+static void bch2_btree_iter_node_to_text(struct printbuf *out,
+ struct btree_bkey_cached_common *_b,
+ enum btree_iter_type type)
+{
+ pr_buf(out, " %px l=%u %s:",
+ _b, _b->level, bch2_btree_ids[_b->btree_id]);
+ bch2_bpos_to_text(out, btree_node_pos(_b, type));
+}
+
+void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct btree_trans *trans;
+ struct btree_iter *iter;
+ struct btree *b;
+ unsigned l;
+
+ mutex_lock(&c->btree_trans_lock);
+ list_for_each_entry(trans, &c->btree_trans_list, list) {
+ pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip);
+
+ trans_for_each_iter(trans, iter) {
+ if (!iter->nodes_locked)
+ continue;
+
+ pr_buf(out, " iter %u %s:",
+ iter->idx,
+ bch2_btree_ids[iter->btree_id]);
+ bch2_bpos_to_text(out, iter->pos);
+ pr_buf(out, "\n");
+
+ for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+ if (btree_node_locked(iter, l)) {
+ pr_buf(out, " %s l=%u ",
+ btree_node_intent_locked(iter, l) ? "i" : "r", l);
+ bch2_btree_iter_node_to_text(out,
+ (void *) iter->l[l].b,
+ btree_iter_type(iter));
+ pr_buf(out, "\n");
+ }
+ }
+ }
+
+ b = READ_ONCE(trans->locking);
+ if (b) {
+ pr_buf(out, " locking iter %u l=%u %s:",
+ trans->locking_iter_idx,
+ trans->locking_level,
+ bch2_btree_ids[trans->locking_btree_id]);
+ bch2_bpos_to_text(out, trans->locking_pos);
+
+
+ pr_buf(out, " node ");
+ bch2_btree_iter_node_to_text(out,
+ (void *) b,
+ btree_iter_type(&trans->iters[trans->locking_iter_idx]));
+ pr_buf(out, "\n");
+ }
+ }
+ mutex_unlock(&c->btree_trans_lock);
+#endif
+}
+
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
mempool_exit(&c->btree_iters_pool);
@@ -2216,6 +2435,9 @@ int bch2_fs_btree_iter_init(struct bch_fs *c)
{
unsigned nr = BTREE_ITER_MAX;
+ INIT_LIST_HEAD(&c->btree_trans_list);
+ mutex_init(&c->btree_trans_lock);
+
return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
sizeof(struct btree_iter) * nr +
sizeof(struct btree_insert_entry) * nr +
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 6456787a8f77..bd9ec3ec9a92 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -27,13 +27,13 @@ static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
* that write lock. The lock sequence number is incremented by taking
* and releasing write locks and is even when unlocked:
*/
- return iter->l[level].lock_seq >> 1 == b->lock.state.seq >> 1;
+ return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
static inline struct btree *btree_node_parent(struct btree_iter *iter,
struct btree *b)
{
- return btree_iter_node(iter, b->level + 1);
+ return btree_iter_node(iter, b->c.level + 1);
}
static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
@@ -73,8 +73,8 @@ __trans_next_iter(struct btree_trans *trans, unsigned idx)
static inline bool __iter_has_node(const struct btree_iter *iter,
const struct btree *b)
{
- return iter->l[b->level].b == b &&
- btree_node_lock_seq_matches(iter, b, b->level);
+ return iter->l[b->c.level].b == b &&
+ btree_node_lock_seq_matches(iter, b, b->c.level);
}
static inline struct btree_iter *
@@ -110,6 +110,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
struct btree_node_iter *, struct bkey_packed *,
unsigned, unsigned);
+bool bch2_btree_iter_relock(struct btree_iter *, bool);
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
@@ -136,6 +137,8 @@ static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
__bch2_btree_iter_downgrade(iter, 0);
}
+void bch2_trans_downgrade(struct btree_trans *);
+
void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
@@ -168,21 +171,18 @@ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *);
+
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void __bch2_btree_iter_set_pos(struct btree_iter *, struct bpos, bool);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
-static inline int __btree_iter_cmp(enum btree_id id,
- struct bpos pos,
- const struct btree_iter *r)
-{
- return cmp_int(id, r->btree_id) ?: bkey_cmp(pos, r->pos);
-}
-
static inline int btree_iter_cmp(const struct btree_iter *l,
const struct btree_iter *r)
{
- return __btree_iter_cmp(l->btree_id, l->pos, r);
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ -cmp_int(btree_iter_type(l), btree_iter_type(r)) ?:
+ bkey_cmp(l->pos, r->pos);
}
/*
@@ -216,9 +216,12 @@ static inline int bch2_trans_cond_resched(struct btree_trans *trans)
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_SLOTS
- ? bch2_btree_iter_peek_slot(iter)
- : bch2_btree_iter_peek(iter);
+ if ((flags & BTREE_ITER_TYPE) == BTREE_ITER_CACHED)
+ return bch2_btree_iter_peek_cached(iter);
+ else
+ return flags & BTREE_ITER_SLOTS
+ ? bch2_btree_iter_peek_slot(iter)
+ : bch2_btree_iter_peek(iter);
}
static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
@@ -303,6 +306,8 @@ void *bch2_trans_kmalloc(struct btree_trans *, size_t);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);
+void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);
+
void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
new file mode 100644
index 000000000000..d73cc8ddadac
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.c
@@ -0,0 +1,519 @@
+
+#include "bcachefs.h"
+#include "btree_cache.h"
+#include "btree_iter.h"
+#include "btree_key_cache.h"
+#include "btree_locking.h"
+#include "btree_update.h"
+#include "error.h"
+#include "journal.h"
+#include "journal_reclaim.h"
+
+#include <trace/events/bcachefs.h>
+
+static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct bkey_cached *ck = obj;
+ const struct bkey_cached_key *key = arg->key;
+
+ return cmp_int(ck->key.btree_id, key->btree_id) ?:
+ bkey_cmp(ck->key.pos, key->pos);
+}
+
+static const struct rhashtable_params bch2_btree_key_cache_params = {
+ .head_offset = offsetof(struct bkey_cached, hash),
+ .key_offset = offsetof(struct bkey_cached, key),
+ .key_len = sizeof(struct bkey_cached_key),
+ .obj_cmpfn = bch2_btree_key_cache_cmp_fn,
+};
+
+__flatten
+static inline struct bkey_cached *
+btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
+{
+ struct bkey_cached_key key = {
+ .btree_id = btree_id,
+ .pos = pos,
+ };
+
+ return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
+ bch2_btree_key_cache_params);
+}
+
+static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
+{
+ if (!six_trylock_intent(&ck->c.lock))
+ return false;
+
+ if (!six_trylock_write(&ck->c.lock)) {
+ six_unlock_intent(&ck->c.lock);
+ return false;
+ }
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+ return false;
+ }
+
+ return true;
+}
+
+static void bkey_cached_evict(struct btree_key_cache *c,
+ struct bkey_cached *ck)
+{
+ BUG_ON(rhashtable_remove_fast(&c->table, &ck->hash,
+ bch2_btree_key_cache_params));
+ memset(&ck->key, ~0, sizeof(ck->key));
+}
+
+static void bkey_cached_free(struct btree_key_cache *c,
+ struct bkey_cached *ck)
+{
+ list_move(&ck->list, &c->freed);
+
+ kfree(ck->k);
+ ck->k = NULL;
+ ck->u64s = 0;
+
+ six_unlock_write(&ck->c.lock);
+ six_unlock_intent(&ck->c.lock);
+}
+
+static struct bkey_cached *
+bkey_cached_alloc(struct btree_key_cache *c)
+{
+ struct bkey_cached *ck;
+
+ list_for_each_entry(ck, &c->freed, list)
+ if (bkey_cached_lock_for_evict(ck))
+ return ck;
+
+ list_for_each_entry(ck, &c->clean, list)
+ if (bkey_cached_lock_for_evict(ck)) {
+ bkey_cached_evict(c, ck);
+ return ck;
+ }
+
+ ck = kzalloc(sizeof(*ck), GFP_NOFS);
+ if (!ck)
+ return NULL;
+
+ INIT_LIST_HEAD(&ck->list);
+ six_lock_init(&ck->c.lock);
+ BUG_ON(!six_trylock_intent(&ck->c.lock));
+ BUG_ON(!six_trylock_write(&ck->c.lock));
+
+ return ck;
+}
+
+static struct bkey_cached *
+btree_key_cache_create(struct btree_key_cache *c,
+ enum btree_id btree_id,
+ struct bpos pos)
+{
+ struct bkey_cached *ck;
+
+ ck = bkey_cached_alloc(c);
+ if (!ck)
+ return ERR_PTR(-ENOMEM);
+
+ ck->c.level = 0;
+ ck->c.btree_id = btree_id;
+ ck->key.btree_id = btree_id;
+ ck->key.pos = pos;
+ ck->valid = false;
+
+ BUG_ON(ck->flags);
+
+ if (rhashtable_lookup_insert_fast(&c->table,
+ &ck->hash,
+ bch2_btree_key_cache_params)) {
+ /* We raced with another fill: */
+ bkey_cached_free(c, ck);
+ return NULL;
+ }
+
+ list_move(&ck->list, &c->clean);
+ six_unlock_write(&ck->c.lock);
+
+ return ck;
+}
+
+static int btree_key_cache_fill(struct btree_trans *trans,
+ struct btree_iter *ck_iter,
+ struct bkey_cached *ck)
+{
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ unsigned new_u64s = 0;
+ struct bkey_i *new_k = NULL;
+ int ret;
+
+ iter = bch2_trans_get_iter(trans, ck->key.btree_id,
+ ck->key.pos, BTREE_ITER_SLOTS);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ k = bch2_btree_iter_peek_slot(iter);
+ ret = bkey_err(k);
+ if (ret) {
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+ }
+
+ if (!bch2_btree_node_relock(ck_iter, 0)) {
+ bch2_trans_iter_put(trans, iter);
+ trace_transaction_restart_ip(trans->ip, _THIS_IP_);
+ return -EINTR;
+ }
+
+ if (k.k->u64s > ck->u64s) {
+ new_u64s = roundup_pow_of_two(k.k->u64s);
+ new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
+ if (!new_k) {
+ bch2_trans_iter_put(trans, iter);
+ return -ENOMEM;
+ }
+ }
+
+ bch2_btree_node_lock_write(ck_iter->l[0].b, ck_iter);
+ if (new_k) {
+ kfree(ck->k);
+ ck->u64s = new_u64s;
+ ck->k = new_k;
+ }
+
+ bkey_reassemble(ck->k, k);
+ ck->valid = true;
+ bch2_btree_node_unlock_write(ck_iter->l[0].b, ck_iter);
+
+ /* We're not likely to need this iterator again: */
+ bch2_trans_iter_free(trans, iter);
+
+ return 0;
+}
+
+static int bkey_cached_check_fn(struct six_lock *lock, void *p)
+{
+ struct bkey_cached *ck = container_of(lock, struct bkey_cached, c.lock);
+ const struct btree_iter *iter = p;
+
+ return ck->key.btree_id == iter->btree_id &&
+ !bkey_cmp(ck->key.pos, iter->pos) ? 0 : -1;
+}
+
+int bch2_btree_iter_traverse_cached(struct btree_iter *iter)
+{
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck;
+ int ret = 0;
+
+ BUG_ON(iter->level);
+
+ if (btree_node_locked(iter, 0)) {
+ ck = (void *) iter->l[0].b;
+ goto fill;
+ }
+retry:
+ ck = btree_key_cache_find(c, iter->btree_id, iter->pos);
+ if (!ck) {
+ if (iter->flags & BTREE_ITER_CACHED_NOCREATE) {
+ iter->l[0].b = NULL;
+ return 0;
+ }
+
+ mutex_lock(&c->btree_key_cache.lock);
+ ck = btree_key_cache_create(&c->btree_key_cache,
+ iter->btree_id, iter->pos);
+ mutex_unlock(&c->btree_key_cache.lock);
+
+ ret = PTR_ERR_OR_ZERO(ck);
+ if (ret)
+ goto err;
+ if (!ck)
+ goto retry;
+
+ mark_btree_node_locked(iter, 0, SIX_LOCK_intent);
+ iter->locks_want = 1;
+ } else {
+ enum six_lock_type lock_want = __btree_lock_want(iter, 0);
+
+ if (!btree_node_lock((void *) ck, iter->pos, 0, iter, lock_want,
+ bkey_cached_check_fn, iter)) {
+ if (ck->key.btree_id != iter->btree_id ||
+ bkey_cmp(ck->key.pos, iter->pos)) {
+ goto retry;
+ }
+
+ trace_transaction_restart_ip(trans->ip, _THIS_IP_);
+ ret = -EINTR;
+ goto err;
+ }
+
+ if (ck->key.btree_id != iter->btree_id ||
+ bkey_cmp(ck->key.pos, iter->pos)) {
+ six_unlock_type(&ck->c.lock, lock_want);
+ goto retry;
+ }
+
+ mark_btree_node_locked(iter, 0, lock_want);
+ }
+
+ iter->l[0].lock_seq = ck->c.lock.state.seq;
+ iter->l[0].b = (void *) ck;
+fill:
+ if (!ck->valid && !(iter->flags & BTREE_ITER_CACHED_NOFILL)) {
+ if (!btree_node_intent_locked(iter, 0))
+ bch2_btree_iter_upgrade(iter, 1);
+ if (!btree_node_intent_locked(iter, 0)) {
+ trace_transaction_restart_ip(trans->ip, _THIS_IP_);
+ ret = -EINTR;
+ goto err;
+ }
+
+ ret = btree_key_cache_fill(trans, iter, ck);
+ if (ret)
+ goto err;
+ }
+
+ iter->uptodate = BTREE_ITER_NEED_PEEK;
+ bch2_btree_iter_downgrade(iter);
+ return ret;
+err:
+ if (ret != -EINTR) {
+ btree_node_unlock(iter, 0);
+ iter->flags |= BTREE_ITER_ERROR;
+ iter->l[0].b = BTREE_ITER_NO_NODE_ERROR;
+ }
+ return ret;
+}
+
+static int btree_key_cache_flush_pos(struct btree_trans *trans,
+ struct bkey_cached_key key,
+ u64 journal_seq,
+ bool evict)
+{
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ struct btree_iter *c_iter = NULL, *b_iter = NULL;
+ struct bkey_cached *ck;
+ int ret;
+
+ b_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
+ BTREE_ITER_SLOTS|
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(b_iter);
+ if (ret)
+ goto out;
+
+ c_iter = bch2_trans_get_iter(trans, key.btree_id, key.pos,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_CACHED_NOCREATE|
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(c_iter);
+ if (ret)
+ goto out;
+retry:
+ ret = bch2_btree_iter_traverse(c_iter);
+ if (ret)
+ goto err;
+
+ ck = (void *) c_iter->l[0].b;
+ if (!ck ||
+ (journal_seq && ck->journal.seq != journal_seq))
+ goto out;
+
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ if (!evict)
+ goto out;
+ goto evict;
+ }
+
+ ret = bch2_btree_iter_traverse(b_iter) ?:
+ bch2_trans_update(trans, b_iter, ck->k, BTREE_TRIGGER_NORUN) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOUNLOCK|
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_USE_ALLOC_RESERVE|
+ BTREE_INSERT_JOURNAL_RESERVED|
+ BTREE_INSERT_JOURNAL_RECLAIM);
+err:
+ if (ret == -EINTR)
+ goto retry;
+
+ BUG_ON(ret && !bch2_journal_error(j));
+
+ if (ret)
+ goto out;
+
+ bch2_journal_pin_drop(j, &ck->journal);
+ bch2_journal_preres_put(j, &ck->res);
+ clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
+
+ if (!evict) {
+ mutex_lock(&c->btree_key_cache.lock);
+ list_move_tail(&ck->list, &c->btree_key_cache.clean);
+ mutex_unlock(&c->btree_key_cache.lock);
+ } else {
+evict:
+ BUG_ON(!btree_node_intent_locked(c_iter, 0));
+
+ mark_btree_node_unlocked(c_iter, 0);
+ c_iter->l[0].b = NULL;
+
+ six_lock_write(&ck->c.lock, NULL, NULL);
+
+ mutex_lock(&c->btree_key_cache.lock);
+ bkey_cached_evict(&c->btree_key_cache, ck);
+ bkey_cached_free(&c->btree_key_cache, ck);
+ mutex_unlock(&c->btree_key_cache.lock);
+ }
+out:
+ bch2_trans_iter_put(trans, b_iter);
+ bch2_trans_iter_put(trans, c_iter);
+ return ret;
+}
+
+static void btree_key_cache_journal_flush(struct journal *j,
+ struct journal_entry_pin *pin,
+ u64 seq)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bkey_cached *ck =
+ container_of(pin, struct bkey_cached, journal);
+ struct bkey_cached_key key;
+ struct btree_trans trans;
+
+ six_lock_read(&ck->c.lock, NULL, NULL);
+ key = READ_ONCE(ck->key);
+
+ if (ck->journal.seq != seq ||
+ !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ six_unlock_read(&ck->c.lock);
+ return;
+ }
+ six_unlock_read(&ck->c.lock);
+
+ bch2_trans_init(&trans, c, 0, 0);
+ btree_key_cache_flush_pos(&trans, key, seq, false);
+ bch2_trans_exit(&trans);
+}
+
+/*
+ * Flush and evict a key from the key cache:
+ */
+int bch2_btree_key_cache_flush(struct btree_trans *trans,
+ enum btree_id id, struct bpos pos)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached_key key = { id, pos };
+
+ /* Fastpath - assume it won't be found: */
+ if (!btree_key_cache_find(c, id, pos))
+ return 0;
+
+ return btree_key_cache_flush_pos(trans, key, 0, true);
+}
+
+bool bch2_btree_insert_key_cached(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_cached *ck = (void *) iter->l[0].b;
+
+ BUG_ON(insert->u64s > ck->u64s);
+
+ if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ int difference;
+
+ BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);
+
+ difference = jset_u64s(insert->u64s) - ck->res.u64s;
+ if (difference > 0) {
+ trans->journal_preres.u64s -= difference;
+ ck->res.u64s += difference;
+ }
+ }
+
+ bkey_copy(ck->k, insert);
+ ck->valid = true;
+
+ if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
+ mutex_lock(&c->btree_key_cache.lock);
+ list_del_init(&ck->list);
+
+ set_bit(BKEY_CACHED_DIRTY, &ck->flags);
+ mutex_unlock(&c->btree_key_cache.lock);
+ }
+
+ bch2_journal_pin_update(&c->journal, trans->journal_res.seq,
+ &ck->journal, btree_key_cache_journal_flush);
+ return true;
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
+ enum btree_id id, struct bpos pos)
+{
+ BUG_ON(btree_key_cache_find(trans->c, id, pos));
+}
+#endif
+
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *c)
+{
+ struct bkey_cached *ck, *n;
+
+ mutex_lock(&c->lock);
+ list_for_each_entry_safe(ck, n, &c->clean, list) {
+ kfree(ck->k);
+ kfree(ck);
+ }
+ list_for_each_entry_safe(ck, n, &c->freed, list)
+ kfree(ck);
+ mutex_unlock(&c->lock);
+
+ rhashtable_destroy(&c->table);
+}
+
+void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
+{
+ mutex_init(&c->lock);
+ INIT_LIST_HEAD(&c->freed);
+ INIT_LIST_HEAD(&c->clean);
+}
+
+int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
+{
+ return rhashtable_init(&c->table, &bch2_btree_key_cache_params);
+}
+
+void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
+{
+ struct bucket_table *tbl;
+ struct bkey_cached *ck;
+ struct rhash_head *pos;
+ size_t i;
+
+ mutex_lock(&c->lock);
+ tbl = rht_dereference_rcu(c->table.tbl, &c->table);
+
+ for (i = 0; i < tbl->size; i++) {
+ rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+ pr_buf(out, "%s:",
+ bch2_btree_ids[ck->key.btree_id]);
+ bch2_bpos_to_text(out, ck->key.pos);
+
+ if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+ pr_buf(out, " journal seq %llu", ck->journal.seq);
+ pr_buf(out, "\n");
+ }
+ }
+ mutex_unlock(&c->lock);
+}
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
new file mode 100644
index 000000000000..b1756c6c622c
--- /dev/null
+++ b/fs/bcachefs/btree_key_cache.h
@@ -0,0 +1,25 @@
+#ifndef _BCACHEFS_BTREE_KEY_CACHE_H
+#define _BCACHEFS_BTREE_KEY_CACHE_H
+
+int bch2_btree_iter_traverse_cached(struct btree_iter *);
+
+bool bch2_btree_insert_key_cached(struct btree_trans *,
+ struct btree_iter *, struct bkey_i *);
+int bch2_btree_key_cache_flush(struct btree_trans *,
+ enum btree_id, struct bpos);
+#ifdef CONFIG_BCACHEFS_DEBUG
+void bch2_btree_key_cache_verify_clean(struct btree_trans *,
+ enum btree_id, struct bpos);
+#else
+static inline void
+bch2_btree_key_cache_verify_clean(struct btree_trans *trans,
+ enum btree_id id, struct bpos pos) {}
+#endif
+
+void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
+void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
+int bch2_fs_btree_key_cache_init(struct btree_key_cache *);
+
+void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);
+
+#endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h
index 9081d3fc238a..81fbf3e18647 100644
--- a/fs/bcachefs/btree_locking.h
+++ b/fs/bcachefs/btree_locking.h
@@ -102,7 +102,7 @@ static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
EBUG_ON(level >= BTREE_MAX_DEPTH);
if (lock_type != BTREE_NODE_UNLOCKED)
- six_unlock_type(&iter->l[level].b->lock, lock_type);
+ six_unlock_type(&iter->l[level].b->c.lock, lock_type);
mark_btree_node_unlocked(iter, level);
}
@@ -143,14 +143,14 @@ static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
{
u64 start_time = local_clock();
- six_lock_type(&b->lock, type);
+ six_lock_type(&b->c.lock, type, NULL, NULL);
bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
}
static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
enum six_lock_type type)
{
- if (!six_trylock_type(&b->lock, type))
+ if (!six_trylock_type(&b->c.lock, type))
__btree_node_lock_type(c, b, type);
}
@@ -158,16 +158,16 @@ static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
* Lock a btree node if we already have it locked on one of our linked
* iterators:
*/
-static inline bool btree_node_lock_increment(struct btree_iter *iter,
+static inline bool btree_node_lock_increment(struct btree_trans *trans,
struct btree *b, unsigned level,
enum btree_node_locked_type want)
{
- struct btree_iter *linked;
+ struct btree_iter *iter;
- trans_for_each_iter(iter->trans, linked)
- if (linked->l[level].b == b &&
- btree_node_locked_type(linked, level) >= want) {
- six_lock_increment(&b->lock, want);
+ trans_for_each_iter(trans, iter)
+ if (iter->l[level].b == b &&
+ btree_node_locked_type(iter, level) >= want) {
+ six_lock_increment(&b->c.lock, want);
return true;
}
@@ -175,18 +175,37 @@ static inline bool btree_node_lock_increment(struct btree_iter *iter,
}
bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
- struct btree_iter *, enum six_lock_type);
-
-static inline bool btree_node_lock(struct btree *b, struct bpos pos,
- unsigned level,
- struct btree_iter *iter,
- enum six_lock_type type)
+ struct btree_iter *, enum six_lock_type,
+ six_lock_should_sleep_fn, void *);
+
+static inline bool btree_node_lock(struct btree *b,
+ struct bpos pos, unsigned level,
+ struct btree_iter *iter,
+ enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
{
- EBUG_ON(level >= BTREE_MAX_DEPTH);
+ struct btree_trans *trans = iter->trans;
+ bool ret;
- return likely(six_trylock_type(&b->lock, type)) ||
- btree_node_lock_increment(iter, b, level, type) ||
- __bch2_btree_node_lock(b, pos, level, iter, type);
+ EBUG_ON(level >= BTREE_MAX_DEPTH);
+ EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->locking = b;
+ trans->locking_iter_idx = iter->idx;
+ trans->locking_pos = pos;
+ trans->locking_btree_id = iter->btree_id;
+ trans->locking_level = level;
+#endif
+ ret = likely(six_trylock_type(&b->c.lock, type)) ||
+ btree_node_lock_increment(trans, b, level, type) ||
+ __bch2_btree_node_lock(b, pos, level, iter, type,
+ should_sleep_fn, p);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans->locking = NULL;
+#endif
+ return ret;
}
bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
@@ -211,13 +230,13 @@ bch2_btree_node_unlock_write_inlined(struct btree *b, struct btree_iter *iter)
{
struct btree_iter *linked;
- EBUG_ON(iter->l[b->level].b != b);
- EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);
+ EBUG_ON(iter->l[b->c.level].b != b);
+ EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
trans_for_each_iter_with_node(iter->trans, b, linked)
- linked->l[b->level].lock_seq += 2;
+ linked->l[b->c.level].lock_seq += 2;
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
}
void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
@@ -226,10 +245,10 @@ void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
- EBUG_ON(iter->l[b->level].b != b);
- EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(iter->l[b->c.level].b != b);
+ EBUG_ON(iter->l[b->c.level].lock_seq != b->c.lock.state.seq);
- if (unlikely(!six_trylock_write(&b->lock)))
+ if (unlikely(!six_trylock_write(&b->c.lock)))
__bch2_btree_node_lock_write(b, iter);
}
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 732cdc35aa7c..16c4d058358b 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -60,17 +60,20 @@ struct btree_alloc {
BKEY_PADDED(k);
};
+struct btree_bkey_cached_common {
+ struct six_lock lock;
+ u8 level;
+ u8 btree_id;
+};
+
struct btree {
- /* Hottest entries first */
+ struct btree_bkey_cached_common c;
+
struct rhash_head hash;
u64 hash_val;
- struct six_lock lock;
-
unsigned long flags;
u16 written;
- u8 level;
- u8 btree_id;
u8 nsets;
u8 nr_key_bits;
@@ -180,6 +183,7 @@ struct btree_node_iter {
enum btree_iter_type {
BTREE_ITER_KEYS,
BTREE_ITER_NODES,
+ BTREE_ITER_CACHED,
};
#define BTREE_ITER_TYPE ((1 << 2) - 1)
@@ -211,6 +215,15 @@ enum btree_iter_type {
#define BTREE_ITER_IS_EXTENTS (1 << 6)
#define BTREE_ITER_ERROR (1 << 7)
#define BTREE_ITER_SET_POS_AFTER_COMMIT (1 << 8)
+#define BTREE_ITER_CACHED_NOFILL (1 << 9)
+#define BTREE_ITER_CACHED_NOCREATE (1 << 10)
+
+#define BTREE_ITER_USER_FLAGS \
+ (BTREE_ITER_SLOTS \
+ |BTREE_ITER_INTENT \
+ |BTREE_ITER_PREFETCH \
+ |BTREE_ITER_CACHED_NOFILL \
+ |BTREE_ITER_CACHED_NOCREATE)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
@@ -219,6 +232,14 @@ enum btree_iter_uptodate {
BTREE_ITER_NEED_TRAVERSE = 3,
};
+#define BTREE_ITER_NO_NODE_GET_LOCKS ((struct btree *) 1)
+#define BTREE_ITER_NO_NODE_DROP ((struct btree *) 2)
+#define BTREE_ITER_NO_NODE_LOCK_ROOT ((struct btree *) 3)
+#define BTREE_ITER_NO_NODE_UP ((struct btree *) 4)
+#define BTREE_ITER_NO_NODE_DOWN ((struct btree *) 5)
+#define BTREE_ITER_NO_NODE_INIT ((struct btree *) 6)
+#define BTREE_ITER_NO_NODE_ERROR ((struct btree *) 7)
+
/*
* @pos - iterator's current position
* @level - current btree depth
@@ -256,7 +277,8 @@ struct btree_iter {
unsigned long ip_allocated;
};
-static inline enum btree_iter_type btree_iter_type(struct btree_iter *iter)
+static inline enum btree_iter_type
+btree_iter_type(const struct btree_iter *iter)
{
return iter->flags & BTREE_ITER_TYPE;
}
@@ -266,6 +288,37 @@ static inline struct btree_iter_level *iter_l(struct btree_iter *iter)
return iter->l + iter->level;
}
+struct btree_key_cache {
+ struct mutex lock;
+ struct rhashtable table;
+ struct list_head freed;
+ struct list_head clean;
+};
+
+struct bkey_cached_key {
+ u32 btree_id;
+ struct bpos pos;
+} __attribute__((packed, aligned(4)));
+
+#define BKEY_CACHED_DIRTY 0
+
+struct bkey_cached {
+ struct btree_bkey_cached_common c;
+
+ unsigned long flags;
+ u8 u64s;
+ bool valid;
+ struct bkey_cached_key key;
+
+ struct rhash_head hash;
+ struct list_head list;
+
+ struct journal_preres res;
+ struct journal_entry_pin journal;
+
+ struct bkey_i *k;
+};
+
struct btree_insert_entry {
unsigned trigger_flags;
unsigned trans_triggers_run:1;
@@ -281,6 +334,15 @@ struct btree_insert_entry {
struct btree_trans {
struct bch_fs *c;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ struct list_head list;
+ struct btree *locking;
+ unsigned locking_iter_idx;
+ struct bpos locking_pos;
+ u8 locking_btree_id;
+ u8 locking_level;
+ pid_t pid;
+#endif
unsigned long ip;
u64 iters_linked;
@@ -295,6 +357,7 @@ struct btree_trans {
unsigned error:1;
unsigned nounlock:1;
unsigned need_reset:1;
+ unsigned in_traverse_all:1;
unsigned mem_top;
unsigned mem_bytes;
@@ -305,6 +368,10 @@ struct btree_trans {
struct btree_insert_entry *updates2;
/* update path: */
+ struct jset_entry *extra_journal_entries;
+ unsigned extra_journal_entry_u64s;
+ struct journal_entry_pin *journal_pin;
+
struct journal_res journal_res;
struct journal_preres journal_preres;
u64 *journal_seq;
@@ -482,7 +549,7 @@ static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_
/* Type of keys @b contains: */
static inline enum btree_node_type btree_node_type(struct btree *b)
{
- return __btree_node_type(b->level, b->btree_id);
+ return __btree_node_type(b->c.level, b->c.btree_id);
}
static inline bool btree_node_type_is_extents(enum btree_node_type type)
@@ -501,6 +568,16 @@ static inline bool btree_node_is_extents(struct btree *b)
return btree_node_type_is_extents(btree_node_type(b));
}
+static inline enum btree_node_type btree_iter_key_type(struct btree_iter *iter)
+{
+ return __btree_node_type(iter->level, iter->btree_id);
+}
+
+static inline bool btree_iter_is_extents(struct btree_iter *iter)
+{
+ return btree_node_type_is_extents(btree_iter_key_type(iter));
+}
+
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
((1U << BKEY_TYPE_EXTENTS)| \
(1U << BKEY_TYPE_ALLOC)| \
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index 11f7d02de622..e0b1bde37484 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -23,6 +23,7 @@ enum btree_insert_flags {
__BTREE_INSERT_USE_ALLOC_RESERVE,
__BTREE_INSERT_JOURNAL_REPLAY,
__BTREE_INSERT_JOURNAL_RESERVED,
+ __BTREE_INSERT_JOURNAL_RECLAIM,
__BTREE_INSERT_NOWAIT,
__BTREE_INSERT_GC_LOCK_HELD,
__BCH_HASH_SET_MUST_CREATE,
@@ -47,8 +48,12 @@ enum btree_insert_flags {
/* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY (1 << __BTREE_INSERT_JOURNAL_REPLAY)
+/* Indicates that we have pre-reserved space in the journal: */
#define BTREE_INSERT_JOURNAL_RESERVED (1 << __BTREE_INSERT_JOURNAL_RESERVED)
+/* Insert is being called from journal reclaim path: */
+#define BTREE_INSERT_JOURNAL_RECLAIM (1 << __BTREE_INSERT_JOURNAL_RECLAIM)
+
/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT (1 << __BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD (1 << __BTREE_INSERT_GC_LOCK_HELD)
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 75b70187a954..a8cd6ffb6c7c 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -21,10 +21,6 @@
#include <linux/random.h>
#include <trace/events/bcachefs.h>
-static void btree_node_will_make_reachable(struct btree_update *,
- struct btree *);
-static void btree_update_drop_new_node(struct bch_fs *, struct btree *);
-
/* Debug code: */
/*
@@ -39,7 +35,7 @@ static void btree_node_interior_verify(struct btree *b)
struct bkey_s_c_btree_ptr_v2 bp;
struct bkey unpacked;
- BUG_ON(!b->level);
+ BUG_ON(!b->c.level);
bch2_btree_node_iter_init_from_start(&iter, b);
@@ -124,74 +120,6 @@ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
/* Btree node freeing/allocation: */
-static bool btree_key_matches(struct bch_fs *c,
- struct bkey_s_c l,
- struct bkey_s_c r)
-{
- struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(l);
- struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(r);
- const struct bch_extent_ptr *ptr1, *ptr2;
-
- bkey_for_each_ptr(ptrs1, ptr1)
- bkey_for_each_ptr(ptrs2, ptr2)
- if (ptr1->dev == ptr2->dev &&
- ptr1->gen == ptr2->gen &&
- ptr1->offset == ptr2->offset)
- return true;
-
- return false;
-}
-
-/*
- * We're doing the index update that makes @b unreachable, update stuff to
- * reflect that:
- *
- * Must be called _before_ btree_update_updated_root() or
- * btree_update_updated_node:
- */
-static void bch2_btree_node_free_index(struct btree_update *as, struct btree *b,
- struct bkey_s_c k,
- struct bch_fs_usage *stats)
-{
- struct bch_fs *c = as->c;
- struct pending_btree_node_free *d;
-
- for (d = as->pending; d < as->pending + as->nr_pending; d++)
- if (!bkey_cmp(k.k->p, d->key.k.p) &&
- btree_key_matches(c, k, bkey_i_to_s_c(&d->key)))
- goto found;
- BUG();
-found:
- BUG_ON(d->index_update_done);
- d->index_update_done = true;
-
- /*
- * We're dropping @k from the btree, but it's still live until the
- * index update is persistent so we need to keep a reference around for
- * mark and sweep to find - that's primarily what the
- * btree_node_pending_free list is for.
- *
- * So here (when we set index_update_done = true), we're moving an
- * existing reference to a different part of the larger "gc keyspace" -
- * and the new position comes after the old position, since GC marks
- * the pending free list after it walks the btree.
- *
- * If we move the reference while mark and sweep is _between_ the old
- * and the new position, mark and sweep will see the reference twice
- * and it'll get double accounted - so check for that here and subtract
- * to cancel out one of mark and sweep's markings if necessary:
- */
-
- if (gc_pos_cmp(c->gc_pos, b
- ? gc_pos_btree_node(b)
- : gc_pos_btree_root(as->btree_id)) >= 0 &&
- gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
- bch2_mark_key_locked(c, bkey_i_to_s_c(&d->key),
- 0, 0, NULL, 0,
- BTREE_TRIGGER_OVERWRITE|
- BTREE_TRIGGER_GC);
-}
-
static void __btree_node_free(struct bch_fs *c, struct btree *b)
{
trace_btree_node_free(c, b);
@@ -207,6 +135,8 @@ static void __btree_node_free(struct bch_fs *c, struct btree *b)
bch2_btree_node_hash_remove(&c->btree_cache, b);
+ six_lock_wakeup_all(&b->c.lock);
+
mutex_lock(&c->btree_cache.lock);
list_move(&b->list, &c->btree_cache.freeable);
mutex_unlock(&c->btree_cache.lock);
@@ -216,15 +146,13 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
{
struct open_buckets ob = b->ob;
- btree_update_drop_new_node(c, b);
-
b->ob.nr = 0;
clear_btree_node_dirty(b);
btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
bch2_open_buckets_put(c, &ob);
}
@@ -235,39 +163,12 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
struct btree_iter *linked;
trans_for_each_iter(iter->trans, linked)
- BUG_ON(linked->l[b->level].b == b);
+ BUG_ON(linked->l[b->c.level].b == b);
- /*
- * Is this a node that isn't reachable on disk yet?
- *
- * Nodes that aren't reachable yet have writes blocked until they're
- * reachable - now that we've cancelled any pending writes and moved
- * things waiting on that write to wait on this update, we can drop this
- * node from the list of nodes that the other update is making
- * reachable, prior to freeing it:
- */
- btree_update_drop_new_node(c, b);
-
- six_lock_write(&b->lock);
+ six_lock_write(&b->c.lock, NULL, NULL);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
-}
-
-static void bch2_btree_node_free_ondisk(struct bch_fs *c,
- struct pending_btree_node_free *pending,
- u64 journal_seq)
-{
- BUG_ON(!pending->index_update_done);
-
- bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
- 0, 0, NULL, journal_seq, BTREE_TRIGGER_OVERWRITE);
-
- if (gc_visited(c, gc_phase(GC_PHASE_PENDING_DELETE)))
- bch2_mark_key(c, bkey_i_to_s_c(&pending->key),
- 0, 0, NULL, journal_seq,
- BTREE_TRIGGER_OVERWRITE|
- BTREE_TRIGGER_GC);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
@@ -357,17 +258,17 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
int ret;
BUG_ON(level >= BTREE_MAX_DEPTH);
- BUG_ON(!as->reserve->nr);
+ BUG_ON(!as->nr_prealloc_nodes);
- b = as->reserve->b[--as->reserve->nr];
+ b = as->prealloc_nodes[--as->nr_prealloc_nodes];
set_btree_node_accessed(b);
set_btree_node_dirty(b);
set_btree_node_need_write(b);
bch2_bset_init_first(b, &b->data->keys);
- b->level = level;
- b->btree_id = as->btree_id;
+ b->c.level = level;
+ b->c.btree_id = as->btree_id;
memset(&b->nr, 0, sizeof(b->nr));
b->data->magic = cpu_to_le64(bset_magic(c));
@@ -394,8 +295,6 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
bch2_btree_build_aux_trees(b);
- btree_node_will_make_reachable(as, b);
-
ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
BUG_ON(ret);
@@ -422,7 +321,7 @@ struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *as,
{
struct btree *n;
- n = bch2_btree_node_alloc(as, b->level);
+ n = bch2_btree_node_alloc(as, b->c.level);
SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
@@ -466,21 +365,22 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
btree_node_set_format(b, b->data->format);
bch2_btree_build_aux_trees(b);
- six_unlock_write(&b->lock);
+ bch2_btree_update_add_new_node(as, b);
+ six_unlock_write(&b->c.lock);
return b;
}
-static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reserve)
+static void bch2_btree_reserve_put(struct btree_update *as)
{
- bch2_disk_reservation_put(c, &reserve->disk_res);
+ struct bch_fs *c = as->c;
mutex_lock(&c->btree_reserve_cache_lock);
- while (reserve->nr) {
- struct btree *b = reserve->b[--reserve->nr];
+ while (as->nr_prealloc_nodes) {
+ struct btree *b = as->prealloc_nodes[--as->nr_prealloc_nodes];
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
if (c->btree_reserve_cache_nr <
ARRAY_SIZE(c->btree_reserve_cache)) {
@@ -496,42 +396,20 @@ static void bch2_btree_reserve_put(struct bch_fs *c, struct btree_reserve *reser
btree_node_lock_type(c, b, SIX_LOCK_write);
__btree_node_free(c, b);
- six_unlock_write(&b->lock);
+ six_unlock_write(&b->c.lock);
- six_unlock_intent(&b->lock);
+ six_unlock_intent(&b->c.lock);
}
mutex_unlock(&c->btree_reserve_cache_lock);
-
- mempool_free(reserve, &c->btree_reserve_pool);
}
-static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
- unsigned nr_nodes,
- unsigned flags,
- struct closure *cl)
+static int bch2_btree_reserve_get(struct btree_update *as, unsigned nr_nodes,
+ unsigned flags, struct closure *cl)
{
- struct btree_reserve *reserve;
+ struct bch_fs *c = as->c;
struct btree *b;
- struct disk_reservation disk_res = { 0, 0 };
- unsigned sectors = nr_nodes * c->opts.btree_node_size;
- int ret, disk_res_flags = 0;
-
- if (flags & BTREE_INSERT_NOFAIL)
- disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
-
- /*
- * This check isn't necessary for correctness - it's just to potentially
- * prevent us from doing a lot of work that'll end up being wasted:
- */
- ret = bch2_journal_error(&c->journal);
- if (ret)
- return ERR_PTR(ret);
-
- if (bch2_disk_reservation_get(c, &disk_res, sectors,
- c->opts.metadata_replicas,
- disk_res_flags))
- return ERR_PTR(-ENOSPC);
+ int ret;
BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
@@ -540,18 +418,11 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
* open bucket reserve:
*/
ret = bch2_btree_cache_cannibalize_lock(c, cl);
- if (ret) {
- bch2_disk_reservation_put(c, &disk_res);
- return ERR_PTR(ret);
- }
-
- reserve = mempool_alloc(&c->btree_reserve_pool, GFP_NOIO);
-
- reserve->disk_res = disk_res;
- reserve->nr = 0;
+ if (ret)
+ return ret;
- while (reserve->nr < nr_nodes) {
- b = __bch2_btree_node_alloc(c, &disk_res,
+ while (as->nr_prealloc_nodes < nr_nodes) {
+ b = __bch2_btree_node_alloc(c, &as->disk_res,
flags & BTREE_INSERT_NOWAIT
? NULL : cl, flags);
if (IS_ERR(b)) {
@@ -563,21 +434,20 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
if (ret)
goto err_free;
- reserve->b[reserve->nr++] = b;
+ as->prealloc_nodes[as->nr_prealloc_nodes++] = b;
}
bch2_btree_cache_cannibalize_unlock(c);
- return reserve;
+ return 0;
err_free:
- bch2_btree_reserve_put(c, reserve);
bch2_btree_cache_cannibalize_unlock(c);
trace_btree_reserve_get_fail(c, nr_nodes, cl);
- return ERR_PTR(ret);
+ return ret;
}
/* Asynchronous interior node update machinery */
-static void __bch2_btree_update_free(struct btree_update *as)
+static void bch2_btree_update_free(struct btree_update *as)
{
struct bch_fs *c = as->c;
@@ -585,14 +455,13 @@ static void __bch2_btree_update_free(struct btree_update *as)
bch2_journal_pin_drop(&c->journal, &as->journal);
bch2_journal_pin_flush(&c->journal, &as->journal);
+ bch2_disk_reservation_put(c, &as->disk_res);
+ bch2_btree_reserve_put(as);
- BUG_ON(as->nr_new_nodes || as->nr_pending);
-
- if (as->reserve)
- bch2_btree_reserve_put(c, as->reserve);
-
+ mutex_lock(&c->btree_interior_update_lock);
list_del(&as->unwritten_list);
list_del(&as->list);
+ mutex_unlock(&c->btree_interior_update_lock);
closure_debug_destroy(&as->cl);
mempool_free(as, &c->btree_interior_update_pool);
@@ -600,37 +469,59 @@ static void __bch2_btree_update_free(struct btree_update *as)
closure_wake_up(&c->btree_interior_update_wait);
}
-static void bch2_btree_update_free(struct btree_update *as)
+static void btree_update_will_delete_key(struct btree_update *as,
+ struct bkey_i *k)
{
- struct bch_fs *c = as->c;
+ BUG_ON(bch2_keylist_u64s(&as->old_keys) + k->k.u64s >
+ ARRAY_SIZE(as->_old_keys));
+ bch2_keylist_add(&as->old_keys, k);
+}
- mutex_lock(&c->btree_interior_update_lock);
- __bch2_btree_update_free(as);
- mutex_unlock(&c->btree_interior_update_lock);
+static void btree_update_will_add_key(struct btree_update *as,
+ struct bkey_i *k)
+{
+ BUG_ON(bch2_keylist_u64s(&as->new_keys) + k->k.u64s >
+ ARRAY_SIZE(as->_new_keys));
+ bch2_keylist_add(&as->new_keys, k);
}
-static inline bool six_trylock_intentwrite(struct six_lock *lock)
+/*
+ * The transactional part of an interior btree node update, where we journal the
+ * update we did to the interior node and update alloc info:
+ */
+static int btree_update_nodes_written_trans(struct btree_trans *trans,
+ struct btree_update *as)
{
- if (!six_trylock_intent(lock))
- return false;
+ struct bkey_i *k;
+ int ret;
+
+ trans->extra_journal_entries = (void *) &as->journal_entries[0];
+ trans->extra_journal_entry_u64s = as->journal_u64s;
+ trans->journal_pin = &as->journal;
- if (!six_trylock_write(lock)) {
- six_unlock_intent(lock);
- return false;
+ for_each_keylist_key(&as->new_keys, k) {
+ ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(k),
+ 0, 0, BTREE_TRIGGER_INSERT);
+ if (ret)
+ return ret;
}
- return true;
+ for_each_keylist_key(&as->old_keys, k) {
+ ret = bch2_trans_mark_key(trans, bkey_i_to_s_c(k),
+ 0, 0, BTREE_TRIGGER_OVERWRITE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
-static void btree_update_nodes_written(struct closure *cl)
+static void btree_update_nodes_written(struct btree_update *as)
{
- struct btree_update *as = container_of(cl, struct btree_update, cl);
- struct btree *nodes_need_write[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES + 1];
- unsigned nr_nodes_need_write;
- struct journal_res res = { 0 };
struct bch_fs *c = as->c;
- struct btree_root *r;
- struct btree *b;
+ struct btree *b = as->b;
+ u64 journal_seq = 0;
+ unsigned i;
int ret;
/*
@@ -638,78 +529,28 @@ static void btree_update_nodes_written(struct closure *cl)
* to child nodes that weren't written yet: now, the child nodes have
* been written so we can write out the update to the interior node.
*/
- mutex_lock(&c->btree_interior_update_lock);
- as->nodes_written = true;
-again:
- nr_nodes_need_write = 0;
- as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
- struct btree_update, unwritten_list);
- if (!as || !as->nodes_written) {
- mutex_unlock(&c->btree_interior_update_lock);
- return;
- }
-
- b = as->b;
- if (b && !six_trylock_intentwrite(&b->lock)) {
- mutex_unlock(&c->btree_interior_update_lock);
-
- btree_node_lock_type(c, b, SIX_LOCK_intent);
- six_lock_write(&b->lock);
-
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
-
- mutex_lock(&c->btree_interior_update_lock);
- goto again;
- }
-
- ret = bch2_journal_res_get(&c->journal, &res, as->journal_u64s,
- JOURNAL_RES_GET_NONBLOCK|
- JOURNAL_RES_GET_RESERVED);
- if (ret == -EAGAIN) {
- unsigned u64s = as->journal_u64s;
-
- if (b) {
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
- }
-
- mutex_unlock(&c->btree_interior_update_lock);
-
- ret = bch2_journal_res_get(&c->journal, &res, u64s,
- JOURNAL_RES_GET_CHECK|
- JOURNAL_RES_GET_RESERVED);
- if (!ret) {
- mutex_lock(&c->btree_interior_update_lock);
- goto again;
- }
- }
-
- if (!ret) {
- struct journal_buf *buf = &c->journal.buf[res.idx];
- struct jset_entry *entry = vstruct_idx(buf->data, res.offset);
-
- res.offset += as->journal_u64s;
- res.u64s -= as->journal_u64s;
- memcpy_u64s(entry, as->journal_entries, as->journal_u64s);
- } else {
- /*
- * On journal error we have to run most of the normal path so
- * that shutdown works - unblocking btree node writes in
- * particular and writing them if needed - except for
- * journalling the update:
- */
-
- BUG_ON(!bch2_journal_error(&c->journal));
- }
-
- switch (as->mode) {
- case BTREE_INTERIOR_NO_UPDATE:
- BUG();
- case BTREE_INTERIOR_UPDATING_NODE:
- /* @b is the node we did the final insert into: */
+ /*
+ * We can't call into journal reclaim here: we'd block on the journal
+ * reclaim lock, but we may need to release the open buckets we have
+ * pinned in order for other btree updates to make forward progress, and
+ * journal reclaim does btree updates when flushing bkey_cached entries,
+ * which may require allocations as well.
+ */
+ ret = bch2_trans_do(c, &as->disk_res, &journal_seq,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_USE_ALLOC_RESERVE|
+ BTREE_INSERT_NOCHECK_RW|
+ BTREE_INSERT_JOURNAL_RECLAIM|
+ BTREE_INSERT_JOURNAL_RESERVED,
+ btree_update_nodes_written_trans(&trans, as));
+ BUG_ON(ret && !bch2_journal_error(&c->journal));
+
+ if (b) {
/*
+ * @b is the node we did the final insert into:
+ *
* On failure to get a journal reservation, we still have to
* unblock the write and allow most of the write path to happen
* so that shutdown works, but the i->journal_seq mechanism
@@ -719,83 +560,90 @@ again:
* we're in journal error state:
*/
+ btree_node_lock_type(c, b, SIX_LOCK_intent);
+ btree_node_lock_type(c, b, SIX_LOCK_write);
+ mutex_lock(&c->btree_interior_update_lock);
+
list_del(&as->write_blocked_list);
- if (!ret) {
+ if (!ret && as->b == b) {
struct bset *i = btree_bset_last(b);
+ BUG_ON(!b->c.level);
+ BUG_ON(!btree_node_dirty(b));
+
i->journal_seq = cpu_to_le64(
- max(res.seq,
+ max(journal_seq,
le64_to_cpu(i->journal_seq)));
- bch2_btree_add_journal_pin(c, b, res.seq);
+ bch2_btree_add_journal_pin(c, b, journal_seq);
}
- nodes_need_write[nr_nodes_need_write++] = b;
-
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
- break;
-
- case BTREE_INTERIOR_UPDATING_AS:
- BUG_ON(b);
- break;
-
- case BTREE_INTERIOR_UPDATING_ROOT:
- r = &c->btree_roots[as->btree_id];
-
- BUG_ON(b);
+ mutex_unlock(&c->btree_interior_update_lock);
+ six_unlock_write(&b->c.lock);
- mutex_lock(&c->btree_root_lock);
- bkey_copy(&r->key, as->parent_keys.keys);
- r->level = as->level;
- r->alive = true;
- c->btree_roots_dirty = true;
- mutex_unlock(&c->btree_root_lock);
- break;
+ btree_node_write_if_need(c, b, SIX_LOCK_intent);
+ six_unlock_intent(&b->c.lock);
}
bch2_journal_pin_drop(&c->journal, &as->journal);
- bch2_journal_res_put(&c->journal, &res);
bch2_journal_preres_put(&c->journal, &as->journal_preres);
- while (as->nr_new_nodes) {
- b = as->new_nodes[--as->nr_new_nodes];
+ mutex_lock(&c->btree_interior_update_lock);
+ for (i = 0; i < as->nr_new_nodes; i++) {
+ b = as->new_nodes[i];
BUG_ON(b->will_make_reachable != (unsigned long) as);
b->will_make_reachable = 0;
+ }
+ mutex_unlock(&c->btree_interior_update_lock);
- nodes_need_write[nr_nodes_need_write++] = b;
+ for (i = 0; i < as->nr_new_nodes; i++) {
+ b = as->new_nodes[i];
+
+ btree_node_lock_type(c, b, SIX_LOCK_read);
+ btree_node_write_if_need(c, b, SIX_LOCK_read);
+ six_unlock_read(&b->c.lock);
}
- while (as->nr_pending)
- bch2_btree_node_free_ondisk(c,
- &as->pending[--as->nr_pending], res.seq);
+ for (i = 0; i < as->nr_open_buckets; i++)
+ bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);
- __bch2_btree_update_free(as);
- /*
- * for flush_held_btree_writes() waiting on updates to flush or
- * nodes to be writeable:
- */
- closure_wake_up(&c->btree_interior_update_wait);
+ bch2_btree_update_free(as);
+}
- /*
- * Can't take btree node locks while holding btree_interior_update_lock:
- * */
- mutex_unlock(&c->btree_interior_update_lock);
+static void btree_interior_update_work(struct work_struct *work)
+{
+ struct bch_fs *c =
+ container_of(work, struct bch_fs, btree_interior_update_work);
+ struct btree_update *as;
- /* Do btree writes after dropping journal res/locks: */
- while (nr_nodes_need_write) {
- b = nodes_need_write[--nr_nodes_need_write];
+ while (1) {
+ mutex_lock(&c->btree_interior_update_lock);
+ as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
+ struct btree_update, unwritten_list);
+ if (as && !as->nodes_written)
+ as = NULL;
+ mutex_unlock(&c->btree_interior_update_lock);
- btree_node_lock_type(c, b, SIX_LOCK_read);
- bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
- six_unlock_read(&b->lock);
+ if (!as)
+ break;
+
+ btree_update_nodes_written(as);
}
+}
+
+static void btree_update_set_nodes_written(struct closure *cl)
+{
+ struct btree_update *as = container_of(cl, struct btree_update, cl);
+ struct bch_fs *c = as->c;
mutex_lock(&c->btree_interior_update_lock);
- goto again;
+ as->nodes_written = true;
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
}
/*
@@ -814,7 +662,6 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b)
as->mode = BTREE_INTERIOR_UPDATING_NODE;
as->b = b;
- as->level = b->level;
list_add(&as->write_blocked_list, &b->write_blocked);
mutex_unlock(&c->btree_interior_update_lock);
@@ -845,25 +692,45 @@ static void btree_update_reparent(struct btree_update *as,
static void btree_update_updated_root(struct btree_update *as, struct btree *b)
{
+ struct bkey_i *insert = &b->key;
struct bch_fs *c = as->c;
BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
- BUG_ON(!bch2_keylist_empty(&as->parent_keys));
+
+ BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
+ ARRAY_SIZE(as->journal_entries));
+
+ as->journal_u64s +=
+ journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
+ BCH_JSET_ENTRY_btree_root,
+ b->c.btree_id, b->c.level,
+ insert, insert->k.u64s);
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
as->mode = BTREE_INTERIOR_UPDATING_ROOT;
- as->level = b->level;
- bch2_keylist_add(&as->parent_keys, &b->key);
mutex_unlock(&c->btree_interior_update_lock);
}
-static void btree_node_will_make_reachable(struct btree_update *as,
- struct btree *b)
+/*
+ * bch2_btree_update_add_new_node:
+ *
+ * This causes @as to wait on @b to be written, before it gets to
+ * bch2_btree_update_nodes_written
+ *
+ * Additionally, it sets b->will_make_reachable to prevent any additional writes
+ * to @b from happening besides the first until @b is reachable on disk
+ *
+ * And it adds @b to the list of @as's new nodes, so that we can update sector
+ * counts in bch2_btree_update_nodes_written:
+ */
+void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
{
struct bch_fs *c = as->c;
+ closure_get(&as->cl);
+
mutex_lock(&c->btree_interior_update_lock);
BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
BUG_ON(b->will_make_reachable);
@@ -871,10 +738,14 @@ static void btree_node_will_make_reachable(struct btree_update *as,
as->new_nodes[as->nr_new_nodes++] = b;
b->will_make_reachable = 1UL|(unsigned long) as;
- closure_get(&as->cl);
mutex_unlock(&c->btree_interior_update_lock);
+
+ btree_update_will_add_key(as, &b->key);
}
+/*
+ * Drop @b from the list of new nodes of the update that's making it reachable:
+ */
static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
{
struct btree_update *as;
@@ -882,6 +753,11 @@ static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
unsigned i;
mutex_lock(&c->btree_interior_update_lock);
+ /*
+ * When b->will_make_reachable != 0, it owns a ref on as->cl that's
+ * dropped when it gets written by bch2_btree_complete_write - the
+ * xchg() is for synchronization with bch2_btree_complete_write:
+ */
v = xchg(&b->will_make_reachable, 0);
as = (struct btree_update *) (v & ~1UL);
@@ -903,25 +779,11 @@ found:
closure_put(&as->cl);
}
-static void btree_interior_update_add_node_reference(struct btree_update *as,
- struct btree *b)
+void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
{
- struct bch_fs *c = as->c;
- struct pending_btree_node_free *d;
-
- mutex_lock(&c->btree_interior_update_lock);
-
- /* Add this node to the list of nodes being freed: */
- BUG_ON(as->nr_pending >= ARRAY_SIZE(as->pending));
-
- d = &as->pending[as->nr_pending++];
- d->index_update_done = false;
- d->seq = b->data->keys.seq;
- d->btree_id = b->btree_id;
- d->level = b->level;
- bkey_copy(&d->key, &b->key);
-
- mutex_unlock(&c->btree_interior_update_lock);
+ while (b->ob.nr)
+ as->open_buckets[as->nr_open_buckets++] =
+ b->ob.v[--b->ob.nr];
}
/*
@@ -941,8 +803,6 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
if (btree_node_fake(b))
return;
- btree_interior_update_add_node_reference(as, b);
-
mutex_lock(&c->btree_interior_update_lock);
/*
@@ -954,7 +814,7 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
* operations complete
*/
list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
- list_del(&p->write_blocked_list);
+ list_del_init(&p->write_blocked_list);
btree_update_reparent(as, p);
/*
@@ -984,16 +844,28 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
bch2_journal_pin_drop(&c->journal, &w->journal);
mutex_unlock(&c->btree_interior_update_lock);
+
+ /*
+ * Is this a node that isn't reachable on disk yet?
+ *
+ * Nodes that aren't reachable yet have writes blocked until they're
+ * reachable - now that we've cancelled any pending writes and moved
+ * things waiting on that write to wait on this update, we can drop this
+ * node from the list of nodes that the other update is making
+ * reachable, prior to freeing it:
+ */
+ btree_update_drop_new_node(c, b);
+
+ btree_update_will_delete_key(as, &b->key);
}
void bch2_btree_update_done(struct btree_update *as)
{
BUG_ON(as->mode == BTREE_INTERIOR_NO_UPDATE);
- bch2_btree_reserve_put(as->c, as->reserve);
- as->reserve = NULL;
+ bch2_btree_reserve_put(as);
- continue_at(&as->cl, btree_update_nodes_written, system_freezable_wq);
+ continue_at(&as->cl, btree_update_set_nodes_written, system_freezable_wq);
}
struct btree_update *
@@ -1002,60 +874,79 @@ bch2_btree_update_start(struct btree_trans *trans, enum btree_id id,
struct closure *cl)
{
struct bch_fs *c = trans->c;
- struct journal_preres journal_preres = { 0 };
- struct btree_reserve *reserve;
struct btree_update *as;
- int ret;
+ int disk_res_flags = (flags & BTREE_INSERT_NOFAIL)
+ ? BCH_DISK_RESERVATION_NOFAIL : 0;
+ int journal_flags = (flags & BTREE_INSERT_JOURNAL_RESERVED)
+ ? JOURNAL_RES_GET_RECLAIM : 0;
+ int ret = 0;
+
+ /*
+ * This check isn't necessary for correctness - it's just to potentially
+ * prevent us from doing a lot of work that'll end up being wasted:
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ return ERR_PTR(ret);
+
+ as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
+ memset(as, 0, sizeof(*as));
+ closure_init(&as->cl, NULL);
+ as->c = c;
+ as->mode = BTREE_INTERIOR_NO_UPDATE;
+ as->btree_id = id;
+ INIT_LIST_HEAD(&as->list);
+ INIT_LIST_HEAD(&as->unwritten_list);
+ INIT_LIST_HEAD(&as->write_blocked_list);
+ bch2_keylist_init(&as->old_keys, as->_old_keys);
+ bch2_keylist_init(&as->new_keys, as->_new_keys);
+ bch2_keylist_init(&as->parent_keys, as->inline_keys);
- ret = bch2_journal_preres_get(&c->journal, &journal_preres,
+ ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
BTREE_UPDATE_JOURNAL_RES,
- JOURNAL_RES_GET_NONBLOCK);
+ journal_flags|JOURNAL_RES_GET_NONBLOCK);
if (ret == -EAGAIN) {
if (flags & BTREE_INSERT_NOUNLOCK)
return ERR_PTR(-EINTR);
bch2_trans_unlock(trans);
- ret = bch2_journal_preres_get(&c->journal, &journal_preres,
- BTREE_UPDATE_JOURNAL_RES, 0);
+ ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
+ BTREE_UPDATE_JOURNAL_RES,
+ journal_flags);
if (ret)
return ERR_PTR(ret);
if (!bch2_trans_relock(trans)) {
- bch2_journal_preres_put(&c->journal, &journal_preres);
- return ERR_PTR(-EINTR);
+ ret = -EINTR;
+ goto err;
}
}
- reserve = bch2_btree_reserve_get(c, nr_nodes, flags, cl);
- if (IS_ERR(reserve)) {
- bch2_journal_preres_put(&c->journal, &journal_preres);
- return ERR_CAST(reserve);
- }
-
- as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOIO);
- memset(as, 0, sizeof(*as));
- closure_init(&as->cl, NULL);
- as->c = c;
- as->mode = BTREE_INTERIOR_NO_UPDATE;
- as->btree_id = id;
- as->reserve = reserve;
- INIT_LIST_HEAD(&as->write_blocked_list);
- INIT_LIST_HEAD(&as->unwritten_list);
- as->journal_preres = journal_preres;
+ ret = bch2_disk_reservation_get(c, &as->disk_res,
+ nr_nodes * c->opts.btree_node_size,
+ c->opts.metadata_replicas,
+ disk_res_flags);
+ if (ret)
+ goto err;
- bch2_keylist_init(&as->parent_keys, as->inline_keys);
+ ret = bch2_btree_reserve_get(as, nr_nodes, flags, cl);
+ if (ret)
+ goto err;
mutex_lock(&c->btree_interior_update_lock);
list_add_tail(&as->list, &c->btree_interior_update_list);
mutex_unlock(&c->btree_interior_update_lock);
return as;
+err:
+ bch2_btree_update_free(as);
+ return ERR_PTR(ret);
}
/* Btree root updates: */
-static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
+static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
{
/* Root nodes cannot be reaped */
mutex_lock(&c->btree_cache.lock);
@@ -1064,7 +955,7 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
mutex_lock(&c->btree_root_lock);
BUG_ON(btree_node_root(c, b) &&
- (b->level < btree_node_root(c, b)->level ||
+ (b->c.level < btree_node_root(c, b)->c.level ||
!btree_node_dying(btree_node_root(c, b))));
btree_node_root(c, b) = b;
@@ -1073,38 +964,6 @@ static void __bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
bch2_recalc_btree_reserve(c);
}
-static void bch2_btree_set_root_inmem(struct btree_update *as, struct btree *b)
-{
- struct bch_fs *c = as->c;
- struct btree *old = btree_node_root(c, b);
- struct bch_fs_usage *fs_usage;
-
- __bch2_btree_set_root_inmem(c, b);
-
- mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->mark_lock);
- fs_usage = bch2_fs_usage_scratch_get(c);
-
- bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
- 0, 0, fs_usage, 0,
- BTREE_TRIGGER_INSERT);
- if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
- bch2_mark_key_locked(c, bkey_i_to_s_c(&b->key),
- 0, 0, NULL, 0,
- BTREE_TRIGGER_INSERT|
- BTREE_TRIGGER_GC);
-
- if (old && !btree_node_fake(old))
- bch2_btree_node_free_index(as, NULL,
- bkey_i_to_s_c(&old->key),
- fs_usage);
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
-
- bch2_fs_usage_scratch_put(c, fs_usage);
- percpu_up_read(&c->mark_lock);
- mutex_unlock(&c->btree_interior_update_lock);
-}
-
/**
* bch_btree_set_root - update the root in memory and on disk
*
@@ -1135,7 +994,7 @@ static void bch2_btree_set_root(struct btree_update *as, struct btree *b,
*/
bch2_btree_node_lock_write(old, iter);
- bch2_btree_set_root_inmem(as, b);
+ bch2_btree_set_root_inmem(c, b);
btree_update_updated_root(as, b);
@@ -1156,57 +1015,21 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b
struct bkey_i *insert,
struct btree_node_iter *node_iter)
{
- struct bch_fs *c = as->c;
- struct bch_fs_usage *fs_usage;
- struct jset_entry *entry;
struct bkey_packed *k;
- struct bkey tmp;
BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
ARRAY_SIZE(as->journal_entries));
- entry = (void *) &as->journal_entries[as->journal_u64s];
- memset(entry, 0, sizeof(*entry));
- entry->u64s = cpu_to_le16(insert->k.u64s);
- entry->type = BCH_JSET_ENTRY_btree_keys;
- entry->btree_id = b->btree_id;
- entry->level = b->level;
- memcpy_u64s_small(entry->_data, insert, insert->k.u64s);
- as->journal_u64s += jset_u64s(insert->k.u64s);
-
- mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->mark_lock);
- fs_usage = bch2_fs_usage_scratch_get(c);
-
- bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
- 0, 0, fs_usage, 0,
- BTREE_TRIGGER_INSERT);
-
- if (gc_visited(c, gc_pos_btree_node(b)))
- bch2_mark_key_locked(c, bkey_i_to_s_c(insert),
- 0, 0, NULL, 0,
- BTREE_TRIGGER_INSERT|
- BTREE_TRIGGER_GC);
+ as->journal_u64s +=
+ journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
+ BCH_JSET_ENTRY_btree_keys,
+ b->c.btree_id, b->c.level,
+ insert, insert->k.u64s);
while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
bch2_btree_node_iter_advance(node_iter, b);
- /*
- * If we're overwriting, look up pending delete and mark so that gc
- * marks it on the pending delete list:
- */
- if (k && !bkey_cmp_packed(b, k, &insert->k))
- bch2_btree_node_free_index(as, b,
- bkey_disassemble(b, k, &tmp),
- fs_usage);
-
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
-
- bch2_fs_usage_scratch_put(c, fs_usage);
- percpu_up_read(&c->mark_lock);
- mutex_unlock(&c->btree_interior_update_lock);
-
bch2_btree_bset_insert_key(iter, b, node_iter, insert);
set_btree_node_dirty(b);
set_btree_node_need_write(b);
@@ -1225,7 +1048,8 @@ static struct btree *__btree_split_node(struct btree_update *as,
struct bset *set1, *set2;
struct bkey_packed *k, *prev = NULL;
- n2 = bch2_btree_node_alloc(as, n1->level);
+ n2 = bch2_btree_node_alloc(as, n1->c.level);
+ bch2_btree_update_add_new_node(as, n2);
n2->data->max_key = n1->data->max_key;
n2->data->format = n1->format;
@@ -1293,7 +1117,7 @@ static struct btree *__btree_split_node(struct btree_update *as,
bch2_verify_btree_nr_keys(n1);
bch2_verify_btree_nr_keys(n2);
- if (n1->level) {
+ if (n1->c.level) {
btree_node_interior_verify(n1);
btree_node_interior_verify(n2);
}
@@ -1321,14 +1145,6 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
struct bkey_packed *src, *dst, *n;
struct bset *i;
- /*
- * XXX
- *
- * these updates must be journalled
- *
- * oops
- */
-
BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
@@ -1375,11 +1191,12 @@ static void btree_split(struct btree_update *as, struct btree *b,
u64 start_time = local_clock();
BUG_ON(!parent && (b != btree_node_root(c, b)));
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
+ BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
bch2_btree_interior_update_will_free_node(as, b);
n1 = bch2_btree_node_alloc_replacement(as, b);
+ bch2_btree_update_add_new_node(as, n1);
if (keys)
btree_split_insert_keys(as, n1, iter, keys);
@@ -1391,8 +1208,8 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_build_aux_trees(n2);
bch2_btree_build_aux_trees(n1);
- six_unlock_write(&n2->lock);
- six_unlock_write(&n1->lock);
+ six_unlock_write(&n2->c.lock);
+ six_unlock_write(&n1->c.lock);
bch2_btree_node_write(c, n2, SIX_LOCK_intent);
@@ -1406,7 +1223,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
if (!parent) {
/* Depth increases, make a new root */
- n3 = __btree_root_alloc(as, b->level + 1);
+ n3 = __btree_root_alloc(as, b->c.level + 1);
n3->sib_u64s[0] = U16_MAX;
n3->sib_u64s[1] = U16_MAX;
@@ -1419,7 +1236,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
trace_btree_compact(c, b);
bch2_btree_build_aux_trees(n1);
- six_unlock_write(&n1->lock);
+ six_unlock_write(&n1->c.lock);
if (parent)
bch2_keylist_add(&as->parent_keys, &n1->key);
@@ -1439,15 +1256,15 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_set_root(as, n1, iter);
}
- bch2_open_buckets_put(c, &n1->ob);
+ bch2_btree_update_get_open_buckets(as, n1);
if (n2)
- bch2_open_buckets_put(c, &n2->ob);
+ bch2_btree_update_get_open_buckets(as, n2);
if (n3)
- bch2_open_buckets_put(c, &n3->ob);
+ bch2_btree_update_get_open_buckets(as, n3);
/* Successful split, update the iterator to point to the new nodes: */
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
if (n3)
bch2_btree_iter_node_replace(iter, n3);
@@ -1464,10 +1281,10 @@ static void btree_split(struct btree_update *as, struct btree *b,
bch2_btree_node_free_inmem(c, b, iter);
if (n3)
- six_unlock_intent(&n3->lock);
+ six_unlock_intent(&n3->c.lock);
if (n2)
- six_unlock_intent(&n2->lock);
- six_unlock_intent(&n1->lock);
+ six_unlock_intent(&n2->c.lock);
+ six_unlock_intent(&n1->c.lock);
bch2_btree_trans_verify_locks(iter->trans);
@@ -1485,7 +1302,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
struct bkey_packed *k;
/* Don't screw up @iter's position: */
- node_iter = iter->l[b->level].iter;
+ node_iter = iter->l[b->c.level].iter;
/*
* btree_split(), btree_gc_coalesce() will insert keys before
@@ -1502,7 +1319,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
btree_update_updated_node(as, b);
trans_for_each_iter_with_node(iter->trans, b, linked)
- bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);
+ bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);
bch2_btree_trans_verify_iters(iter->trans, b);
}
@@ -1528,8 +1345,8 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->level));
- BUG_ON(!b->level);
+ BUG_ON(!btree_node_intent_locked(iter, btree_node_root(c, b)->c.level));
+ BUG_ON(!b->c.level);
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
@@ -1538,7 +1355,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
bch2_btree_node_lock_for_insert(c, b, iter);
- if (!bch2_btree_node_insert_fits(c, b, bch_keylist_u64s(keys))) {
+ if (!bch2_btree_node_insert_fits(c, b, bch2_keylist_u64s(keys))) {
bch2_btree_node_unlock_write(b, iter);
goto split;
}
@@ -1566,7 +1383,7 @@ void bch2_btree_insert_node(struct btree_update *as, struct btree *b,
* the btree iterator yet, so the merge path's unlock/wait/relock dance
* won't work:
*/
- bch2_foreground_maybe_merge(c, iter, b->level,
+ bch2_foreground_maybe_merge(c, iter, b->c.level,
flags|BTREE_INSERT_NOUNLOCK);
return;
split:
@@ -1581,14 +1398,14 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
struct btree_update *as;
struct closure cl;
int ret = 0;
- struct btree_iter *linked;
+ struct btree_insert_entry *i;
/*
* We already have a disk reservation and open buckets pinned; this
* allocation must not block:
*/
- trans_for_each_iter(trans, linked)
- if (linked->btree_id == BTREE_ID_EXTENTS)
+ trans_for_each_update(trans, i)
+ if (btree_node_type_needs_gc(i->iter->btree_id))
flags |= BTREE_INSERT_USE_RESERVE;
closure_init_stack(&cl);
@@ -1718,7 +1535,7 @@ retry:
b->sib_u64s[sib] = sib_u64s;
if (b->sib_u64s[sib] > BTREE_FOREGROUND_MERGE_THRESHOLD(c)) {
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
goto out;
}
@@ -1748,7 +1565,8 @@ retry:
bch2_btree_interior_update_will_free_node(as, b);
bch2_btree_interior_update_will_free_node(as, m);
- n = bch2_btree_node_alloc(as, b->level);
+ n = bch2_btree_node_alloc(as, b->c.level);
+ bch2_btree_update_add_new_node(as, n);
btree_set_min(n, prev->data->min_key);
btree_set_max(n, next->data->max_key);
@@ -1760,7 +1578,7 @@ retry:
bch2_btree_sort_into(c, n, next);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+ six_unlock_write(&n->c.lock);
bkey_init(&delete.k);
delete.k.p = prev->key.k.p;
@@ -1771,9 +1589,9 @@ retry:
bch2_btree_insert_node(as, parent, iter, &as->parent_keys, flags);
- bch2_open_buckets_put(c, &n->ob);
+ bch2_btree_update_get_open_buckets(as, n);
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_drop(iter, m);
@@ -1784,7 +1602,7 @@ retry:
bch2_btree_node_free_inmem(c, b, iter);
bch2_btree_node_free_inmem(c, m, iter);
- six_unlock_intent(&n->lock);
+ six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as);
@@ -1806,7 +1624,7 @@ out:
return;
err_cycle_gc_lock:
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
if (flags & BTREE_INSERT_NOUNLOCK)
goto out;
@@ -1819,7 +1637,7 @@ err_cycle_gc_lock:
goto err;
err_unlock:
- six_unlock_intent(&m->lock);
+ six_unlock_intent(&m->c.lock);
if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
up_read(&c->gc_lock);
err:
@@ -1859,9 +1677,10 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_interior_update_will_free_node(as, b);
n = bch2_btree_node_alloc_replacement(as, b);
+ bch2_btree_update_add_new_node(as, n);
bch2_btree_build_aux_trees(n);
- six_unlock_write(&n->lock);
+ six_unlock_write(&n->c.lock);
trace_btree_gc_rewrite_node(c, b);
@@ -1874,13 +1693,13 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,
bch2_btree_set_root(as, n, iter);
}
- bch2_open_buckets_put(c, &n->ob);
+ bch2_btree_update_get_open_buckets(as, n);
- six_lock_increment(&b->lock, SIX_LOCK_intent);
+ six_lock_increment(&b->c.lock, SIX_LOCK_intent);
bch2_btree_iter_node_drop(iter, b);
bch2_btree_iter_node_replace(iter, n);
bch2_btree_node_free_inmem(c, b, iter);
- six_unlock_intent(&n->lock);
+ six_unlock_intent(&n->c.lock);
bch2_btree_update_done(as);
return 0;
@@ -1949,56 +1768,15 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
struct btree *parent;
int ret;
- /*
- * Two corner cases that need to be thought about here:
- *
- * @b may not be reachable yet - there might be another interior update
- * operation waiting on @b to be written, and we're gonna deliver the
- * write completion to that interior update operation _before_
- * persisting the new_key update
- *
- * That ends up working without us having to do anything special here:
- * the reason is, we do kick off (and do the in memory updates) for the
- * update for @new_key before we return, creating a new interior_update
- * operation here.
- *
- * The new interior update operation here will in effect override the
- * previous one. The previous one was going to terminate - make @b
- * reachable - in one of two ways:
- * - updating the btree root pointer
- * In that case,
- * no, this doesn't work. argh.
- */
-
- if (b->will_make_reachable)
- as->must_rewrite = true;
-
- btree_interior_update_add_node_reference(as, b);
-
- /*
- * XXX: the rest of the update path treats this like we're actually
- * inserting a new node and deleting the existing node, so the
- * reservation needs to include enough space for @b
- *
- * that is actually sketch as fuck though and I am surprised the code
- * seems to work like that, definitely need to go back and rework it
- * into something saner.
- *
- * (I think @b is just getting double counted until the btree update
- * finishes and "deletes" @b on disk)
- */
- ret = bch2_disk_reservation_add(c, &as->reserve->disk_res,
- c->opts.btree_node_size *
- bch2_bkey_nr_ptrs(bkey_i_to_s_c(new_key)),
- BCH_DISK_RESERVATION_NOFAIL);
- BUG_ON(ret);
+ btree_update_will_delete_key(as, &b->key);
+ btree_update_will_add_key(as, new_key);
parent = btree_node_parent(iter, b);
if (parent) {
if (new_hash) {
bkey_copy(&new_hash->key, new_key);
ret = bch2_btree_node_hash_insert(&c->btree_cache,
- new_hash, b->level, b->btree_id);
+ new_hash, b->c.level, b->c.btree_id);
BUG_ON(ret);
}
@@ -2019,44 +1797,18 @@ static void __bch2_btree_node_update_key(struct bch_fs *c,
bkey_copy(&b->key, new_key);
}
} else {
- struct bch_fs_usage *fs_usage;
-
BUG_ON(btree_node_root(c, b) != b);
bch2_btree_node_lock_write(b, iter);
+ bkey_copy(&b->key, new_key);
- mutex_lock(&c->btree_interior_update_lock);
- percpu_down_read(&c->mark_lock);
- fs_usage = bch2_fs_usage_scratch_get(c);
-
- bch2_mark_key_locked(c, bkey_i_to_s_c(new_key),
- 0, 0, fs_usage, 0,
- BTREE_TRIGGER_INSERT);
- if (gc_visited(c, gc_pos_btree_root(b->btree_id)))
- bch2_mark_key_locked(c, bkey_i_to_s_c(new_key),
- 0, 0, NULL, 0,
- BTREE_TRIGGER_INSERT||
- BTREE_TRIGGER_GC);
-
- bch2_btree_node_free_index(as, NULL,
- bkey_i_to_s_c(&b->key),
- fs_usage);
- bch2_fs_usage_apply(c, fs_usage, &as->reserve->disk_res, 0);
-
- bch2_fs_usage_scratch_put(c, fs_usage);
- percpu_up_read(&c->mark_lock);
- mutex_unlock(&c->btree_interior_update_lock);
-
- if (btree_ptr_hash_val(new_key) != b->hash_val) {
+ if (btree_ptr_hash_val(&b->key) != b->hash_val) {
mutex_lock(&c->btree_cache.lock);
bch2_btree_node_hash_remove(&c->btree_cache, b);
- bkey_copy(&b->key, new_key);
ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
BUG_ON(ret);
mutex_unlock(&c->btree_cache.lock);
- } else {
- bkey_copy(&b->key, new_key);
}
btree_update_updated_root(as, b);
@@ -2150,8 +1902,8 @@ err:
list_move(&new_hash->list, &c->btree_cache.freeable);
mutex_unlock(&c->btree_cache.lock);
- six_unlock_write(&new_hash->lock);
- six_unlock_intent(&new_hash->lock);
+ six_unlock_write(&new_hash->c.lock);
+ six_unlock_intent(&new_hash->c.lock);
}
up_read(&c->gc_lock);
closure_sync(&cl);
@@ -2171,7 +1923,7 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
{
BUG_ON(btree_node_root(c, b));
- __bch2_btree_set_root_inmem(c, b);
+ bch2_btree_set_root_inmem(c, b);
}
void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
@@ -2191,8 +1943,8 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
bch2_btree_cache_cannibalize_unlock(c);
set_btree_node_fake(b);
- b->level = 0;
- b->btree_id = id;
+ b->c.level = 0;
+ b->c.btree_id = id;
bkey_btree_ptr_init(&b->key);
b->key.k.p = POS_MAX;
@@ -2207,13 +1959,14 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
b->data->format = bch2_btree_calc_format(b);
btree_node_set_format(b, b->data->format);
- ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->level, b->btree_id);
+ ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
+ b->c.level, b->c.btree_id);
BUG_ON(ret);
- __bch2_btree_set_root_inmem(c, b);
+ bch2_btree_set_root_inmem(c, b);
- six_unlock_write(&b->lock);
- six_unlock_intent(&b->lock);
+ six_unlock_write(&b->c.lock);
+ six_unlock_intent(&b->c.lock);
}
ssize_t bch2_btree_updates_print(struct bch_fs *c, char *buf)
@@ -2246,3 +1999,75 @@ size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *c)
return ret;
}
+
+void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
+{
+ struct btree_root *r;
+ struct jset_entry *entry;
+
+ mutex_lock(&c->btree_root_lock);
+
+ vstruct_for_each(jset, entry)
+ if (entry->type == BCH_JSET_ENTRY_btree_root) {
+ r = &c->btree_roots[entry->btree_id];
+ r->level = entry->level;
+ r->alive = true;
+ bkey_copy(&r->key, &entry->start[0]);
+ }
+
+ mutex_unlock(&c->btree_root_lock);
+}
+
+struct jset_entry *
+bch2_btree_roots_to_journal_entries(struct bch_fs *c,
+ struct jset_entry *start,
+ struct jset_entry *end)
+{
+ struct jset_entry *entry;
+ unsigned long have = 0;
+ unsigned i;
+
+ for (entry = start; entry < end; entry = vstruct_next(entry))
+ if (entry->type == BCH_JSET_ENTRY_btree_root)
+ __set_bit(entry->btree_id, &have);
+
+ mutex_lock(&c->btree_root_lock);
+
+ for (i = 0; i < BTREE_ID_NR; i++)
+ if (c->btree_roots[i].alive && !test_bit(i, &have)) {
+ journal_entry_set(end,
+ BCH_JSET_ENTRY_btree_root,
+ i, c->btree_roots[i].level,
+ &c->btree_roots[i].key,
+ c->btree_roots[i].key.u64s);
+ end = vstruct_next(end);
+ }
+
+ mutex_unlock(&c->btree_root_lock);
+
+ return end;
+}
+
+void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
+{
+ if (c->btree_interior_update_worker)
+ destroy_workqueue(c->btree_interior_update_worker);
+ mempool_exit(&c->btree_interior_update_pool);
+}
+
+int bch2_fs_btree_interior_update_init(struct bch_fs *c)
+{
+ mutex_init(&c->btree_reserve_cache_lock);
+ INIT_LIST_HEAD(&c->btree_interior_update_list);
+ INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
+ mutex_init(&c->btree_interior_update_lock);
+ INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);
+
+ c->btree_interior_update_worker =
+ alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+ if (!c->btree_interior_update_worker)
+ return -ENOMEM;
+
+ return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+ sizeof(struct btree_update));
+}
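A rough, self-contained sketch of the old_keys/new_keys bookkeeping that replaces the pending_btree_node_free list above: keys for nodes being freed and for nodes being added are queued on the update while it is built, then replayed in a single transactional pass (as btree_update_nodes_written_trans() does). This is a toy userspace model, not bcachefs code; every name in it is invented for illustration.

/* Toy model of the old_keys/new_keys pattern: keys are queued while the
 * update is assembled, then replayed in one pass. Not bcachefs code. */
#include <assert.h>
#include <stdio.h>

#define UPDATE_KEYS_MAX 8

struct toy_key		{ unsigned long long offset; };
struct toy_update {
	struct toy_key old_keys[UPDATE_KEYS_MAX]; unsigned nr_old;
	struct toy_key new_keys[UPDATE_KEYS_MAX]; unsigned nr_new;
};

static void update_will_delete_key(struct toy_update *as, struct toy_key k)
{
	assert(as->nr_old < UPDATE_KEYS_MAX);	/* mirrors the BUG_ON() above */
	as->old_keys[as->nr_old++] = k;
}

static void update_will_add_key(struct toy_update *as, struct toy_key k)
{
	assert(as->nr_new < UPDATE_KEYS_MAX);
	as->new_keys[as->nr_new++] = k;
}

/* Stand-in for btree_update_nodes_written_trans(): mark inserts for the
 * replacement nodes and overwrites for the nodes being freed, in one pass: */
static int update_nodes_written_trans(struct toy_update *as)
{
	unsigned i;

	for (i = 0; i < as->nr_new; i++)
		printf("mark INSERT    node at %llu\n", as->new_keys[i].offset);
	for (i = 0; i < as->nr_old; i++)
		printf("mark OVERWRITE node at %llu\n", as->old_keys[i].offset);
	return 0;
}

int main(void)
{
	struct toy_update as = { .nr_old = 0, .nr_new = 0 };

	/* A split frees one node and makes two new ones reachable: */
	update_will_delete_key(&as, (struct toy_key) { 100 });
	update_will_add_key(&as, (struct toy_key) { 200 });
	update_will_add_key(&as, (struct toy_key) { 300 });

	return update_nodes_written_trans(&as);
}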
diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h
index 2fddf5d31eb9..4a5b9dcfbdd0 100644
--- a/fs/bcachefs/btree_update_interior.h
+++ b/fs/bcachefs/btree_update_interior.h
@@ -6,34 +6,13 @@
#include "btree_locking.h"
#include "btree_update.h"
-struct btree_reserve {
- struct disk_reservation disk_res;
- unsigned nr;
- struct btree *b[BTREE_RESERVE_MAX];
-};
-
void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
struct bkey_format *);
-/* Btree node freeing/allocation: */
-
-/*
- * Tracks a btree node that has been (or is about to be) freed in memory, but
- * has _not_ yet been freed on disk (because the write that makes the new
- * node(s) visible and frees the old hasn't completed yet)
- */
-struct pending_btree_node_free {
- bool index_update_done;
-
- __le64 seq;
- enum btree_id btree_id;
- unsigned level;
- __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
-};
+#define BTREE_UPDATE_NODES_MAX ((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
-#define BTREE_UPDATE_JOURNAL_RES \
- ((BKEY_BTREE_PTR_U64s_MAX + 1) * (BTREE_MAX_DEPTH - 1) * 2)
+#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
/*
* Tracks an in progress split/rewrite of a btree node and the update to the
@@ -72,9 +51,8 @@ struct btree_update {
unsigned nodes_written:1;
enum btree_id btree_id;
- u8 level;
- struct btree_reserve *reserve;
+ struct disk_reservation disk_res;
struct journal_preres journal_preres;
/*
@@ -96,17 +74,28 @@ struct btree_update {
*/
struct journal_entry_pin journal;
- /*
- * Nodes being freed:
- * Protected by c->btree_node_pending_free_lock
- */
- struct pending_btree_node_free pending[BTREE_MAX_DEPTH + GC_MERGE_NODES];
- unsigned nr_pending;
+ /* Preallocated nodes we reserve when we start the update: */
+ struct btree *prealloc_nodes[BTREE_UPDATE_NODES_MAX];
+ unsigned nr_prealloc_nodes;
+
+ /* Nodes being freed: */
+ struct keylist old_keys;
+ u64 _old_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_VAL_U64s_MAX];
+
+ /* Nodes being added: */
+ struct keylist new_keys;
+ u64 _new_keys[BTREE_UPDATE_NODES_MAX *
+ BKEY_BTREE_PTR_VAL_U64s_MAX];
/* New nodes, that will be made reachable by this update: */
- struct btree *new_nodes[BTREE_MAX_DEPTH * 2 + GC_MERGE_NODES];
+ struct btree *new_nodes[BTREE_UPDATE_NODES_MAX];
unsigned nr_new_nodes;
+ open_bucket_idx_t open_buckets[BTREE_UPDATE_NODES_MAX *
+ BCH_REPLICAS_MAX];
+ open_bucket_idx_t nr_open_buckets;
+
unsigned journal_u64s;
u64 journal_entries[BTREE_UPDATE_JOURNAL_RES];
@@ -120,14 +109,12 @@ struct btree_update {
u64 inline_keys[BKEY_BTREE_PTR_U64s_MAX * 3];
};
-#define for_each_pending_btree_node_free(c, as, p) \
- list_for_each_entry(as, &c->btree_interior_update_list, list) \
- for (p = as->pending; p < as->pending + as->nr_pending; p++)
-
void bch2_btree_node_free_inmem(struct bch_fs *, struct btree *,
struct btree_iter *);
void bch2_btree_node_free_never_inserted(struct bch_fs *, struct btree *);
+void bch2_btree_update_get_open_buckets(struct btree_update *, struct btree *);
+
struct btree *__bch2_btree_node_alloc_replacement(struct btree_update *,
struct btree *,
struct bkey_format);
@@ -139,6 +126,7 @@ bch2_btree_update_start(struct btree_trans *, enum btree_id, unsigned,
void bch2_btree_interior_update_will_free_node(struct btree_update *,
struct btree *);
+void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
void bch2_btree_insert_node(struct btree_update *, struct btree *,
struct btree_iter *, struct keylist *,
@@ -185,7 +173,7 @@ void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
struct btree *b)
{
- unsigned depth = btree_node_root(c, b)->level + 1;
+ unsigned depth = btree_node_root(c, b)->c.level + 1;
/*
* Number of nodes we might have to allocate in a worst case btree
@@ -193,9 +181,9 @@ static inline unsigned btree_update_reserve_required(struct bch_fs *c,
* a new root, unless we're already at max depth:
*/
if (depth < BTREE_MAX_DEPTH)
- return (depth - b->level) * 2 + 1;
+ return (depth - b->c.level) * 2 + 1;
else
- return (depth - b->level) * 2 - 1;
+ return (depth - b->c.level) * 2 - 1;
}
static inline void btree_node_reset_sib_u64s(struct btree *b)
@@ -333,4 +321,11 @@ ssize_t bch2_btree_updates_print(struct bch_fs *, char *);
size_t bch2_btree_interior_updates_nr_pending(struct bch_fs *);
+void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
+struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
+ struct jset_entry *, struct jset_entry *);
+
+void bch2_fs_btree_interior_update_exit(struct bch_fs *);
+int bch2_fs_btree_interior_update_init(struct bch_fs *);
+
#endif /* _BCACHEFS_BTREE_UPDATE_INTERIOR_H */
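For a concrete feel for the sizing above: BTREE_UPDATE_NODES_MAX allows roughly two new nodes per interior level that can split, plus nodes picked up by merging, and the journal reservation must cover one btree-pointer entry per such node. The constants BTREE_MAX_DEPTH, GC_MERGE_NODES and BKEY_BTREE_PTR_U64s_MAX are defined elsewhere in the tree; the values below are assumed purely to make the arithmetic visible.

/* Worked example of the reserve sizing above, with assumed constants;
 * the real values live elsewhere in the source tree. */
#include <stdio.h>

#define BTREE_MAX_DEPTH			 4	/* assumed */
#define GC_MERGE_NODES			 4	/* assumed */
#define BKEY_BTREE_PTR_U64s_MAX		16	/* assumed */

#define BTREE_UPDATE_NODES_MAX	((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
#define BTREE_UPDATE_JOURNAL_RES (BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))

int main(void)
{
	/* (4 - 2) * 2 + 4 = 8 nodes; 8 * (16 + 1) = 136 u64s of journal space */
	printf("BTREE_UPDATE_NODES_MAX   = %d\n", BTREE_UPDATE_NODES_MAX);
	printf("BTREE_UPDATE_JOURNAL_RES = %d u64s\n", BTREE_UPDATE_JOURNAL_RES);
	return 0;
}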
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index 7faf98fd2f64..6e9688d0bb77 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -6,6 +6,7 @@
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
@@ -32,6 +33,9 @@ inline void bch2_btree_node_lock_for_insert(struct bch_fs *c, struct btree *b,
{
bch2_btree_node_lock_write(b, iter);
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+ return;
+
if (unlikely(btree_node_just_written(b)) &&
bch2_btree_post_write_cleanup(c, b))
bch2_btree_iter_reinit_node(iter, b);
@@ -135,7 +139,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
btree_node_lock_type(c, b, SIX_LOCK_read);
bch2_btree_node_write_cond(c, b,
(btree_current_write(b) == w && w->journal.seq == seq));
- six_unlock_read(&b->lock);
+ six_unlock_read(&b->c.lock);
}
static void btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
@@ -159,71 +163,35 @@ inline void bch2_btree_add_journal_pin(struct bch_fs *c,
: btree_node_flush1);
}
-static inline void __btree_journal_key(struct btree_trans *trans,
- enum btree_id btree_id,
- struct bkey_i *insert)
-{
- struct journal *j = &trans->c->journal;
- u64 seq = trans->journal_res.seq;
- bool needs_whiteout = insert->k.needs_whiteout;
-
- /* ick */
- insert->k.needs_whiteout = false;
- bch2_journal_add_keys(j, &trans->journal_res,
- btree_id, insert);
- insert->k.needs_whiteout = needs_whiteout;
-
- bch2_journal_set_has_inode(j, &trans->journal_res,
- insert->k.p.inode);
-
- if (trans->journal_seq)
- *trans->journal_seq = seq;
-}
-
-static void bch2_btree_journal_key(struct btree_trans *trans,
- struct btree_iter *iter,
- struct bkey_i *insert)
-{
- struct bch_fs *c = trans->c;
- struct journal *j = &c->journal;
- struct btree *b = iter_l(iter)->b;
-
- EBUG_ON(trans->journal_res.ref !=
- !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
-
- if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
- __btree_journal_key(trans, iter->btree_id, insert);
- btree_bset_last(b)->journal_seq =
- cpu_to_le64(trans->journal_res.seq);
- }
-
- bch2_btree_add_journal_pin(c, b,
- likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- ? trans->journal_res.seq
- : j->replay_journal_seq);
-
- if (unlikely(!btree_node_dirty(b)))
- set_btree_node_dirty(b);
-}
-
/**
* btree_insert_key - insert a key one key into a leaf node
*/
-static void btree_insert_key_leaf(struct btree_trans *trans,
+static bool btree_insert_key_leaf(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert)
{
struct bch_fs *c = trans->c;
struct btree *b = iter_l(iter)->b;
struct bset_tree *t = bset_tree_last(b);
+ struct bset *i = bset(b, t);
int old_u64s = bset_u64s(t);
int old_live_u64s = b->nr.live_u64s;
int live_u64s_added, u64s_added;
- insert->k.needs_whiteout = false;
+ EBUG_ON(!iter->level &&
+ !test_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags));
+
+ if (unlikely(!bch2_btree_bset_insert_key(iter, b,
+ &iter_l(iter)->iter, insert)))
+ return false;
+
+ i->journal_seq = cpu_to_le64(max(trans->journal_res.seq,
+ le64_to_cpu(i->journal_seq)));
+
+ bch2_btree_add_journal_pin(c, b, trans->journal_res.seq);
- if (likely(bch2_btree_bset_insert_key(iter, b, &iter_l(iter)->iter, insert)))
- bch2_btree_journal_key(trans, iter, insert);
+ if (unlikely(!btree_node_dirty(b)))
+ set_btree_node_dirty(b);
live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
u64s_added = (int) bset_u64s(t) - old_u64s;
@@ -238,8 +206,11 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
bch2_btree_iter_reinit_node(iter, b);
trace_btree_insert_key(c, b, insert);
+ return true;
}
+/* Cached btree updates: */
+
/* Normal update interface: */
static inline void btree_insert_entry_checks(struct btree_trans *trans,
@@ -310,7 +281,7 @@ btree_key_can_insert(struct btree_trans *trans,
if (unlikely(btree_node_old_extent_overwrite(b)))
return BTREE_INSERT_BTREE_NODE_FULL;
- ret = !(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ret = !btree_iter_is_extents(iter)
? BTREE_INSERT_OK
: bch2_extent_can_insert(trans, iter, insert);
if (ret)
@@ -322,11 +293,60 @@ btree_key_can_insert(struct btree_trans *trans,
return BTREE_INSERT_OK;
}
+static enum btree_insert_ret
+btree_key_can_insert_cached(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_i *insert,
+ unsigned *u64s)
+{
+ struct bkey_cached *ck = (void *) iter->l[0].b;
+ unsigned new_u64s;
+ struct bkey_i *new_k;
+
+ BUG_ON(iter->level);
+
+ if (*u64s <= ck->u64s)
+ return BTREE_INSERT_OK;
+
+ new_u64s = roundup_pow_of_two(*u64s);
+ new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
+ if (!new_k)
+ return -ENOMEM;
+
+ ck->u64s = new_u64s;
+ ck->k = new_k;
+ return BTREE_INSERT_OK;
+}
+
static inline void do_btree_insert_one(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert)
{
- btree_insert_key_leaf(trans, iter, insert);
+ struct bch_fs *c = trans->c;
+ struct journal *j = &c->journal;
+ bool did_work;
+
+ EBUG_ON(trans->journal_res.ref !=
+ !(trans->flags & BTREE_INSERT_JOURNAL_REPLAY));
+
+ insert->k.needs_whiteout = false;
+
+ did_work = (btree_iter_type(iter) != BTREE_ITER_CACHED)
+ ? btree_insert_key_leaf(trans, iter, insert)
+ : bch2_btree_insert_key_cached(trans, iter, insert);
+ if (!did_work)
+ return;
+
+ if (likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))) {
+ bch2_journal_add_keys(j, &trans->journal_res,
+ iter->btree_id, insert);
+
+ bch2_journal_set_has_inode(j, &trans->journal_res,
+ insert->k.p.inode);
+
+ if (trans->journal_seq)
+ *trans->journal_seq = trans->journal_res.seq;
+ }
}
static inline bool iter_has_trans_triggers(struct btree_iter *iter)
@@ -351,10 +371,16 @@ static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
struct bch_fs *c = trans->c;
struct btree_insert_entry *i;
- trans_for_each_update(trans, i)
- if (gc_visited(c, gc_pos_btree_node(iter_l(i->iter)->b)))
+ trans_for_each_update(trans, i) {
+ /*
+ * XXX: synchronization of cached update triggers with gc
+ */
+ BUG_ON(btree_iter_type(i->iter) == BTREE_ITER_CACHED);
+
+ if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
bch2_mark_update(trans, i->iter, i->k, NULL,
i->trigger_flags|BTREE_TRIGGER_GC);
+ }
}
static inline int
@@ -387,7 +413,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
u64s = 0;
u64s += i->k->k.u64s;
- ret = btree_key_can_insert(trans, i->iter, i->k, &u64s);
+ ret = btree_iter_type(i->iter) != BTREE_ITER_CACHED
+ ? btree_key_can_insert(trans, i->iter, i->k, &u64s)
+ : btree_key_can_insert_cached(trans, i->iter, i->k, &u64s);
if (ret) {
*stopped_at = i;
return ret;
@@ -411,6 +439,17 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
JOURNAL_RES_GET_NONBLOCK);
if (ret)
goto err;
+ } else {
+ trans->journal_res.seq = c->journal.replay_journal_seq;
+ }
+
+ if (unlikely(trans->extra_journal_entry_u64s)) {
+ memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
+ trans->extra_journal_entries,
+ trans->extra_journal_entry_u64s);
+
+ trans->journal_res.offset += trans->extra_journal_entry_u64s;
+ trans->journal_res.u64s -= trans->extra_journal_entry_u64s;
}
/*
@@ -472,7 +511,9 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
ret = bch2_journal_preres_get(&trans->c->journal,
&trans->journal_preres, trans->journal_preres_u64s,
- JOURNAL_RES_GET_NONBLOCK);
+ JOURNAL_RES_GET_NONBLOCK|
+ ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM)
+ ? JOURNAL_RES_GET_RECLAIM : 0));
if (unlikely(ret == -EAGAIN))
ret = bch2_trans_journal_preres_get_cold(trans,
trans->journal_preres_u64s);
@@ -486,7 +527,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
* or anything else that might call bch2_trans_relock(), since that
* would just retake the read locks:
*/
- trans_for_each_iter_all(trans, iter) {
+ trans_for_each_iter(trans, iter) {
if (iter->nodes_locked != iter->nodes_intent_locked) {
EBUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
EBUG_ON(trans->iters_live & (1ULL << iter->idx));
@@ -511,6 +552,10 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
bch2_btree_node_unlock_write_inlined(iter_l(i->iter)->b,
i->iter);
+ if (!ret && trans->journal_pin)
+ bch2_journal_pin_add(&trans->c->journal, trans->journal_res.seq,
+ trans->journal_pin, NULL);
+
/*
* Drop journal reservation after dropping write locks, since dropping
* the journal reservation may kick off a journal write:
@@ -524,14 +569,14 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
trans->nounlock = true;
trans_for_each_update2(trans, i)
- if (!same_leaf_as_prev(trans, i))
+ if (btree_iter_type(i->iter) != BTREE_ITER_CACHED &&
+ !same_leaf_as_prev(trans, i))
bch2_foreground_maybe_merge(trans->c, i->iter,
0, trans->flags);
trans->nounlock = false;
- trans_for_each_update2(trans, i)
- bch2_btree_iter_downgrade(i->iter);
+ bch2_trans_downgrade(trans);
return 0;
}
@@ -800,7 +845,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
memset(&trans->journal_preres, 0, sizeof(trans->journal_preres));
- trans->journal_u64s = 0;
+ trans->journal_u64s = trans->extra_journal_entry_u64s;
trans->journal_preres_u64s = 0;
if (!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
@@ -810,6 +855,14 @@ int __bch2_trans_commit(struct btree_trans *trans)
return ret;
}
+#ifdef CONFIG_BCACHEFS_DEBUG
+ trans_for_each_update(trans, i)
+ if (btree_iter_type(i->iter) != BTREE_ITER_CACHED &&
+ !(i->trigger_flags & BTREE_TRIGGER_NORUN))
+ bch2_btree_key_cache_verify_clean(trans,
+ i->iter->btree_id, i->iter->pos);
+#endif
+
/*
* Running triggers will append more updates to the list of updates as
* we're walking it:
@@ -818,9 +871,9 @@ int __bch2_trans_commit(struct btree_trans *trans)
trans_trigger_run = false;
trans_for_each_update(trans, i) {
- if (unlikely(i->iter->uptodate > BTREE_ITER_NEED_PEEK)) {
+ if (unlikely(i->iter->uptodate > BTREE_ITER_NEED_PEEK &&
+ (ret = bch2_btree_iter_traverse(i->iter)))) {
trace_trans_restart_traverse(trans->ip);
- ret = -EINTR;
goto out;
}
@@ -882,7 +935,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
BUG_ON(i->iter->locks_want < 1);
u64s = jset_u64s(i->k->k.u64s);
- if (0)
+ if (btree_iter_type(i->iter) == BTREE_ITER_CACHED &&
+ likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY)))
trans->journal_preres_u64s += u64s;
trans->journal_u64s += u64s;
}
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 2e1df04c760d..0ec194b93c71 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -374,6 +374,11 @@ static inline int is_fragmented_bucket(struct bucket_mark m,
return 0;
}
+static inline int bucket_stripe_sectors(struct bucket_mark m)
+{
+ return m.stripe ? m.dirty_sectors : 0;
+}
+
static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
return m.cached_sectors && !m.dirty_sectors
@@ -441,33 +446,35 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
struct bucket_mark old, struct bucket_mark new,
bool gc)
{
- struct bch_dev_usage *dev_usage;
+ struct bch_dev_usage *u;
percpu_rwsem_assert_held(&c->mark_lock);
preempt_disable();
- dev_usage = this_cpu_ptr(ca->usage[gc]);
+ u = this_cpu_ptr(ca->usage[gc]);
if (bucket_type(old))
- account_bucket(fs_usage, dev_usage, bucket_type(old),
+ account_bucket(fs_usage, u, bucket_type(old),
-1, -ca->mi.bucket_size);
if (bucket_type(new))
- account_bucket(fs_usage, dev_usage, bucket_type(new),
+ account_bucket(fs_usage, u, bucket_type(new),
1, ca->mi.bucket_size);
- dev_usage->buckets_alloc +=
+ u->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
- dev_usage->buckets_ec +=
- (int) new.stripe - (int) old.stripe;
- dev_usage->buckets_unavailable +=
+ u->buckets_unavailable +=
is_unavailable_bucket(new) - is_unavailable_bucket(old);
- dev_usage->sectors[old.data_type] -= old.dirty_sectors;
- dev_usage->sectors[new.data_type] += new.dirty_sectors;
- dev_usage->sectors[BCH_DATA_CACHED] +=
+ u->buckets_ec += (int) new.stripe - (int) old.stripe;
+ u->sectors_ec += bucket_stripe_sectors(new) -
+ bucket_stripe_sectors(old);
+
+ u->sectors[old.data_type] -= old.dirty_sectors;
+ u->sectors[new.data_type] += new.dirty_sectors;
+ u->sectors[BCH_DATA_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
- dev_usage->sectors_fragmented +=
+ u->sectors_fragmented +=
is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
preempt_enable();
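
Aside: bch2_dev_usage_update() above applies an old-to-new bucket transition as signed deltas against the per-cpu counters, and the new sectors_ec counter follows the same pattern via bucket_stripe_sectors(). A standalone sketch of that delta style (toy types, not the real bucket_mark or bch_dev_usage):

#include <stdint.h>
#include <stdio.h>

/* Toy bucket mark: only the fields this delta accounting touches. */
struct mark {
	uint16_t dirty_sectors;
	uint16_t cached_sectors;
	uint8_t  stripe;		/* 0 or 1 */
};

struct dev_usage {
	int buckets_ec;
	int sectors_ec;
	int sectors_cached;
};

static int stripe_sectors(struct mark m)
{
	return m.stripe ? m.dirty_sectors : 0;
}

/* Apply an old -> new transition as signed deltas, like the hunk above. */
static void usage_update(struct dev_usage *u, struct mark old, struct mark new)
{
	u->buckets_ec	  += (int) new.stripe - (int) old.stripe;
	u->sectors_ec	  += stripe_sectors(new) - stripe_sectors(old);
	u->sectors_cached += (int) new.cached_sectors - (int) old.cached_sectors;
}

int main(void)
{
	struct dev_usage u = { 0 };
	struct mark old = { .dirty_sectors = 8, .stripe = 0 };
	struct mark new = { .dirty_sectors = 8, .stripe = 1 };

	usage_update(&u, old, new);
	printf("buckets_ec %+d sectors_ec %+d\n", u.buckets_ec, u.sectors_ec);
	return 0;
}
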
@@ -778,29 +785,31 @@ static int bch2_mark_alloc(struct bch_fs *c, struct bkey_s_c k,
})
static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type type,
+ size_t b, enum bch_data_type data_type,
unsigned sectors, bool gc)
{
struct bucket *g = __bucket(ca, b, gc);
struct bucket_mark old, new;
bool overflow;
- BUG_ON(type != BCH_DATA_SB &&
- type != BCH_DATA_JOURNAL);
+ BUG_ON(data_type != BCH_DATA_SB &&
+ data_type != BCH_DATA_JOURNAL);
old = bucket_cmpxchg(g, new, ({
- new.data_type = type;
+ new.data_type = data_type;
overflow = checked_add(new.dirty_sectors, sectors);
}));
bch2_fs_inconsistent_on(old.data_type &&
- old.data_type != type, c,
+ old.data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_types[old.data_type],
- bch2_data_types[type]);
+ bch2_data_types[data_type]);
bch2_fs_inconsistent_on(overflow, c,
- "bucket sector count overflow: %u + %u > U16_MAX",
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
+ ca->dev_idx, b, new.gen,
+ bch2_data_types[old.data_type ?: data_type],
old.dirty_sectors, sectors);
if (c)
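
Aside: the improved inconsistency message above reports an overflow detected by checked_add() on a 16-bit sector count. A tiny userspace sketch of such a checked add (assumed here to saturate at U16_MAX; the exact clamping behaviour of the real helper is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Add to a u16 counter, report whether the true sum exceeded U16_MAX. */
static int checked_add_u16(uint16_t *dst, unsigned amount)
{
	unsigned sum = (unsigned) *dst + amount;
	int overflow = sum > UINT16_MAX;

	*dst = overflow ? UINT16_MAX : (uint16_t) sum;
	return overflow;
}

int main(void)
{
	uint16_t dirty_sectors = 65000;

	if (checked_add_u16(&dirty_sectors, 1000))
		printf("bucket sector count overflow: 65000 + 1000 > U16_MAX\n");
	return 0;
}
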
@@ -916,58 +925,117 @@ static void bucket_set_stripe(struct bch_fs *c,
}
}
-static bool bch2_mark_pointer(struct bch_fs *c,
- struct extent_ptr_decoded p,
- s64 sectors, enum bch_data_type data_type,
- struct bch_fs_usage *fs_usage,
- u64 journal_seq, unsigned flags)
+static int __mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ s64 sectors, enum bch_data_type ptr_data_type,
+ u8 bucket_gen, u8 *bucket_data_type,
+ u16 *dirty_sectors, u16 *cached_sectors)
+{
+ u16 *dst_sectors = !p.ptr.cached
+ ? dirty_sectors
+ : cached_sectors;
+ u16 orig_sectors = *dst_sectors;
+ char buf[200];
+
+ if (gen_after(p.ptr.gen, bucket_gen)) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
+ bucket_gen,
+ bch2_data_types[*bucket_data_type ?: ptr_data_type],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ return -EIO;
+ }
+
+ if (gen_cmp(bucket_gen, p.ptr.gen) >= 96U) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
+ bucket_gen,
+ bch2_data_types[*bucket_data_type ?: ptr_data_type],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ return -EIO;
+ }
+
+ if (bucket_gen != p.ptr.gen && !p.ptr.cached) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s: stale dirty ptr (gen %u)\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
+ bucket_gen,
+ bch2_data_types[*bucket_data_type ?: ptr_data_type],
+ p.ptr.gen,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ return -EIO;
+ }
+
+ if (bucket_gen != p.ptr.gen)
+ return 1;
+
+ if (*bucket_data_type && *bucket_data_type != ptr_data_type) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
+ bucket_gen,
+ bch2_data_types[*bucket_data_type],
+ bch2_data_types[ptr_data_type],
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ return -EIO;
+ }
+
+ if (checked_add(*dst_sectors, sectors)) {
+ bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U16_MAX\n"
+ "while marking %s",
+ p.ptr.dev, PTR_BUCKET_NR(bch_dev_bkey_exists(c, p.ptr.dev), &p.ptr),
+ bucket_gen,
+ bch2_data_types[*bucket_data_type ?: ptr_data_type],
+ orig_sectors, sectors,
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
+ return -EIO;
+ }
+
+ *bucket_data_type = *dirty_sectors || *cached_sectors
+ ? ptr_data_type : 0;
+ return 0;
+}
+
+static int bch2_mark_pointer(struct bch_fs *c, struct bkey_s_c k,
+ struct extent_ptr_decoded p,
+ s64 sectors, enum bch_data_type data_type,
+ struct bch_fs_usage *fs_usage,
+ u64 journal_seq, unsigned flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
struct bucket_mark old, new;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bucket *g = PTR_BUCKET(ca, &p.ptr, gc);
- bool overflow;
+ u8 bucket_data_type;
u64 v;
+ int ret;
v = atomic64_read(&g->_mark.v);
do {
new.v.counter = old.v.counter = v;
+ bucket_data_type = new.data_type;
- /*
- * Check this after reading bucket mark to guard against
- * the allocator invalidating a bucket after we've already
- * checked the gen
- */
- if (gen_after(p.ptr.gen, new.gen)) {
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "pointer gen in the future");
- return true;
- }
-
- if (new.gen != p.ptr.gen) {
- /* XXX write repair code for this */
- if (!p.ptr.cached &&
- test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "stale dirty pointer");
- return true;
- }
-
- if (!p.ptr.cached)
- overflow = checked_add(new.dirty_sectors, sectors);
- else
- overflow = checked_add(new.cached_sectors, sectors);
+ ret = __mark_pointer(c, k, p, sectors, data_type, new.gen,
+ &bucket_data_type,
+ &new.dirty_sectors,
+ &new.cached_sectors);
+ if (ret)
+ return ret;
- if (!new.dirty_sectors &&
- !new.cached_sectors) {
- new.data_type = 0;
+ new.data_type = bucket_data_type;
- if (journal_seq) {
- new.journal_seq_valid = 1;
- new.journal_seq = journal_seq;
- }
- } else {
- new.data_type = data_type;
+ if (journal_seq) {
+ new.journal_seq_valid = 1;
+ new.journal_seq = journal_seq;
}
if (flags & BTREE_TRIGGER_NOATOMIC) {
@@ -978,25 +1046,11 @@ static bool bch2_mark_pointer(struct bch_fs *c,
old.v.counter,
new.v.counter)) != old.v.counter);
- if (old.data_type && old.data_type != data_type)
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
- p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
- new.gen,
- bch2_data_types[old.data_type],
- bch2_data_types[data_type]);
-
- bch2_fs_inconsistent_on(overflow, c,
- "bucket sector count overflow: %u + %lli > U16_MAX",
- !p.ptr.cached
- ? old.dirty_sectors
- : old.cached_sectors, sectors);
-
bch2_dev_usage_update(c, ca, fs_usage, old, new, gc);
BUG_ON(!gc && bucket_became_unavailable(old, new));
- return false;
+ return 0;
}
static int bch2_mark_stripe_ptr(struct bch_fs *c,
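
Aside: the rewritten bch2_mark_pointer() keeps its lockless shape — load the packed bucket mark, validate and modify a local copy (now via __mark_pointer(), which can fail), and publish it with cmpxchg, retrying on contention. A self-contained sketch of that pattern, including the signed 8-bit generation comparison that survives wraparound (toy mark layout, not the real one):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Gen comparison that survives 8-bit wraparound: the difference is
 * interpreted as signed, so a gen that has wrapped still compares newer.
 */
static int gen_cmp(uint8_t a, uint8_t b)
{
	return (int8_t) (a - b);
}

/* Toy packed bucket mark. */
union packed_mark {
	struct { uint8_t gen; uint8_t pad; uint16_t dirty_sectors; } m;
	uint32_t v;
};

/* Retry loop in the style of bch2_mark_pointer(): validate and modify a
 * local copy, publish with compare-exchange, retry if the mark changed.
 */
static int mark_pointer(_Atomic uint32_t *bucket, uint8_t ptr_gen, unsigned sectors)
{
	union packed_mark old, new;

	old.v = atomic_load(bucket);
	do {
		new = old;

		if (gen_cmp(ptr_gen, new.m.gen) > 0)
			return -1;	/* ptr gen newer than bucket gen: corruption */
		if (new.m.gen != ptr_gen)
			return 1;	/* stale pointer: nothing to account */

		new.m.dirty_sectors += sectors;
	} while (!atomic_compare_exchange_weak(bucket, &old.v, new.v));

	return 0;
}

int main(void)
{
	union packed_mark init = { .v = 0 };
	_Atomic uint32_t bucket;

	init.m.gen = 3;
	atomic_init(&bucket, init.v);
	printf("mark_pointer: %d\n", mark_pointer(&bucket, 3, 8));
	return 0;
}
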
@@ -1060,6 +1114,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
struct extent_ptr_decoded p;
struct bch_replicas_padded r;
s64 dirty_sectors = 0;
+ bool stale;
int ret;
r.e.data_type = data_type;
@@ -1072,8 +1127,13 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
s64 disk_sectors = data_type == BCH_DATA_BTREE
? sectors
: ptr_disk_sectors_delta(p, offset, sectors, flags);
- bool stale = bch2_mark_pointer(c, p, disk_sectors, data_type,
- fs_usage, journal_seq, flags);
+
+ ret = bch2_mark_pointer(c, k, p, disk_sectors, data_type,
+ fs_usage, journal_seq, flags);
+ if (ret < 0)
+ return ret;
+
+ stale = ret > 0;
if (p.ptr.cached) {
if (!stale)
@@ -1175,7 +1235,7 @@ static int bch2_mark_stripe(struct bch_fs *c, struct bkey_s_c k,
return 0;
}
-int bch2_mark_key_locked(struct bch_fs *c,
+static int bch2_mark_key_locked(struct bch_fs *c,
struct bkey_s_c k,
unsigned offset, s64 sectors,
struct bch_fs_usage *fs_usage,
@@ -1307,8 +1367,8 @@ int bch2_mark_update(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b = iter_l(iter)->b;
+ struct btree_node_iter node_iter = iter_l(iter)->iter;
struct bkey_packed *_k;
int ret = 0;
@@ -1370,45 +1430,49 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
disk_res_sectors);
trans_for_each_update(trans, i) {
- struct btree_iter *iter = i->iter;
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
- struct bkey_packed *_k;
-
pr_err("while inserting");
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(i->k));
pr_err("%s", buf);
pr_err("overlapping with");
- node_iter = iter->l[0].iter;
- while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
- struct bkey unpacked;
- struct bkey_s_c k;
+ if (btree_iter_type(i->iter) != BTREE_ITER_CACHED) {
+ struct btree *b = iter_l(i->iter)->b;
+ struct btree_node_iter node_iter = iter_l(i->iter)->iter;
+ struct bkey_packed *_k;
- k = bkey_disassemble(b, _k, &unpacked);
+ while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
+ struct bkey unpacked;
+ struct bkey_s_c k;
- if (btree_node_is_extents(b)
- ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
- : bkey_cmp(i->k->k.p, k.k->p))
- break;
+ pr_info("_k %px format %u", _k, _k->format);
+ k = bkey_disassemble(b, _k, &unpacked);
- bch2_bkey_val_to_text(&PBUF(buf), c, k);
- pr_err("%s", buf);
+ if (btree_node_is_extents(b)
+ ? bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) <= 0
+ : bkey_cmp(i->k->k.p, k.k->p))
+ break;
+
+ bch2_bkey_val_to_text(&PBUF(buf), c, k);
+ pr_err("%s", buf);
+
+ bch2_btree_node_iter_advance(&node_iter, b);
+ }
+ } else {
+ struct bkey_cached *ck = (void *) i->iter->l[0].b;
- bch2_btree_node_iter_advance(&node_iter, b);
+ bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(ck->k));
+ pr_err("%s", buf);
}
}
}
/* trans_mark: */
-static int trans_get_key(struct btree_trans *trans,
- enum btree_id btree_id, struct bpos pos,
- struct btree_iter **iter,
- struct bkey_s_c *k)
+static struct btree_iter *trans_get_update(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos,
+ struct bkey_s_c *k)
{
struct btree_insert_entry *i;
- int ret;
trans_for_each_update(trans, i)
if (i->iter->btree_id == btree_id &&
@@ -1416,17 +1480,33 @@ static int trans_get_key(struct btree_trans *trans,
? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&
bkey_cmp(pos, i->k->k.p) < 0
: !bkey_cmp(pos, i->iter->pos))) {
- *iter = i->iter;
- *k = bkey_i_to_s_c(i->k);
- return 1;
+ *k = bkey_i_to_s_c(i->k);
+ return i->iter;
}
+ return NULL;
+}
+
+static int trans_get_key(struct btree_trans *trans,
+ enum btree_id btree_id, struct bpos pos,
+ struct btree_iter **iter,
+ struct bkey_s_c *k)
+{
+ unsigned flags = btree_id != BTREE_ID_ALLOC
+ ? BTREE_ITER_SLOTS
+ : BTREE_ITER_CACHED;
+ int ret;
+
+ *iter = trans_get_update(trans, btree_id, pos, k);
+ if (*iter)
+ return 1;
+
*iter = bch2_trans_get_iter(trans, btree_id, pos,
- BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+ flags|BTREE_ITER_INTENT);
if (IS_ERR(*iter))
return PTR_ERR(*iter);
- *k = bch2_btree_iter_peek_slot(*iter);
+ *k = __bch2_btree_iter_peek(*iter, flags);
ret = bkey_err(*k);
if (ret)
bch2_trans_iter_put(trans, *iter);
@@ -1434,85 +1514,44 @@ static int trans_get_key(struct btree_trans *trans,
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
- struct extent_ptr_decoded p,
+ struct bkey_s_c k, struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ struct bpos pos = POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr));
struct btree_iter *iter;
- struct bkey_s_c k;
+ struct bkey_s_c k_a;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc *a;
- u16 *dst_sectors, orig_sectors;
+ struct bucket *g;
int ret;
- ret = trans_get_key(trans, BTREE_ID_ALLOC,
- POS(p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr)),
- &iter, &k);
- if (ret < 0)
- return ret;
-
- if (!ret && unlikely(!test_bit(BCH_FS_ALLOC_WRITTEN, &c->flags))) {
- /*
- * During journal replay, and if gc repairs alloc info at
- * runtime, the alloc info in the btree might not be up to date
- * yet - so, trust the in memory mark:
- */
- struct bucket *g;
- struct bucket_mark m;
-
- percpu_down_read(&c->mark_lock);
- g = bucket(ca, iter->pos.offset);
- m = READ_ONCE(g->mark);
- u = alloc_mem_to_key(g, m);
- percpu_up_read(&c->mark_lock);
+ iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k_a);
+ if (iter) {
+ u = bch2_alloc_unpack(k_a);
} else {
- /*
- * Unless we're already updating that key:
- */
- if (k.k->type != KEY_TYPE_alloc) {
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "pointer to nonexistent bucket %llu:%llu",
- iter->pos.inode, iter->pos.offset);
- ret = -1;
+ iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = bch2_btree_iter_traverse(iter);
+ if (ret)
goto out;
- }
-
- u = bch2_alloc_unpack(k);
- }
-
- if (gen_after(u.gen, p.ptr.gen)) {
- ret = 1;
- goto out;
- }
- if (u.data_type && u.data_type != data_type) {
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
- "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s",
- iter->pos.inode, iter->pos.offset,
- u.gen,
- bch2_data_types[u.data_type],
- bch2_data_types[data_type]);
- ret = -1;
- goto out;
+ percpu_down_read(&c->mark_lock);
+ g = bucket(ca, pos.offset);
+ u = alloc_mem_to_key(g, READ_ONCE(g->mark));
+ percpu_up_read(&c->mark_lock);
}
- dst_sectors = !p.ptr.cached
- ? &u.dirty_sectors
- : &u.cached_sectors;
- orig_sectors = *dst_sectors;
-
- if (checked_add(*dst_sectors, sectors)) {
- bch2_fs_inconsistent(c,
- "bucket sector count overflow: %u + %lli > U16_MAX",
- orig_sectors, sectors);
- /* return an error indicating that we need full fsck */
- ret = -EIO;
+ ret = __mark_pointer(c, k, p, sectors, data_type, u.gen, &u.data_type,
+ &u.dirty_sectors, &u.cached_sectors);
+ if (ret)
goto out;
- }
-
- u.data_type = u.dirty_sectors || u.cached_sectors
- ? data_type : 0;
a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
ret = PTR_ERR_OR_ZERO(a);
@@ -1520,7 +1559,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
goto out;
bkey_alloc_init(&a->k_i);
- a->k.p = iter->pos;
+ a->k.p = pos;
bch2_alloc_pack(a, u);
bch2_trans_update(trans, iter, &a->k_i, 0);
out:
@@ -1597,7 +1636,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
? sectors
: ptr_disk_sectors_delta(p, offset, sectors, flags);
- ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
+ ret = bch2_trans_mark_pointer(trans, k, p, disk_sectors,
data_type);
if (ret < 0)
return ret;
@@ -1774,8 +1813,8 @@ int bch2_trans_mark_update(struct btree_trans *trans,
struct bkey_i *insert,
unsigned flags)
{
- struct btree *b = iter->l[0].b;
- struct btree_node_iter node_iter = iter->l[0].iter;
+ struct btree *b = iter_l(iter)->b;
+ struct btree_node_iter node_iter = iter_l(iter)->iter;
struct bkey_packed *_k;
int ret;
@@ -1793,6 +1832,13 @@ int bch2_trans_mark_update(struct btree_trans *trans,
if (unlikely(flags & BTREE_TRIGGER_NOOVERWRITES))
return 0;
+ if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+ struct bkey_cached *ck = (void *) iter->l[0].b;
+
+ return bch2_trans_mark_key(trans, bkey_i_to_s_c(ck->k),
+ 0, 0, BTREE_TRIGGER_OVERWRITE);
+ }
+
while ((_k = bch2_btree_node_iter_peek(&node_iter, b))) {
struct bkey unpacked;
struct bkey_s_c k;
@@ -2009,8 +2055,10 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
swap(ca->buckets_nouse, buckets_nouse);
- if (resize)
+ if (resize) {
percpu_up_write(&c->mark_lock);
+ up_write(&c->gc_lock);
+ }
spin_lock(&c->freelist_lock);
for (i = 0; i < RESERVE_NR; i++) {
@@ -2029,10 +2077,8 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
nbuckets = ca->mi.nbuckets;
- if (resize) {
+ if (resize)
up_write(&ca->bucket_lock);
- up_write(&c->gc_lock);
- }
if (start_copygc &&
bch2_copygc_start(c, ca))
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 765650ce9d0a..97265fe90e96 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -259,8 +259,6 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
-int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c, unsigned, s64,
- struct bch_fs_usage *, u64, unsigned);
int bch2_mark_key(struct bch_fs *, struct bkey_s_c, unsigned, s64,
struct bch_fs_usage *, u64, unsigned);
int bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index f3ff4a18b1fd..53f22726893d 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -39,6 +39,7 @@ struct bucket {
u16 io_time[2];
u8 oldest_gen;
+ u8 gc_gen;
unsigned gen_valid:1;
};
@@ -52,12 +53,14 @@ struct bucket_array {
struct bch_dev_usage {
u64 buckets[BCH_DATA_NR];
u64 buckets_alloc;
- u64 buckets_ec;
u64 buckets_unavailable;
/* _compressed_ sectors: */
u64 sectors[BCH_DATA_NR];
u64 sectors_fragmented;
+
+ u64 buckets_ec;
+ u64 sectors_ec;
};
struct bch_fs_usage {
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 5028d0dcc2d6..3af521947502 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -470,9 +470,12 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
src = bch2_dev_usage_read(c, ca);
- arg.state = ca->mi.state;
- arg.bucket_size = ca->mi.bucket_size;
- arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
+ arg.state = ca->mi.state;
+ arg.bucket_size = ca->mi.bucket_size;
+ arg.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket;
+ arg.available_buckets = arg.nr_buckets - src.buckets_unavailable;
+ arg.ec_buckets = src.buckets_ec;
+ arg.ec_sectors = src.sectors_ec;
for (i = 0; i < BCH_DATA_NR; i++) {
arg.buckets[i] = src.buckets[i];
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index a01073e54a33..3d88719ba86c 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -10,7 +10,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
#include <crypto/hash.h>
#include <crypto/poly1305.h>
#include <crypto/skcipher.h>
@@ -68,21 +68,21 @@ static u64 bch2_checksum_update(unsigned type, u64 crc, const void *data, size_t
}
}
-static inline void do_encrypt_sg(struct crypto_skcipher *tfm,
+static inline void do_encrypt_sg(struct crypto_sync_skcipher *tfm,
struct nonce nonce,
struct scatterlist *sg, size_t len)
{
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
ret = crypto_skcipher_encrypt(req);
BUG_ON(ret);
}
-static inline void do_encrypt(struct crypto_skcipher *tfm,
+static inline void do_encrypt(struct crypto_sync_skcipher *tfm,
struct nonce nonce,
void *buf, size_t len)
{
@@ -95,8 +95,8 @@ static inline void do_encrypt(struct crypto_skcipher *tfm,
int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
void *buf, size_t len)
{
- struct crypto_skcipher *chacha20 =
- crypto_alloc_skcipher("chacha20", 0, 0);
+ struct crypto_sync_skcipher *chacha20 =
+ crypto_alloc_sync_skcipher("chacha20", 0, 0);
int ret;
if (!chacha20) {
@@ -104,7 +104,8 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
return PTR_ERR(chacha20);
}
- ret = crypto_skcipher_setkey(chacha20, (void *) key, sizeof(*key));
+ ret = crypto_skcipher_setkey(&chacha20->base,
+ (void *) key, sizeof(*key));
if (ret) {
pr_err("crypto_skcipher_setkey() error: %i", ret);
goto err;
@@ -112,7 +113,7 @@ int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
do_encrypt(chacha20, nonce, buf, len);
err:
- crypto_free_skcipher(chacha20);
+ crypto_free_sync_skcipher(chacha20);
return ret;
}
@@ -199,7 +200,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
kunmap_atomic(p);
}
#else
- __bio_for_each_contig_segment(bv, bio, *iter, *iter)
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
crc = bch2_checksum_update(type, crc,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
@@ -224,7 +225,7 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
kunmap_atomic(p);
}
#else
- __bio_for_each_contig_segment(bv, bio, *iter, *iter)
+ __bio_for_each_bvec(bv, bio, *iter, *iter)
crypto_shash_update(desc,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
@@ -463,7 +464,7 @@ err:
static int bch2_alloc_ciphers(struct bch_fs *c)
{
if (!c->chacha20)
- c->chacha20 = crypto_alloc_skcipher("chacha20", 0, 0);
+ c->chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
if (IS_ERR(c->chacha20)) {
bch_err(c, "error requesting chacha20 module: %li",
PTR_ERR(c->chacha20));
@@ -546,7 +547,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
goto err;
}
- ret = crypto_skcipher_setkey(c->chacha20,
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
(void *) &key.key, sizeof(key.key));
if (ret)
goto err;
@@ -574,7 +575,7 @@ void bch2_fs_encryption_exit(struct bch_fs *c)
if (!IS_ERR_OR_NULL(c->poly1305))
crypto_free_shash(c->poly1305);
if (!IS_ERR_OR_NULL(c->chacha20))
- crypto_free_skcipher(c->chacha20);
+ crypto_free_sync_skcipher(c->chacha20);
if (!IS_ERR_OR_NULL(c->sha256))
crypto_free_shash(c->sha256);
}
@@ -606,7 +607,7 @@ int bch2_fs_encryption_init(struct bch_fs *c)
if (ret)
goto out;
- ret = crypto_skcipher_setkey(c->chacha20,
+ ret = crypto_skcipher_setkey(&c->chacha20->base,
(void *) &key.key, sizeof(key.key));
if (ret)
goto out;
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 833537cc8fd0..24dee8039d57 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -7,7 +7,7 @@
#include "super-io.h"
#include <linux/crc64.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
static inline bool bch2_checksum_mergeable(unsigned type)
{
@@ -138,9 +138,9 @@ static inline bool bch2_crc_cmp(struct bch_csum l, struct bch_csum r)
/* for skipping ahead and encrypting/decrypting at an offset: */
static inline struct nonce nonce_add(struct nonce nonce, unsigned offset)
{
- EBUG_ON(offset & (CHACHA20_BLOCK_SIZE - 1));
+ EBUG_ON(offset & (CHACHA_BLOCK_SIZE - 1));
- le32_add_cpu(&nonce.d[0], offset / CHACHA20_BLOCK_SIZE);
+ le32_add_cpu(&nonce.d[0], offset / CHACHA_BLOCK_SIZE);
return nonce;
}
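
Aside: nonce_add() above lets en/decryption resume at a byte offset by bumping the ChaCha nonce's counter word by the number of 64-byte blocks skipped; the offset must be block-aligned. A userspace sketch of the same idea (host-endian here, whereas the real helper does a little-endian add):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CHACHA_BLOCK_SIZE 64		/* ChaCha operates on 64-byte blocks */

struct nonce { uint32_t d[4]; };	/* sketch: treated as host-endian */

/* Advance the counter word by the number of whole blocks skipped. */
static struct nonce nonce_add(struct nonce nonce, unsigned offset)
{
	assert(!(offset & (CHACHA_BLOCK_SIZE - 1)));

	nonce.d[0] += offset / CHACHA_BLOCK_SIZE;
	return nonce;
}

int main(void)
{
	struct nonce n = { { 0, 1, 2, 3 } };

	n = nonce_add(n, 3 * CHACHA_BLOCK_SIZE);
	printf("counter word now %u\n", n.d[0]);
	return 0;
}
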
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index d9de0d1302e2..a9f5d5696622 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -162,7 +162,7 @@ ssize_t bch2_io_timers_show(struct io_clock *clock, char *buf)
now = atomic_long_read(&clock->now);
for (i = 0; i < clock->timers.used; i++)
- pr_buf(&out, "%pf:\t%li\n",
+ pr_buf(&out, "%ps:\t%li\n",
clock->timers.data[i]->fn,
clock->timers.data[i]->expire - now);
spin_unlock(&clock->timer_lock);
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index c431b1e79702..3d75527d2d81 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -7,6 +7,7 @@
#include "super-io.h"
#include <linux/lz4.h>
+#include <linux/sched/mm.h>
#include <linux/zlib.h>
#include <linux/zstd.h>
@@ -45,7 +46,7 @@ static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
struct bvec_iter iter;
void *expected_start = NULL;
- __bio_for_each_segment(bv, bio, iter, start) {
+ __bio_for_each_bvec(bv, bio, iter, start) {
if (expected_start &&
expected_start != page_address(bv.bv_page) + bv.bv_offset)
return false;
@@ -63,7 +64,7 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
struct bbuf ret;
struct bio_vec bv;
struct bvec_iter iter;
- unsigned nr_pages = 0;
+ unsigned nr_pages = 0, flags;
struct page *stack_pages[16];
struct page **pages = NULL;
void *data;
@@ -103,7 +104,10 @@ static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
__bio_for_each_segment(bv, bio, iter, start)
pages[nr_pages++] = bv.bv_page;
+ flags = memalloc_nofs_save();
data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+ memalloc_nofs_restore(flags);
+
if (pages != stack_pages)
kfree(pages);
@@ -603,7 +607,7 @@ have_compressed:
}
if (!mempool_initialized(&c->decompress_workspace)) {
- ret = mempool_init_kmalloc_pool(
+ ret = mempool_init_kvpmalloc_pool(
&c->decompress_workspace,
1, decompress_workspace_size);
if (ret)
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 69b123bad83b..aa10591a3b1a 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -52,8 +52,8 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
bkey_copy(&v->key, &b->key);
v->written = 0;
- v->level = b->level;
- v->btree_id = b->btree_id;
+ v->c.level = b->c.level;
+ v->c.btree_id = b->c.btree_id;
bch2_btree_keys_init(v, &c->expensive_debug_checks);
if (bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
@@ -97,10 +97,10 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
console_lock();
printk(KERN_ERR "*** in memory:\n");
- bch2_dump_bset(b, inmemory, 0);
+ bch2_dump_bset(c, b, inmemory, 0);
printk(KERN_ERR "*** read back in:\n");
- bch2_dump_bset(v, sorted, 0);
+ bch2_dump_bset(c, v, sorted, 0);
while (offset < b->written) {
if (!offset) {
@@ -117,7 +117,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
}
printk(KERN_ERR "*** on disk block %u:\n", offset);
- bch2_dump_bset(b, i, offset);
+ bch2_dump_bset(c, b, i, offset);
offset += sectors;
}
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index ae5c9fd8d9f7..f34bfda8ab0d 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -104,7 +104,7 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
bch_scnmemcpy(out, d.v->d_name,
bch2_dirent_name_bytes(d));
- pr_buf(out, " -> %llu", d.v->d_inum);
+ pr_buf(out, " -> %llu type %u", d.v->d_inum, d.v->d_type);
}
static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 933945b65925..9442d6e4041c 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1273,38 +1273,28 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
return ret;
}
-int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
+static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_s_c k)
{
- struct btree_trans trans;
- struct btree_and_journal_iter iter;
- struct bkey_s_c k;
- int ret;
-
- ret = bch2_fs_ec_start(c);
- if (ret)
- return ret;
-
- bch2_trans_init(&trans, c, 0, 0);
-
- bch2_btree_and_journal_iter_init(&iter, &trans, journal_keys,
- BTREE_ID_EC, POS_MIN);
-
+ int ret = 0;
- while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
- bch2_mark_key(c, k, 0, 0, NULL, 0,
- BTREE_TRIGGER_ALLOC_READ|
- BTREE_TRIGGER_NOATOMIC);
+ if (k.k->type == KEY_TYPE_stripe)
+ ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL) ?:
+ bch2_mark_key(c, k, 0, 0, NULL, 0,
+ BTREE_TRIGGER_ALLOC_READ|
+ BTREE_TRIGGER_NOATOMIC);
- bch2_btree_and_journal_iter_advance(&iter);
- }
+ return ret;
+}
- ret = bch2_trans_exit(&trans) ?: ret;
- if (ret) {
+int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
+{
+ int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_EC,
+ NULL, bch2_stripes_read_fn);
+ if (ret)
bch_err(c, "error reading stripes: %i", ret);
- return ret;
- }
- return 0;
+ return ret;
}
int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
@@ -1343,11 +1333,6 @@ int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
return 0;
}
-int bch2_fs_ec_start(struct bch_fs *c)
-{
- return bch2_ec_mem_alloc(c, false);
-}
-
void bch2_fs_ec_exit(struct bch_fs *c)
{
struct ec_stripe_head *h;
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index cf67abd48490..4dfaac034886 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -157,8 +157,6 @@ int bch2_stripes_write(struct bch_fs *, unsigned, bool *);
int bch2_ec_mem_alloc(struct bch_fs *, bool);
-int bch2_fs_ec_start(struct bch_fs *);
-
void bch2_fs_ec_exit(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 5a5cfee623e2..cd46706fb6f5 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -37,7 +37,7 @@ void bch2_io_error_work(struct work_struct *work)
struct bch_fs *c = ca->fs;
bool dev;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO,
BCH_FORCE_IF_DEGRADED);
if (dev
@@ -47,7 +47,7 @@ void bch2_io_error_work(struct work_struct *work)
bch_err(ca,
"too many IO errors, setting %s RO",
dev ? "device" : "filesystem");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
void bch2_io_error(struct bch_dev *ca)
@@ -85,7 +85,7 @@ enum fsck_err_ret bch2_fsck_err(struct bch_fs *c, unsigned flags,
if (s->fmt == fmt)
goto found;
- s = kzalloc(sizeof(*s), GFP_KERNEL);
+ s = kzalloc(sizeof(*s), GFP_NOFS);
if (!s) {
if (!c->fsck_alloc_err)
bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index de319794ccd1..94b53312fbbd 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -102,6 +102,7 @@ struct fsck_err_state {
#define FSCK_CAN_IGNORE (1 << 1)
#define FSCK_NEED_FSCK (1 << 2)
+__printf(3, 4) __cold
enum fsck_err_ret bch2_fsck_err(struct bch_fs *,
unsigned, const char *, ...);
void bch2_flush_fsck_errs(struct bch_fs *);
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index d0af1bc17018..fd011df3cb99 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -76,7 +76,8 @@ static int count_iters_for_insert(struct btree_trans *trans,
if (*nr_iters >= max_iters) {
struct bpos pos = bkey_start_pos(k.k);
- pos.offset += r_k.k->p.offset - idx;
+ pos.offset += min_t(u64, k.k->size,
+ r_k.k->p.offset - idx);
*end = bpos_min(*end, pos);
ret = 1;
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index d1a4ab04fbbf..251d4af773a5 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -220,7 +220,7 @@ void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
{
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
- pr_buf(out, "seq %llu sectors %u written %u min_key ",
+ pr_buf(out, "seq %llx sectors %u written %u min_key ",
le64_to_cpu(bp.v->seq),
le16_to_cpu(bp.v->sectors),
le16_to_cpu(bp.v->sectors_written));
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index ab2d808eea43..162aa55d5b20 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -54,6 +54,7 @@ struct dio_write {
sync:1,
free_iov:1;
struct quota_res quota_res;
+ u64 written;
struct iov_iter iter;
struct iovec inline_vecs[2];
@@ -602,7 +603,7 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
EBUG_ON(!PageLocked(page));
EBUG_ON(!PageLocked(newpage));
- ret = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+ ret = migrate_page_move_mapping(mapping, newpage, page, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
@@ -627,10 +628,10 @@ int bch2_migrate_page(struct address_space *mapping, struct page *newpage,
static void bch2_readpages_end_io(struct bio *bio)
{
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
- bio_for_each_segment_all(bv, bio, i) {
+ bio_for_each_segment_all(bv, bio, iter) {
struct page *page = bv->bv_page;
if (!bio->bi_status) {
@@ -782,11 +783,8 @@ static void readpage_bio_extend(struct readpages_iter *iter,
if (!get_more)
break;
- rcu_read_lock();
- page = radix_tree_lookup(&iter->mapping->i_pages, page_offset);
- rcu_read_unlock();
-
- if (page && !radix_tree_exceptional_entry(page))
+ page = xa_load(&iter->mapping->i_pages, page_offset);
+ if (page && !xa_is_value(page))
break;
page = __page_cache_alloc(readahead_gfp_mask(iter->mapping));
@@ -848,7 +846,7 @@ retry:
sectors = k.k->size - offset_into_extent;
ret = bch2_read_indirect_extent(trans,
- &offset_into_extent, sk.k);
+ &offset_into_extent, &sk);
if (ret)
break;
@@ -1037,32 +1035,33 @@ static void bch2_writepage_io_done(struct closure *cl)
struct bch_writepage_io, cl);
struct bch_fs *c = io->op.c;
struct bio *bio = &io->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bvec;
- unsigned i, j;
+ unsigned i;
if (io->op.error) {
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
SetPageError(bvec->bv_page);
- mapping_set_error(io->inode->v.i_mapping, -EIO);
+ mapping_set_error(bvec->bv_page->mapping, -EIO);
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s;
s = __bch2_page_state(bvec->bv_page);
spin_lock(&s->lock);
- for (j = 0; j < PAGE_SECTORS; j++)
- s->s[j].nr_replicas = 0;
+ for (i = 0; i < PAGE_SECTORS; i++)
+ s->s[i].nr_replicas = 0;
spin_unlock(&s->lock);
}
}
@@ -1086,7 +1085,7 @@ static void bch2_writepage_io_done(struct closure *cl)
*/
i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);
- bio_for_each_segment_all(bvec, bio, i) {
+ bio_for_each_segment_all(bvec, bio, iter) {
struct bch_page_state *s = __bch2_page_state(bvec->bv_page);
if (atomic_dec_and_test(&s->write_count))
@@ -1240,7 +1239,7 @@ do_io:
if (w->io &&
(w->io->op.res.nr_replicas != nr_replicas_this_write ||
- bio_full(&w->io->op.wbio.bio) ||
+ bio_full(&w->io->op.wbio.bio, PAGE_SIZE) ||
w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
(BIO_MAX_PAGES * PAGE_SIZE) ||
bio_end_sector(&w->io->op.wbio.bio) != sector))
@@ -1798,17 +1797,19 @@ ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
/* O_DIRECT writes */
+static void bch2_dio_write_loop_async(struct bch_write_op *);
+
static long bch2_dio_write_loop(struct dio_write *dio)
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
- struct bch_fs *c = dio->op.c;
struct kiocb *req = dio->req;
struct address_space *mapping = req->ki_filp->f_mapping;
struct bch_inode_info *inode = file_bch_inode(req->ki_filp);
+ struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bio *bio = &dio->op.wbio.bio;
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i, unaligned;
- u64 new_i_size;
+ unsigned unaligned;
bool sync = dio->sync;
long ret;
@@ -1849,14 +1850,30 @@ static long bch2_dio_write_loop(struct dio_write *dio)
* bio_iov_iter_get_pages was only able to get <
* blocksize worth of pages:
*/
- bio_for_each_segment_all(bv, bio, i)
+ bio_for_each_segment_all(bv, bio, iter)
put_page(bv->bv_page);
ret = -EFAULT;
goto err;
}
- dio->op.pos = POS(inode->v.i_ino,
- (req->ki_pos >> 9) + dio->op.written);
+ bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
+ dio->op.end_io = bch2_dio_write_loop_async;
+ dio->op.target = dio->op.opts.foreground_target;
+ op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
+ dio->op.write_point = writepoint_hashed((unsigned long) current);
+ dio->op.nr_replicas = dio->op.opts.data_replicas;
+ dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
+
+ if ((req->ki_flags & IOCB_DSYNC) &&
+ !c->opts.journal_flush_disabled)
+ dio->op.flags |= BCH_WRITE_FLUSH;
+
+ ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
+ dio->op.opts.data_replicas, 0);
+ if (unlikely(ret) &&
+ !bch2_check_range_allocated(c, dio->op.pos,
+ bio_sectors(bio), dio->op.opts.data_replicas))
+ goto err;
task_io_account_write(bio->bi_iter.bi_size);
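
Aside: the dio loop above now takes its disk reservation per iteration and, when the reservation fails, falls back to checking whether the target range is already fully allocated — a pure overwrite can proceed without new space. A sketch of that fallback decision; the fake_* helpers are hypothetical stand-ins, not bcachefs functions:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real reservation / range-check calls. */
static int fake_disk_reservation_get(unsigned sectors, unsigned replicas)
{
	(void) sectors; (void) replicas;
	return -1;			/* pretend the reservation failed */
}

static bool fake_range_fully_allocated(unsigned long long pos, unsigned sectors)
{
	(void) pos; (void) sectors;
	return true;			/* pretend this is a pure overwrite */
}

/* Try to reserve space for this bio; if that fails but every sector in the
 * target range is already allocated, the write only overwrites and may
 * proceed without a reservation.
 */
static int dio_prep(unsigned long long pos, unsigned sectors, unsigned replicas)
{
	int ret = fake_disk_reservation_get(sectors, replicas);

	if (ret && !fake_range_fully_allocated(pos, sectors))
		return ret;		/* genuinely out of space */
	return 0;
}

int main(void)
{
	printf("dio_prep: %d\n", dio_prep(1024, 8, 2));
	return 0;
}
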
@@ -1888,16 +1905,15 @@ do_io:
loop:
i_sectors_acct(c, inode, &dio->quota_res,
dio->op.i_sectors_delta);
- dio->op.i_sectors_delta = 0;
-
- new_i_size = req->ki_pos + ((u64) dio->op.written << 9);
+ req->ki_pos += (u64) dio->op.written << 9;
+ dio->written += dio->op.written;
spin_lock(&inode->v.i_lock);
- if (new_i_size > inode->v.i_size)
- i_size_write(&inode->v, new_i_size);
+ if (req->ki_pos > inode->v.i_size)
+ i_size_write(&inode->v, req->ki_pos);
spin_unlock(&inode->v.i_lock);
- bio_for_each_segment_all(bv, bio, i)
+ bio_for_each_segment_all(bv, bio, iter)
put_page(bv->bv_page);
if (!dio->iter.count || dio->op.error)
break;
@@ -1906,10 +1922,9 @@ loop:
reinit_completion(&dio->done);
}
- ret = dio->op.error ?: ((long) dio->op.written << 9);
+ ret = dio->op.error ?: ((long) dio->written << 9);
err:
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_disk_reservation_put(c, &dio->op.res);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
if (dio->free_iov)
@@ -1944,7 +1959,6 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
struct address_space *mapping = file->f_mapping;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
struct dio_write *dio;
struct bio *bio;
bool locked = true, extending;
@@ -1992,35 +2006,14 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
dio->sync = is_sync_kiocb(req) || extending;
dio->free_iov = false;
dio->quota_res.sectors = 0;
+ dio->written = 0;
dio->iter = *iter;
- bch2_write_op_init(&dio->op, c, opts);
- dio->op.end_io = bch2_dio_write_loop_async;
- dio->op.target = opts.foreground_target;
- op_journal_seq_set(&dio->op, &inode->ei_journal_seq);
- dio->op.write_point = writepoint_hashed((unsigned long) current);
- dio->op.flags |= BCH_WRITE_NOPUT_RESERVATION;
-
- if ((req->ki_flags & IOCB_DSYNC) &&
- !c->opts.journal_flush_disabled)
- dio->op.flags |= BCH_WRITE_FLUSH;
-
ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
iter->count >> 9, true);
if (unlikely(ret))
goto err_put_bio;
- dio->op.nr_replicas = dio->op.opts.data_replicas;
-
- ret = bch2_disk_reservation_get(c, &dio->op.res, iter->count >> 9,
- dio->op.opts.data_replicas, 0);
- if (unlikely(ret) &&
- !bch2_check_range_allocated(c, POS(inode->v.i_ino,
- req->ki_pos >> 9),
- iter->count >> 9,
- dio->op.opts.data_replicas))
- goto err_put_bio;
-
ret = write_invalidate_inode_pages_range(mapping,
req->ki_pos,
req->ki_pos + iter->count - 1);
@@ -2031,12 +2024,9 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
err:
if (locked)
inode_unlock(&inode->v);
- if (ret > 0)
- req->ki_pos += ret;
return ret;
err_put_bio:
bch2_pagecache_block_put(&inode->ei_pagecache_lock);
- bch2_disk_reservation_put(c, &dio->op.res);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
bio_put(bio);
inode_dio_end(&inode->v);
@@ -2834,235 +2824,6 @@ static void mark_range_unallocated(struct bch_inode_info *inode,
} while (index <= end_index);
}
-static int generic_access_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- struct inode *inode = file->f_mapping->host;
- loff_t max_size = inode->i_sb->s_maxbytes;
-
- if (!(file->f_flags & O_LARGEFILE))
- max_size = MAX_NON_LFS;
-
- if (unlikely(pos >= max_size))
- return -EFBIG;
- *count = min(*count, max_size - pos);
- return 0;
-}
-
-static int generic_write_check_limits(struct file *file, loff_t pos,
- loff_t *count)
-{
- loff_t limit = rlimit(RLIMIT_FSIZE);
-
- if (limit != RLIM_INFINITY) {
- if (pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- *count = min(*count, limit - pos);
- }
-
- return generic_access_check_limits(file, pos, count);
-}
-
-static int generic_remap_checks(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *req_count, unsigned int remap_flags)
-{
- struct inode *inode_in = file_in->f_mapping->host;
- struct inode *inode_out = file_out->f_mapping->host;
- uint64_t count = *req_count;
- uint64_t bcount;
- loff_t size_in, size_out;
- loff_t bs = inode_out->i_sb->s_blocksize;
- int ret;
-
- /* The start of both ranges must be aligned to an fs block. */
- if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
- return -EINVAL;
-
- /* Ensure offsets don't wrap. */
- if (pos_in + count < pos_in || pos_out + count < pos_out)
- return -EINVAL;
-
- size_in = i_size_read(inode_in);
- size_out = i_size_read(inode_out);
-
- /* Dedupe requires both ranges to be within EOF. */
- if ((remap_flags & REMAP_FILE_DEDUP) &&
- (pos_in >= size_in || pos_in + count > size_in ||
- pos_out >= size_out || pos_out + count > size_out))
- return -EINVAL;
-
- /* Ensure the infile range is within the infile. */
- if (pos_in >= size_in)
- return -EINVAL;
- count = min(count, size_in - (uint64_t)pos_in);
-
- ret = generic_access_check_limits(file_in, pos_in, &count);
- if (ret)
- return ret;
-
- ret = generic_write_check_limits(file_out, pos_out, &count);
- if (ret)
- return ret;
-
- /*
- * If the user wanted us to link to the infile's EOF, round up to the
- * next block boundary for this check.
- *
- * Otherwise, make sure the count is also block-aligned, having
- * already confirmed the starting offsets' block alignment.
- */
- if (pos_in + count == size_in) {
- bcount = ALIGN(size_in, bs) - pos_in;
- } else {
- if (!IS_ALIGNED(count, bs))
- count = ALIGN_DOWN(count, bs);
- bcount = count;
- }
-
- /* Don't allow overlapped cloning within the same file. */
- if (inode_in == inode_out &&
- pos_out + bcount > pos_in &&
- pos_out < pos_in + bcount)
- return -EINVAL;
-
- /*
- * We shortened the request but the caller can't deal with that, so
- * bounce the request back to userspace.
- */
- if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
- return -EINVAL;
-
- *req_count = count;
- return 0;
-}
-
-static int generic_remap_check_len(struct inode *inode_in,
- struct inode *inode_out,
- loff_t pos_out,
- loff_t *len,
- unsigned int remap_flags)
-{
- u64 blkmask = i_blocksize(inode_in) - 1;
- loff_t new_len = *len;
-
- if ((*len & blkmask) == 0)
- return 0;
-
- if ((remap_flags & REMAP_FILE_DEDUP) ||
- pos_out + *len < i_size_read(inode_out))
- new_len &= ~blkmask;
-
- if (new_len == *len)
- return 0;
-
- if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
- *len = new_len;
- return 0;
- }
-
- return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
-}
-
-static int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- loff_t *len, unsigned int remap_flags)
-{
- struct inode *inode_in = file_inode(file_in);
- struct inode *inode_out = file_inode(file_out);
- bool same_inode = (inode_in == inode_out);
- int ret;
-
- /* Don't touch certain kinds of inodes */
- if (IS_IMMUTABLE(inode_out))
- return -EPERM;
-
- if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
- return -ETXTBSY;
-
- /* Don't reflink dirs, pipes, sockets... */
- if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
- return -EISDIR;
- if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
- return -EINVAL;
-
- /* Zero length dedupe exits immediately; reflink goes to EOF. */
- if (*len == 0) {
- loff_t isize = i_size_read(inode_in);
-
- if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
- return 0;
- if (pos_in > isize)
- return -EINVAL;
- *len = isize - pos_in;
- if (*len == 0)
- return 0;
- }
-
- /* Check that we don't violate system file offset limits. */
- ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
- remap_flags);
- if (ret)
- return ret;
-
- /* Wait for the completion of any pending IOs on both files */
- inode_dio_wait(inode_in);
- if (!same_inode)
- inode_dio_wait(inode_out);
-
- ret = filemap_write_and_wait_range(inode_in->i_mapping,
- pos_in, pos_in + *len - 1);
- if (ret)
- return ret;
-
- ret = filemap_write_and_wait_range(inode_out->i_mapping,
- pos_out, pos_out + *len - 1);
- if (ret)
- return ret;
-
- /*
- * Check that the extents are the same.
- */
- if (remap_flags & REMAP_FILE_DEDUP) {
- bool is_same = false;
-
- ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
- inode_out, pos_out, *len, &is_same);
- if (ret)
- return ret;
- if (!is_same)
- return -EBADE;
- }
-
- ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
- remap_flags);
- if (ret)
- return ret;
-
- /* If can't alter the file contents, we're done. */
- if (!(remap_flags & REMAP_FILE_DEDUP)) {
- /* Update the timestamps, since we can alter file contents. */
- if (!(file_out->f_mode & FMODE_NOCMTIME)) {
- ret = file_update_time(file_out);
- if (ret)
- return ret;
- }
-
- /*
- * Clear the security bits if the process is not being run by
- * root. This keeps people from modifying setuid and setgid
- * binaries.
- */
- ret = file_remove_privs(file_out);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
struct file *file_dst, loff_t pos_dst,
loff_t len, unsigned remap_flags)
@@ -3074,6 +2835,9 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
u64 aligned_len;
loff_t ret = 0;
+ if (!c->opts.reflink)
+ return -EOPNOTSUPP;
+
if (remap_flags & ~(REMAP_FILE_DEDUP|REMAP_FILE_ADVISORY))
return -EINVAL;
@@ -3255,7 +3019,7 @@ static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
loff_t ret = -1;
page = find_lock_entry(mapping, index);
- if (!page || radix_tree_exception(page))
+ if (!page || xa_is_value(page))
return offset;
pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
diff --git a/fs/bcachefs/fs-io.h b/fs/bcachefs/fs-io.h
index 1b593ea707d5..7063556d289b 100644
--- a/fs/bcachefs/fs-io.h
+++ b/fs/bcachefs/fs-io.h
@@ -35,10 +35,6 @@ int bch2_fsync(struct file *, loff_t, loff_t, int);
int bch2_truncate(struct bch_inode_info *, struct iattr *);
long bch2_fallocate_dispatch(struct file *, int, loff_t, loff_t);
-#define REMAP_FILE_ADVISORY (0)
-#define REMAP_FILE_DEDUP (1 << 0)
-#define REMAP_FILE_CAN_SHORTEN (1 << 1)
-
loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
loff_t, loff_t, unsigned);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index b1445bdc3e9d..a47923d67f7a 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -889,7 +889,7 @@ retry:
sectors = k.k->size - offset_into_extent;
ret = bch2_read_indirect_extent(&trans,
- &offset_into_extent, cur.k);
+ &offset_into_extent, &cur);
if (ret)
break;
@@ -966,15 +966,6 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
return bch2_readdir(c, inode->v.i_ino, ctx);
}
-static int bch2_clone_file_range(struct file *file_src, loff_t pos_src,
- struct file *file_dst, loff_t pos_dst,
- u64 len)
-{
- return bch2_remap_file_range(file_src, pos_src,
- file_dst, pos_dst,
- len, 0);
-}
-
static const struct file_operations bch_file_operations = {
.llseek = bch2_llseek,
.read_iter = bch2_read_iter,
@@ -992,7 +983,7 @@ static const struct file_operations bch_file_operations = {
#ifdef CONFIG_COMPAT
.compat_ioctl = bch2_compat_fs_ioctl,
#endif
- .clone_file_range = bch2_clone_file_range,
+ .remap_file_range = bch2_remap_file_range,
};
static const struct inode_operations bch_file_inode_operations = {
@@ -1324,16 +1315,16 @@ static struct bch_fs *__bch2_open_as_blockdevs(const char *dev_name, char * cons
if (IS_ERR(c))
return c;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (!test_bit(BCH_FS_STARTED, &c->flags)) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
closure_put(&c->cl);
pr_err("err mounting %s: incomplete filesystem", dev_name);
return ERR_PTR(-EINVAL);
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
set_bit(BCH_FS_BDEV_MOUNTED, &c->flags);
return c;
@@ -1382,7 +1373,7 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
return ret;
if (opts.read_only != c->opts.read_only) {
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (opts.read_only) {
bch2_fs_read_only(c);
@@ -1392,7 +1383,7 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
ret = bch2_fs_read_write(c);
if (ret) {
bch_err(c, "error going rw: %i", ret);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return -EINVAL;
}
@@ -1401,7 +1392,7 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
c->opts.read_only = opts.read_only;
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
if (opts.errors >= 0)
@@ -1523,7 +1514,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
sb->s_bdi->congested_fn = bch2_congested;
sb->s_bdi->congested_data = c;
- sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
+ sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
for_each_online_member(ca, c, i) {
struct block_device *bdev = ca->disk_sb.bdev;
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 3ab621c62c43..c6ca5968a2e0 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -1169,7 +1169,7 @@ static int check_inode_nlink(struct bch_fs *c,
}
if (!S_ISDIR(u->bi_mode) && link->dir_count) {
- need_fsck_err(c, "non directory with subdirectories",
+ need_fsck_err(c, "non directory with subdirectories (inum %llu)",
u->bi_inum);
return 0;
}
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index b2684803c0d9..8c441050b02a 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -124,10 +124,10 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
- bio_for_each_segment_all(bv, bio, i)
+ bio_for_each_segment_all(bv, bio, iter)
if (bv->bv_page != ZERO_PAGE(0))
mempool_free(bv->bv_page, &c->bio_bounce_pages);
bio->bi_vcnt = 0;
@@ -493,13 +493,15 @@ static void bch2_write_done(struct closure *cl)
if (!op->error && (op->flags & BCH_WRITE_FLUSH))
op->error = bch2_journal_error(&c->journal);
- if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
- bch2_disk_reservation_put(c, &op->res);
+ bch2_disk_reservation_put(c, &op->res);
percpu_ref_put(&c->writes);
bch2_keylist_free(&op->insert_keys, op->inline_keys);
bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
+ if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
+ up(&c->io_in_flight);
+
if (op->end_io) {
EBUG_ON(cl->parent);
closure_debug_destroy(cl);
@@ -1258,6 +1260,12 @@ void bch2_write(struct closure *cl)
goto err;
}
+ /*
+ * Can't ratelimit copygc - we'd deadlock:
+ */
+ if (!(op->flags & BCH_WRITE_FROM_INTERNAL))
+ down(&c->io_in_flight);
+
bch2_increment_clock(c, bio_sectors(bio), WRITE);
data_len = min_t(u64, bio->bi_iter.bi_size,
@@ -1272,8 +1280,7 @@ void bch2_write(struct closure *cl)
continue_at_nobarrier(cl, __bch2_write, NULL);
return;
err:
- if (!(op->flags & BCH_WRITE_NOPUT_RESERVATION))
- bch2_disk_reservation_put(c, &op->res);
+ bch2_disk_reservation_put(c, &op->res);
if (op->end_io) {
EBUG_ON(cl->parent);
@@ -1641,7 +1648,7 @@ retry:
sectors = k.k->size - offset_into_extent;
ret = bch2_read_indirect_extent(&trans,
- &offset_into_extent, sk.k);
+ &offset_into_extent, &sk);
if (ret)
break;
@@ -1943,14 +1950,14 @@ static void bch2_read_endio(struct bio *bio)
int __bch2_read_indirect_extent(struct btree_trans *trans,
unsigned *offset_into_extent,
- struct bkey_i *orig_k)
+ struct bkey_on_stack *orig_k)
{
struct btree_iter *iter;
struct bkey_s_c k;
u64 reflink_offset;
int ret;
- reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k)->v.idx) +
+ reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
*offset_into_extent;
iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
@@ -1973,7 +1980,7 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
}
*offset_into_extent = iter->pos.offset - bkey_start_offset(k.k);
- bkey_reassemble(orig_k, k);
+ bkey_on_stack_reassemble(orig_k, trans->c, k);
err:
bch2_trans_iter_put(trans, iter);
return ret;
@@ -2273,7 +2280,7 @@ retry:
k = bkey_i_to_s_c(sk.k);
ret = bch2_read_indirect_extent(&trans,
- &offset_into_extent, sk.k);
+ &offset_into_extent, &sk);
if (ret)
goto err;
diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h
index c4c847306345..0ad293bd6295 100644
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -3,6 +3,7 @@
#define _BCACHEFS_IO_H
#include "checksum.h"
+#include "bkey_on_stack.h"
#include "io_types.h"
#define to_wbio(_bio) \
@@ -29,14 +30,13 @@ enum bch_write_flags {
BCH_WRITE_PAGES_STABLE = (1 << 4),
BCH_WRITE_PAGES_OWNED = (1 << 5),
BCH_WRITE_ONLY_SPECIFIED_DEVS = (1 << 6),
- BCH_WRITE_NOPUT_RESERVATION = (1 << 7),
- BCH_WRITE_WROTE_DATA_INLINE = (1 << 8),
- BCH_WRITE_FROM_INTERNAL = (1 << 9),
+ BCH_WRITE_WROTE_DATA_INLINE = (1 << 7),
+ BCH_WRITE_FROM_INTERNAL = (1 << 8),
/* Internal: */
- BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 10),
- BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 11),
- BCH_WRITE_DONE = (1 << 12),
+ BCH_WRITE_JOURNAL_SEQ_PTR = (1 << 9),
+ BCH_WRITE_SKIP_CLOSURE_PUT = (1 << 10),
+ BCH_WRITE_DONE = (1 << 11),
};
static inline u64 *op_journal_seq(struct bch_write_op *op)
@@ -110,13 +110,13 @@ struct cache_promote_op;
struct extent_ptr_decoded;
int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
- struct bkey_i *);
+ struct bkey_on_stack *);
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
unsigned *offset_into_extent,
- struct bkey_i *k)
+ struct bkey_on_stack *k)
{
- return k->k.type == KEY_TYPE_reflink_p
+ return k->k->k.type == KEY_TYPE_reflink_p
? __bch2_read_indirect_extent(trans, offset_into_extent, k)
: 0;
}
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 0a4538b3dc60..b4f7b61ba9ac 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -428,9 +428,10 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
static bool journal_preres_available(struct journal *j,
struct journal_preres *res,
- unsigned new_u64s)
+ unsigned new_u64s,
+ unsigned flags)
{
- bool ret = bch2_journal_preres_get_fast(j, res, new_u64s);
+ bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags);
if (!ret)
bch2_journal_reclaim_work(&j->reclaim_work.work);
@@ -440,13 +441,14 @@ static bool journal_preres_available(struct journal *j,
int __bch2_journal_preres_get(struct journal *j,
struct journal_preres *res,
- unsigned new_u64s)
+ unsigned new_u64s,
+ unsigned flags)
{
int ret;
closure_wait_event(&j->preres_wait,
(ret = bch2_journal_error(j)) ||
- journal_preres_available(j, res, new_u64s));
+ journal_preres_available(j, res, new_u64s, flags));
return ret;
}
@@ -959,15 +961,12 @@ void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
void bch2_fs_journal_stop(struct journal *j)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
-
bch2_journal_flush_all_pins(j);
wait_event(j->wait, journal_entry_close(j));
/* do we need to write another journal entry? */
- if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
- c->btree_roots_dirty)
+ if (test_bit(JOURNAL_NOT_EMPTY, &j->flags))
bch2_journal_meta(j);
journal_quiesce(j);
@@ -988,9 +987,8 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
u64 last_seq = cur_seq, nr, seq;
if (!list_empty(journal_entries))
- last_seq = le64_to_cpu(list_first_entry(journal_entries,
- struct journal_replay,
- list)->j.seq);
+ last_seq = le64_to_cpu(list_last_entry(journal_entries,
+ struct journal_replay, list)->j.last_seq);
nr = cur_seq - last_seq;
@@ -1019,8 +1017,10 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
list_for_each_entry(i, journal_entries, list) {
seq = le64_to_cpu(i->j.seq);
+ BUG_ON(seq >= cur_seq);
- BUG_ON(seq < last_seq || seq >= cur_seq);
+ if (seq < last_seq)
+ continue;
journal_seq_pin(j, seq)->devs = i->devs;
}
@@ -1238,14 +1238,14 @@ ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
i, atomic_read(&pin_list->count));
list_for_each_entry(pin, &pin_list->list, list)
- pr_buf(&out, "\t%p %pf\n",
+ pr_buf(&out, "\t%px %ps\n",
pin, pin->flush);
if (!list_empty(&pin_list->flushed))
pr_buf(&out, "flushed:\n");
list_for_each_entry(pin, &pin_list->flushed, list)
- pr_buf(&out, "\t%p %pf\n",
+ pr_buf(&out, "\t%px %ps\n",
pin, pin->flush);
}
spin_unlock(&j->lock);
diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h
index 1ba8b62b9f2b..30de6d96188e 100644
--- a/fs/bcachefs/journal.h
+++ b/fs/bcachefs/journal.h
@@ -199,27 +199,39 @@ bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s)
return entry;
}
+static inline struct jset_entry *
+journal_res_entry(struct journal *j, struct journal_res *res)
+{
+ return vstruct_idx(j->buf[res->idx].data, res->offset);
+}
+
+static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type,
+ enum btree_id id, unsigned level,
+ const void *data, unsigned u64s)
+{
+ memset(entry, 0, sizeof(*entry));
+ entry->u64s = cpu_to_le16(u64s);
+ entry->type = type;
+ entry->btree_id = id;
+ entry->level = level;
+ memcpy_u64s_small(entry->_data, data, u64s);
+
+ return jset_u64s(u64s);
+}
+
static inline void bch2_journal_add_entry(struct journal *j, struct journal_res *res,
unsigned type, enum btree_id id,
unsigned level,
const void *data, unsigned u64s)
{
- struct journal_buf *buf = &j->buf[res->idx];
- struct jset_entry *entry = vstruct_idx(buf->data, res->offset);
- unsigned actual = jset_u64s(u64s);
+ unsigned actual = journal_entry_set(journal_res_entry(j, res),
+ type, id, level, data, u64s);
EBUG_ON(!res->ref);
EBUG_ON(actual > res->u64s);
res->offset += actual;
res->u64s -= actual;
-
- memset(entry, 0, sizeof(*entry));
- entry->u64s = cpu_to_le16(u64s);
- entry->type = type;
- entry->btree_id = id;
- entry->level = level;
- memcpy_u64s(entry->_data, data, u64s);
}
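
journal_entry_set() factors the formatting of a jset_entry out of bch2_journal_add_entry(), so entries can now be built in any caller-owned buffer rather than only at the current journal_res position (presumably what bch2_btree_roots_to_journal_entries() builds on). A hedged sketch of that usage; the helper name and the choice of BCH_JSET_ENTRY_btree_keys are illustrative:

#include "journal.h"

/* hypothetical: append one key as a jset_entry to a caller-owned buffer */
static struct jset_entry *add_key_to_buf_sketch(struct jset_entry *entry,
						enum btree_id id,
						struct bkey_i *k)
{
	journal_entry_set(entry, BCH_JSET_ENTRY_btree_keys,
			  id, 0, k, k->k.u64s);

	/* entry->u64s is now set, so vstruct_next() finds the next slot: */
	return vstruct_next(entry);
}
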
static inline void bch2_journal_add_keys(struct journal *j, struct journal_res *res,
@@ -269,7 +281,7 @@ static inline void bch2_journal_res_put(struct journal *j,
if (!res->ref)
return;
- lock_release(&j->res_map, 0, _THIS_IP_);
+ lock_release(&j->res_map, _THIS_IP_);
while (res->u64s)
bch2_journal_add_entry(j, res,
@@ -287,6 +299,7 @@ int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *,
#define JOURNAL_RES_GET_NONBLOCK (1 << 0)
#define JOURNAL_RES_GET_CHECK (1 << 1)
#define JOURNAL_RES_GET_RESERVED (1 << 2)
+#define JOURNAL_RES_GET_RECLAIM (1 << 3)
static inline int journal_res_get_fast(struct journal *j,
struct journal_res *res,
@@ -394,11 +407,12 @@ static inline void bch2_journal_preres_put(struct journal *j,
}
int __bch2_journal_preres_get(struct journal *,
- struct journal_preres *, unsigned);
+ struct journal_preres *, unsigned, unsigned);
static inline int bch2_journal_preres_get_fast(struct journal *j,
struct journal_preres *res,
- unsigned new_u64s)
+ unsigned new_u64s,
+ unsigned flags)
{
int d = new_u64s - res->u64s;
union journal_preres_state old, new;
@@ -409,7 +423,15 @@ static inline int bch2_journal_preres_get_fast(struct journal *j,
new.reserved += d;
- if (new.reserved > new.remaining)
+ /*
+ * If we're being called from the journal reclaim path, we have
+	 * to unconditionally give out the pre-reservation; there's
+ * nothing else sensible we can do - otherwise we'd recurse back
+ * into the reclaim path and deadlock:
+ */
+
+ if (!(flags & JOURNAL_RES_GET_RECLAIM) &&
+ new.reserved > new.remaining)
return 0;
} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
old.v, new.v)) != old.v);
@@ -426,13 +448,13 @@ static inline int bch2_journal_preres_get(struct journal *j,
if (new_u64s <= res->u64s)
return 0;
- if (bch2_journal_preres_get_fast(j, res, new_u64s))
+ if (bch2_journal_preres_get_fast(j, res, new_u64s, flags))
return 0;
if (flags & JOURNAL_RES_GET_NONBLOCK)
return -EAGAIN;
- return __bch2_journal_preres_get(j, res, new_u64s);
+ return __bch2_journal_preres_get(j, res, new_u64s, flags);
}
/* journal_entry_res: */
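
For illustration, a sketch of how a caller on the reclaim path might take a pre-reservation with the new flag; the helper name is hypothetical. JOURNAL_RES_GET_RECLAIM makes the fast path hand out the reservation even when reserved space exceeds what remains, which is what keeps reclaim from waiting on itself:

#include "journal.h"

static int reclaim_path_preres_sketch(struct journal *j,
				      struct journal_preres *res,
				      unsigned u64s)
{
	int ret = bch2_journal_preres_get(j, res, u64s,
					  JOURNAL_RES_GET_RECLAIM);
	if (ret)
		return ret;

	/* ... do the update that needed the pre-reservation ... */

	bch2_journal_preres_put(j, res);
	return 0;
}
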
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 39bb2154cce1..b7625285b3ad 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
@@ -40,19 +41,21 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
list)->j.last_seq
: 0;
- /* Is this entry older than the range we need? */
- if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
- ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
- goto out;
- }
+ if (!c->opts.read_entire_journal) {
+ /* Is this entry older than the range we need? */
+ if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
+ ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
+ goto out;
+ }
- /* Drop entries we don't need anymore */
- list_for_each_entry_safe(i, pos, jlist->head, list) {
- if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
- break;
- list_del(&i->list);
- kvpfree(i, offsetof(struct journal_replay, j) +
- vstruct_bytes(&i->j));
+ /* Drop entries we don't need anymore */
+ list_for_each_entry_safe(i, pos, jlist->head, list) {
+ if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
+ break;
+ list_del(&i->list);
+ kvpfree(i, offsetof(struct journal_replay, j) +
+ vstruct_bytes(&i->j));
+ }
}
list_for_each_entry_reverse(i, jlist->head, list) {
@@ -993,8 +996,23 @@ void bch2_journal_write(struct closure *cl)
j->write_start_time = local_clock();
- start = vstruct_last(jset);
- end = bch2_journal_super_entries_add_common(c, start,
+ /*
+ * New btree roots are set by journalling them; when the journal entry
+ * gets written we have to propagate them to c->btree_roots
+ *
+ * But, every journal entry we write has to contain all the btree roots
+ * (at least for now); so after we copy btree roots to c->btree_roots we
+ * have to get any missing btree roots and add them to this journal
+ * entry:
+ */
+
+ bch2_journal_entries_to_btree_roots(c, jset);
+
+ start = end = vstruct_last(jset);
+
+ end = bch2_btree_roots_to_journal_entries(c, jset->start, end);
+
+ end = bch2_journal_super_entries_add_common(c, end,
le64_to_cpu(jset->seq));
u64s = (u64 *) end - (u64 *) start;
BUG_ON(u64s > j->entry_u64s_reserved);
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index d34434f62454..4811ab9f879e 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -28,18 +28,10 @@ unsigned bch2_journal_dev_buckets_available(struct journal *j,
struct journal_device *ja,
enum journal_space_from from)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned available = (journal_space_from(ja, from) -
ja->cur_idx - 1 + ja->nr) % ja->nr;
/*
- * Allocator startup needs some journal space before we can do journal
- * replay:
- */
- if (available && test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags))
- --available;
-
- /*
* Don't use the last bucket unless writing the new last_seq
* will make another bucket available:
*/
@@ -330,7 +322,7 @@ static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
__journal_pin_drop(j, pin);
- BUG_ON(!atomic_read(&pin_list->count));
+ BUG_ON(!atomic_read(&pin_list->count) && seq == journal_last_seq(j));
atomic_inc(&pin_list->count);
pin->seq = seq;
@@ -354,6 +346,37 @@ void __bch2_journal_pin_add(struct journal *j, u64 seq,
journal_wake(j);
}
+void bch2_journal_pin_update(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
+{
+ if (journal_pin_active(pin) && pin->seq < seq)
+ return;
+
+ spin_lock(&j->lock);
+
+ if (pin->seq != seq) {
+ bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
+ } else {
+ struct journal_entry_pin_list *pin_list =
+ journal_seq_pin(j, seq);
+
+ /*
+ * If the pin is already pinning the right sequence number, it
+ * still might've already been flushed:
+ */
+ list_move(&pin->list, &pin_list->list);
+ }
+
+ spin_unlock(&j->lock);
+
+ /*
+ * If the journal is currently full, we might want to call flush_fn
+ * immediately:
+ */
+ journal_wake(j);
+}
+
void bch2_journal_pin_copy(struct journal *j,
struct journal_entry_pin *dst,
struct journal_entry_pin *src,
@@ -393,6 +416,9 @@ journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *ret = NULL;
+ if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
+ return NULL;
+
spin_lock(&j->lock);
fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
@@ -413,10 +439,12 @@ journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
return ret;
}
-static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
+/* returns true if we did work */
+static bool journal_flush_pins(struct journal *j, u64 seq_to_flush,
unsigned min_nr)
{
struct journal_entry_pin *pin;
+ bool ret = false;
u64 seq;
lockdep_assert_held(&j->reclaim_lock);
@@ -431,7 +459,10 @@ static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
BUG_ON(j->flush_in_progress != pin);
j->flush_in_progress = NULL;
wake_up(&j->pin_flush_wait);
+ ret = true;
}
+
+ return ret;
}
/**
@@ -523,7 +554,8 @@ void bch2_journal_reclaim_work(struct work_struct *work)
mutex_unlock(&j->reclaim_lock);
}
-static int journal_flush_done(struct journal *j, u64 seq_to_flush)
+static int journal_flush_done(struct journal *j, u64 seq_to_flush,
+ bool *did_work)
{
int ret;
@@ -533,7 +565,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush)
mutex_lock(&j->reclaim_lock);
- journal_flush_pins(j, seq_to_flush, 0);
+ *did_work = journal_flush_pins(j, seq_to_flush, 0);
spin_lock(&j->lock);
/*
@@ -551,12 +583,17 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush)
return ret;
}
-void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
+bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
+ bool did_work = false;
+
if (!test_bit(JOURNAL_STARTED, &j->flags))
- return;
+ return false;
+
+ closure_wait_event(&j->async_wait,
+ journal_flush_done(j, seq_to_flush, &did_work));
- closure_wait_event(&j->async_wait, journal_flush_done(j, seq_to_flush));
+ return did_work;
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h
index 883a0a5680af..8128907a7623 100644
--- a/fs/bcachefs/journal_reclaim.h
+++ b/fs/bcachefs/journal_reclaim.h
@@ -38,10 +38,14 @@ static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
- if (unlikely(!journal_pin_active(pin)))
+ if (unlikely(!journal_pin_active(pin) || pin->seq > seq))
__bch2_journal_pin_add(j, seq, pin, flush_fn);
}
+void bch2_journal_pin_update(struct journal *, u64,
+ struct journal_entry_pin *,
+ journal_pin_flush_fn);
+
void bch2_journal_pin_copy(struct journal *,
struct journal_entry_pin *,
struct journal_entry_pin *,
@@ -53,11 +57,11 @@ void bch2_journal_do_discards(struct journal *);
void bch2_journal_reclaim(struct journal *);
void bch2_journal_reclaim_work(struct work_struct *);
-void bch2_journal_flush_pins(struct journal *, u64);
+bool bch2_journal_flush_pins(struct journal *, u64);
-static inline void bch2_journal_flush_all_pins(struct journal *j)
+static inline bool bch2_journal_flush_all_pins(struct journal *j)
{
- bch2_journal_flush_pins(j, U64_MAX);
+ return bch2_journal_flush_pins(j, U64_MAX);
}
int bch2_journal_flush_device_pins(struct journal *, int);
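
Since bch2_journal_flush_pins() now reports whether it actually flushed anything, shutdown paths can loop until the journal is quiescent instead of guessing; this mirrors the clean_passes loop added to __bch2_fs_read_only() later in this patch. A minimal sketch (hypothetical function name):

#include "journal_reclaim.h"

static void flush_until_quiescent_sketch(struct journal *j)
{
	unsigned clean_passes = 0;

	do {
		/* a pass that flushed something may have queued more work: */
		clean_passes = bch2_journal_flush_all_pins(j)
			? 0
			: clean_passes + 1;
	} while (clean_passes < 2);
}
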
diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h
index 8eea12a03c06..154b51b891d3 100644
--- a/fs/bcachefs/journal_types.h
+++ b/fs/bcachefs/journal_types.h
@@ -125,6 +125,7 @@ union journal_preres_state {
enum {
JOURNAL_REPLAY_DONE,
JOURNAL_STARTED,
+ JOURNAL_RECLAIM_STARTED,
JOURNAL_NEED_WRITE,
JOURNAL_NOT_EMPTY,
JOURNAL_MAY_GET_UNRESERVED,
diff --git a/fs/bcachefs/keylist.c b/fs/bcachefs/keylist.c
index 5da54ced9cad..864dfaa67b7a 100644
--- a/fs/bcachefs/keylist.c
+++ b/fs/bcachefs/keylist.c
@@ -6,7 +6,7 @@
int bch2_keylist_realloc(struct keylist *l, u64 *inline_u64s,
size_t nr_inline_u64s, size_t new_u64s)
{
- size_t oldsize = bch_keylist_u64s(l);
+ size_t oldsize = bch2_keylist_u64s(l);
size_t newsize = oldsize + new_u64s;
u64 *old_buf = l->keys_p == inline_u64s ? NULL : l->keys_p;
u64 *new_keys;
@@ -52,7 +52,7 @@ void bch2_keylist_pop_front(struct keylist *l)
memmove_u64s_down(l->keys,
bkey_next(l->keys),
- bch_keylist_u64s(l));
+ bch2_keylist_u64s(l));
}
#ifdef CONFIG_BCACHEFS_DEBUG
diff --git a/fs/bcachefs/keylist.h b/fs/bcachefs/keylist.h
index a7ff86b08abc..195799bb20bc 100644
--- a/fs/bcachefs/keylist.h
+++ b/fs/bcachefs/keylist.h
@@ -36,14 +36,14 @@ static inline bool bch2_keylist_empty(struct keylist *l)
return l->top == l->keys;
}
-static inline size_t bch_keylist_u64s(struct keylist *l)
+static inline size_t bch2_keylist_u64s(struct keylist *l)
{
return l->top_p - l->keys_p;
}
static inline size_t bch2_keylist_bytes(struct keylist *l)
{
- return bch_keylist_u64s(l) * sizeof(u64);
+ return bch2_keylist_u64s(l) * sizeof(u64);
}
static inline struct bkey_i *bch2_keylist_front(struct keylist *l)
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index e26fa1608f39..96c8690adc5b 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -151,15 +151,8 @@ retry:
}
/* flush relevant btree updates */
- while (1) {
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c) ||
- c->btree_roots_dirty);
- if (c->btree_roots_dirty)
- bch2_journal_meta(&c->journal);
- if (!bch2_btree_interior_updates_nr_pending(c))
- break;
- }
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
ret = 0;
err:
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index b82cd23fc848..b42350f9e9fb 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -178,9 +178,12 @@ next:
}
continue;
nomatch:
- if (m->ctxt)
+ if (m->ctxt) {
+ BUG_ON(k.k->p.offset <= iter->pos.offset);
+ atomic64_inc(&m->ctxt->stats->keys_raced);
atomic64_add(k.k->p.offset - iter->pos.offset,
&m->ctxt->stats->sectors_raced);
+ }
atomic_long_inc(&c->extent_migrate_raced);
trace_move_race(&new->k);
bch2_btree_iter_next_slot(iter);
@@ -313,12 +316,12 @@ static void move_free(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct moving_context *ctxt = io->write.ctxt;
+ struct bvec_iter_all iter;
struct bio_vec *bv;
- unsigned i;
bch2_disk_reservation_put(io->write.op.c, &io->write.op.res);
- bio_for_each_segment_all(bv, &io->write.op.wbio.bio, i)
+ bio_for_each_segment_all(bv, &io->write.op.wbio.bio, iter)
if (bv->bv_page)
__free_page(bv->bv_page);
@@ -775,14 +778,8 @@ int bch2_data_job(struct bch_fs *c,
ret = bch2_move_btree(c, rereplicate_pred, c, stats) ?: ret;
- while (1) {
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c) ||
- c->btree_roots_dirty);
- if (!bch2_btree_interior_updates_nr_pending(c))
- break;
- bch2_journal_meta(&c->journal);
- }
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
ret = bch2_replicas_gc2(c) ?: ret;
diff --git a/fs/bcachefs/move_types.h b/fs/bcachefs/move_types.h
index 6788170d3f95..fc0de165af9f 100644
--- a/fs/bcachefs/move_types.h
+++ b/fs/bcachefs/move_types.h
@@ -8,6 +8,7 @@ struct bch_move_stats {
struct bpos pos;
atomic64_t keys_moved;
+ atomic64_t keys_raced;
atomic64_t sectors_moved;
atomic64_t sectors_seen;
atomic64_t sectors_raced;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index e9cb2304576f..0a87cd7405dd 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -78,7 +78,17 @@ static bool __copygc_pred(struct bch_dev *ca,
ssize_t i = eytzinger0_find_le(h->data, h->used,
sizeof(h->data[0]),
bucket_offset_cmp, &search);
+#if 0
+ /* eytzinger search verify code: */
+ ssize_t j = -1, k;
+ for (k = 0; k < h->used; k++)
+ if (h->data[k].offset <= ptr->offset &&
+ (j < 0 || h->data[k].offset > h->data[j].offset))
+ j = k;
+
+ BUG_ON(i != j);
+#endif
return (i >= 0 &&
ptr->offset < h->data[i].offset + ca->mi.bucket_size &&
ptr->gen == h->data[i].gen);
@@ -203,9 +213,12 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
if (sectors_not_moved && !ret)
bch_warn_ratelimited(c,
- "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved",
+ "copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
sectors_not_moved, sectors_to_move,
- buckets_not_moved, buckets_to_move);
+ buckets_not_moved, buckets_to_move,
+ atomic64_read(&move_stats.sectors_moved),
+ atomic64_read(&move_stats.keys_raced),
+ atomic64_read(&move_stats.sectors_raced));
trace_copygc(ca,
atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index ba4903352343..3b051e7a8f1d 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -207,6 +207,11 @@ enum opt_type {
OPT_BOOL(), \
BCH_SB_PRJQUOTA, false, \
NULL, "Enable project quotas") \
+ x(reflink, u8, \
+ OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_BOOL(), \
+ BCH_SB_REFLINK, true, \
+ NULL, "Enable reflink support") \
x(degraded, u8, \
OPT_MOUNT, \
OPT_BOOL(), \
@@ -260,6 +265,11 @@ enum opt_type {
OPT_BOOL(), \
NO_SB_OPT, false, \
NULL, "Don't free journal entries/keys after startup")\
+ x(read_entire_journal, u8, \
+ 0, \
+ OPT_BOOL(), \
+ NO_SB_OPT, false, \
+ NULL, "Read all journal entries, not just dirty ones")\
x(noexcl, u8, \
OPT_MOUNT, \
OPT_BOOL(), \
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index a4d0eec2ea3e..41b864dcdc39 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -188,7 +188,79 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *i
iter->b = b;
bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
bch2_journal_iter_init(&iter->journal, journal_keys,
- b->btree_id, b->level, b->data->min_key);
+ b->c.btree_id, b->c.level, b->data->min_key);
+}
+
+/* Walk btree, overlaying keys from the journal: */
+
+static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
+ struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
+{
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ ret = key_fn(c, btree_id, b->c.level, k);
+ if (ret)
+ break;
+
+ if (b->c.level) {
+ struct btree *child;
+ BKEY_PADDED(k) tmp;
+
+ bkey_reassemble(&tmp.k, k);
+ k = bkey_i_to_s_c(&tmp.k);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ if (b->c.level > 0) {
+ child = bch2_btree_node_get_noiter(c, &tmp.k,
+ b->c.btree_id, b->c.level - 1);
+ ret = PTR_ERR_OR_ZERO(child);
+ if (ret)
+ break;
+
+ ret = (node_fn ? node_fn(c, b) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, child,
+ journal_keys, btree_id, node_fn, key_fn);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
+ }
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
+
+ return ret;
+}
+
+int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
+{
+ struct btree *b = c->btree_roots[btree_id].b;
+ int ret = 0;
+
+ if (btree_node_fake(b))
+ return 0;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ ret = (node_fn ? node_fn(c, b) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
+ node_fn, key_fn) ?:
+ key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
+ six_unlock_read(&b->c.lock);
+
+ return ret;
}
/* sort and dedup all keys in the journal: */
@@ -220,17 +292,6 @@ static int journal_sort_key_cmp(const void *_l, const void *_r)
cmp_int(l->journal_offset, r->journal_offset);
}
-static int journal_sort_seq_cmp(const void *_l, const void *_r)
-{
- const struct journal_key *l = _l;
- const struct journal_key *r = _r;
-
- return cmp_int(r->level, l->level) ?:
- cmp_int(l->journal_seq, r->journal_seq) ?:
- cmp_int(l->btree_id, r->btree_id) ?:
- bkey_cmp(l->k->k.p, r->k->k.p);
-}
-
void bch2_journal_keys_free(struct journal_keys *keys)
{
kvfree(keys->d);
@@ -247,20 +308,30 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
struct journal_key *src, *dst;
size_t nr_keys = 0;
- list_for_each_entry(p, journal_entries, list)
+ if (list_empty(journal_entries))
+ return keys;
+
+ keys.journal_seq_base =
+ le64_to_cpu(list_last_entry(journal_entries,
+ struct journal_replay, list)->j.last_seq);
+
+ list_for_each_entry(p, journal_entries, list) {
+ if (le64_to_cpu(p->j.seq) < keys.journal_seq_base)
+ continue;
+
for_each_jset_key(k, _n, entry, &p->j)
nr_keys++;
+ }
- keys.journal_seq_base =
- le64_to_cpu(list_first_entry(journal_entries,
- struct journal_replay,
- list)->j.seq);
keys.d = kvmalloc(sizeof(keys.d[0]) * nr_keys, GFP_KERNEL);
if (!keys.d)
goto err;
- list_for_each_entry(p, journal_entries, list)
+ list_for_each_entry(p, journal_entries, list) {
+ if (le64_to_cpu(p->j.seq) < keys.journal_seq_base)
+ continue;
+
for_each_jset_key(k, _n, entry, &p->j)
keys.d[keys.nr++] = (struct journal_key) {
.btree_id = entry->btree_id,
@@ -270,6 +341,7 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
keys.journal_seq_base,
.journal_offset = k->_data - p->j._data,
};
+ }
sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);
@@ -435,11 +507,48 @@ static int bch2_journal_replay_key(struct bch_fs *c, enum btree_id id,
__bch2_journal_replay_key(&trans, id, level, k));
}
+static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
+{
+ struct btree_iter *iter;
+ int ret;
+
+ iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(iter) ?:
+ bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
+static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+{
+ return bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_JOURNAL_REPLAY,
+ __bch2_alloc_replay_key(&trans, k));
+}
+
+static int journal_sort_seq_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = _l;
+ const struct journal_key *r = _r;
+
+ return cmp_int(r->level, l->level) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->btree_id, r->btree_id) ?:
+ bkey_cmp(l->k->k.p, r->k->k.p);
+}
+
static int bch2_journal_replay(struct bch_fs *c,
struct journal_keys keys)
{
struct journal *j = &c->journal;
struct journal_key *i;
+ u64 seq;
int ret;
sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
@@ -447,26 +556,63 @@ static int bch2_journal_replay(struct bch_fs *c,
if (keys.nr)
replay_now_at(j, keys.journal_seq_base);
+ seq = j->replay_journal_seq;
+
+ /*
+ * First replay updates to the alloc btree - these will only update the
+ * btree key cache:
+ */
for_each_journal_key(keys, i) {
- if (!i->level)
- replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+ cond_resched();
- if (i->level)
- ret = bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
- if (i->btree_id == BTREE_ID_ALLOC)
+ if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
ret = bch2_alloc_replay_key(c, i->k);
- else if (i->k->k.size)
- ret = bch2_extent_replay_key(c, i->btree_id, i->k);
- else
- ret = bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
+ if (ret)
+ goto err;
+ }
+ }
- if (ret) {
- bch_err(c, "journal replay: error %d while replaying key",
- ret);
- return ret;
+ /*
+ * Next replay updates to interior btree nodes:
+ */
+ for_each_journal_key(keys, i) {
+ cond_resched();
+
+ if (i->level) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
+ ret = bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
+ if (ret)
+ goto err;
}
+ }
+
+ /*
+ * Now that the btree is in a consistent state, we can start journal
+ * reclaim (which will be flushing entries from the btree key cache back
+ * to the btree):
+ */
+ set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
+ set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
+
+ j->replay_journal_seq = seq;
+ /*
+ * Now replay leaf node updates:
+ */
+ for_each_journal_key(keys, i) {
cond_resched();
+
+ if (i->level || i->btree_id == BTREE_ID_ALLOC)
+ continue;
+
+ replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+
+ ret = i->k->k.size
+ ? bch2_extent_replay_key(c, i->btree_id, i->k)
+ : bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
+ if (ret)
+ goto err;
}
replay_now_at(j, j->replay_journal_seq_end);
@@ -475,6 +621,9 @@ static int bch2_journal_replay(struct bch_fs *c,
bch2_journal_set_replay_done(j);
bch2_journal_flush_all_pins(j);
return bch2_journal_error(j);
+err:
+ bch_err(c, "journal replay: error %d while replaying key", ret);
+ return ret;
}
static bool journal_empty(struct list_head *journal)
@@ -496,6 +645,9 @@ verify_journal_entries_not_blacklisted_or_missing(struct bch_fs *c,
int ret = 0;
list_for_each_entry(i, journal, list) {
+ if (le64_to_cpu(i->j.seq) < start_seq)
+ continue;
+
fsck_err_on(seq != le64_to_cpu(i->j.seq), c,
"journal entries %llu-%llu missing! (replaying %llu-%llu)",
seq, le64_to_cpu(i->j.seq) - 1,
@@ -691,6 +843,7 @@ static int verify_superblock_clean(struct bch_fs *c,
"superblock read clock doesn't match journal after clean shutdown");
for (i = 0; i < BTREE_ID_NR; i++) {
+ char buf1[200], buf2[200];
struct bkey_i *k1, *k2;
unsigned l1 = 0, l2 = 0;
@@ -706,7 +859,11 @@ static int verify_superblock_clean(struct bch_fs *c,
k1->k.u64s != k2->k.u64s ||
memcmp(k1, k2, bkey_bytes(k1)) ||
l1 != l2, c,
- "superblock btree root doesn't match journal after clean shutdown");
+ "superblock btree root %u doesn't match journal after clean shutdown\n"
+ "sb: l=%u %s\n"
+ "journal: l=%u %s\n", i,
+ l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
+ l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
}
fsck_err:
return ret;
@@ -1077,12 +1234,24 @@ int bch2_fs_initialize(struct bch_fs *c)
bch2_mark_dev_superblock(c, ca, 0);
mutex_unlock(&c->sb_lock);
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version = c->disk_sb.sb->version_min =
+ le16_to_cpu(bcachefs_metadata_version_current);
+ c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
+ c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);
+ set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
+ set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
+
err = "unable to allocate journal buckets";
for_each_online_member(ca, c, i) {
ret = bch2_dev_journal_alloc(ca);
@@ -1135,11 +1304,6 @@ int bch2_fs_initialize(struct bch_fs *c)
goto err;
mutex_lock(&c->sb_lock);
- c->disk_sb.sb->version = c->disk_sb.sb->version_min =
- le16_to_cpu(bcachefs_metadata_version_current);
- c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
- c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
-
SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h
index 19f2f172a26b..a66827c9addf 100644
--- a/fs/bcachefs/recovery.h
+++ b/fs/bcachefs/recovery.h
@@ -44,6 +44,13 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *,
struct journal_keys *,
struct btree *);
+typedef int (*btree_walk_node_fn)(struct bch_fs *c, struct btree *b);
+typedef int (*btree_walk_key_fn)(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_s_c k);
+
+int bch2_btree_and_journal_walk(struct bch_fs *, struct journal_keys *, enum btree_id,
+ btree_walk_node_fn, btree_walk_key_fn);
+
void bch2_journal_keys_free(struct journal_keys *);
void bch2_journal_entries_free(struct list_head *);
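
As a usage sketch of the new walk helper: a btree_walk_key_fn sees every key with the journal keys overlaid on the on-disk btree, and node_fn may be NULL when no per-node work is needed. The callback and counter below are hypothetical:

#include "bcachefs.h"
#include "recovery.h"

static atomic64_t sketch_nr_keys;

/* btree_walk_key_fn: called for every key, journal keys included */
static int count_key_sketch(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_s_c k)
{
	atomic64_inc(&sketch_nr_keys);
	return 0;
}

static int count_extents_sketch(struct bch_fs *c, struct journal_keys *keys)
{
	atomic64_set(&sketch_nr_keys, 0);

	return bch2_btree_and_journal_walk(c, keys, BTREE_ID_EXTENTS,
					   NULL, count_key_sketch);
}
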
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 2f223be74926..3c473f1380a6 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -167,6 +167,9 @@ s64 bch2_remap_range(struct bch_fs *c,
u64 src_done, dst_done;
int ret = 0, ret2 = 0;
+ if (!c->opts.reflink)
+ return -EOPNOTSUPP;
+
if (!percpu_ref_tryget(&c->writes))
return -EROFS;
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 6596764c8421..f2be64c869df 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
@@ -955,7 +956,6 @@ int bch2_fs_mark_dirty(struct bch_fs *c)
mutex_lock(&c->sb_lock);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
- c->disk_sb.sb->compat[0] &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA);
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_new_extent_overwrite;
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_extents_above_btree_updates;
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_btree_updates_journalled;
@@ -989,27 +989,8 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
struct jset_entry *entry,
u64 journal_seq)
{
- struct btree_root *r;
unsigned i;
- mutex_lock(&c->btree_root_lock);
-
- for (r = c->btree_roots;
- r < c->btree_roots + BTREE_ID_NR;
- r++)
- if (r->alive) {
- entry_init_u64s(entry, r->key.u64s + 1);
- entry->btree_id = r - c->btree_roots;
- entry->level = r->level;
- entry->type = BCH_JSET_ENTRY_btree_root;
- bkey_copy(&entry->start[0], &r->key);
-
- entry = vstruct_next(entry);
- }
- c->btree_roots_dirty = false;
-
- mutex_unlock(&c->btree_root_lock);
-
percpu_down_write(&c->mark_lock);
if (!journal_seq) {
@@ -1110,6 +1091,7 @@ void bch2_fs_mark_clean(struct bch_fs *c)
entry = sb_clean->start;
entry = bch2_journal_super_entries_add_common(c, entry, 0);
+ entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
memset(entry, 0,
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index d347389771e0..0cdf285e4ffd 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -13,6 +13,7 @@
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_gc.h"
+#include "btree_key_cache.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "chardev.h"
@@ -207,7 +208,7 @@ int bch2_congested(void *data, int bdi_bits)
static void __bch2_fs_read_only(struct bch_fs *c)
{
struct bch_dev *ca;
- bool wrote;
+ bool wrote = false;
unsigned i, clean_passes = 0;
int ret;
@@ -224,48 +225,68 @@ static void __bch2_fs_read_only(struct bch_fs *c)
*/
bch2_journal_flush_all_pins(&c->journal);
+ /*
+ * If the allocator threads didn't all start up, the btree updates to
+ * write out alloc info aren't going to work:
+ */
if (!test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags))
- goto allocator_not_running;
+ goto nowrote_alloc;
- do {
- wrote = false;
+ bch_verbose(c, "writing alloc info");
+ /*
+ * This should normally just be writing the bucket read/write clocks:
+ */
+ ret = bch2_stripes_write(c, BTREE_INSERT_NOCHECK_RW, &wrote) ?:
+ bch2_alloc_write(c, BTREE_INSERT_NOCHECK_RW, &wrote);
+ bch_verbose(c, "writing alloc info complete");
- ret = bch2_stripes_write(c, BTREE_INSERT_NOCHECK_RW, &wrote) ?:
- bch2_alloc_write(c, BTREE_INSERT_NOCHECK_RW, &wrote);
+ if (ret && !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+ bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
- if (ret && !test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
- bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
+ if (ret)
+ goto nowrote_alloc;
- if (ret)
- break;
+ bch_verbose(c, "flushing journal and stopping allocators");
- for_each_member_device(ca, c, i)
- bch2_dev_allocator_quiesce(c, ca);
+ bch2_journal_flush_all_pins(&c->journal);
+ set_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
- bch2_journal_flush_all_pins(&c->journal);
+ do {
+ clean_passes++;
+
+ if (bch2_journal_flush_all_pins(&c->journal))
+ clean_passes = 0;
/*
- * We need to explicitly wait on btree interior updates to complete
- * before stopping the journal, flushing all journal pins isn't
- * sufficient, because in the BTREE_INTERIOR_UPDATING_ROOT case btree
- * interior updates have to drop their journal pin before they're
- * fully complete:
+ * In flight interior btree updates will generate more journal
+ * updates and btree updates (alloc btree):
*/
- closure_wait_event(&c->btree_interior_update_wait,
- !bch2_btree_interior_updates_nr_pending(c));
+ if (bch2_btree_interior_updates_nr_pending(c)) {
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+ clean_passes = 0;
+ }
+ flush_work(&c->btree_interior_update_work);
- clean_passes = wrote ? 0 : clean_passes + 1;
+ if (bch2_journal_flush_all_pins(&c->journal))
+ clean_passes = 0;
} while (clean_passes < 2);
-allocator_not_running:
+ bch_verbose(c, "flushing journal and stopping allocators complete");
+
+ set_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
+nowrote_alloc:
+ closure_wait_event(&c->btree_interior_update_wait,
+ !bch2_btree_interior_updates_nr_pending(c));
+ flush_work(&c->btree_interior_update_work);
+
for_each_member_device(ca, c, i)
bch2_dev_allocator_stop(ca);
clear_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
+ clear_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags);
bch2_fs_journal_stop(&c->journal);
- /* XXX: mark super that alloc info is persistent */
-
/*
* the journal kicks off btree writes via reclaim - wait for in flight
* writes after stopping journal:
@@ -338,8 +359,11 @@ void bch2_fs_read_only(struct bch_fs *c)
!test_bit(BCH_FS_ERROR, &c->flags) &&
!test_bit(BCH_FS_EMERGENCY_RO, &c->flags) &&
test_bit(BCH_FS_STARTED, &c->flags) &&
- !c->opts.norecovery)
+ test_bit(BCH_FS_ALLOC_CLEAN, &c->flags) &&
+ !c->opts.norecovery) {
+ bch_verbose(c, "marking filesystem clean");
bch2_fs_mark_clean(c);
+ }
clear_bit(BCH_FS_RW, &c->flags);
}
@@ -349,9 +373,9 @@ static void bch2_fs_read_only_work(struct work_struct *work)
struct bch_fs *c =
container_of(work, struct bch_fs, read_only_work);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
}
static void bch2_fs_read_only_async(struct bch_fs *c)
@@ -426,20 +450,12 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
+ clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
+
for_each_rw_member(ca, c, i)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
- if (!test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
- ret = bch2_fs_allocator_start(c);
- if (ret) {
- bch_err(c, "error initializing allocator");
- goto err;
- }
-
- set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
- }
-
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {
@@ -494,7 +510,9 @@ static void bch2_fs_free(struct bch_fs *c)
bch2_fs_ec_exit(c);
bch2_fs_encryption_exit(c);
bch2_fs_io_exit(c);
+ bch2_fs_btree_interior_update_exit(c);
bch2_fs_btree_iter_exit(c);
+ bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
bch2_fs_btree_cache_exit(c);
bch2_fs_journal_exit(&c->journal);
bch2_io_clock_exit(&c->io_clock[WRITE]);
@@ -511,8 +529,6 @@ static void bch2_fs_free(struct bch_fs *c)
mempool_exit(&c->large_bkey_pool);
mempool_exit(&c->btree_bounce_pool);
bioset_exit(&c->btree_bio);
- mempool_exit(&c->btree_interior_update_pool);
- mempool_exit(&c->btree_reserve_pool);
mempool_exit(&c->fill_iter);
percpu_ref_exit(&c->writes);
kfree(c->replicas.entries);
@@ -551,9 +567,9 @@ void bch2_fs_stop(struct bch_fs *c)
cancel_work_sync(&c->journal_seq_blacklist_gc_work);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
bch2_fs_read_only(c);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
for_each_member_device(ca, c, i)
if (ca->kobj.state_in_sysfs &&
@@ -625,7 +641,7 @@ static const char *bch2_fs_online(struct bch_fs *c)
bch2_opts_create_sysfs_files(&c->opts_dir))
return "error creating sysfs objects";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
err = "error creating sysfs objects";
__for_each_member_device(ca, c, i, NULL)
@@ -635,7 +651,7 @@ static const char *bch2_fs_online(struct bch_fs *c)
list_add(&c->list, &bch_fs_list);
err = NULL;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return err;
}
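
Since c->state_lock is now an rw_semaphore rather than a mutex, paths that only need to exclude state transitions (read-only/read-write switches, device add/remove) can take it shared. A hypothetical read-side user, for illustration:

#include "bcachefs.h"

static bool fs_is_rw_sketch(struct bch_fs *c)
{
	bool ret;

	/* shared: excludes writers of filesystem state, not other readers */
	down_read(&c->state_lock);
	ret = test_bit(BCH_FS_RW, &c->flags);
	up_read(&c->state_lock);

	return ret;
}
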
@@ -657,7 +673,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
c->minor = -1;
c->disk_sb.fs_sb = true;
- mutex_init(&c->state_lock);
+ init_rwsem(&c->state_lock);
mutex_init(&c->sb_lock);
mutex_init(&c->replicas_gc_lock);
mutex_init(&c->btree_root_lock);
@@ -668,6 +684,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
+ bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
bch2_fs_allocator_background_init(c);
bch2_fs_allocator_foreground_init(c);
bch2_fs_rebalance_init(c);
@@ -675,11 +692,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
INIT_LIST_HEAD(&c->list);
- INIT_LIST_HEAD(&c->btree_interior_update_list);
- INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
- mutex_init(&c->btree_reserve_cache_lock);
- mutex_init(&c->btree_interior_update_lock);
-
mutex_init(&c->usage_scratch_lock);
mutex_init(&c->bio_bounce_pages_lock);
@@ -705,6 +717,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
seqcount_init(&c->usage_lock);
+ sema_init(&c->io_in_flight, 64);
+
c->copy_gc_enabled = 1;
c->rebalance.enabled = 1;
c->promote_whole_extents = true;
@@ -752,10 +766,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
percpu_ref_init(&c->writes, bch2_writes_disabled,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
- mempool_init_kmalloc_pool(&c->btree_reserve_pool, 1,
- sizeof(struct btree_reserve)) ||
- mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
- sizeof(struct btree_update)) ||
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
bioset_init(&c->btree_bio, 1,
max(offsetof(struct btree_read_bio, bio),
@@ -770,7 +780,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_journal_init(&c->journal) ||
bch2_fs_replicas_init(c) ||
bch2_fs_btree_cache_init(c) ||
+ bch2_fs_btree_key_cache_init(&c->btree_key_cache) ||
bch2_fs_btree_iter_init(c) ||
+ bch2_fs_btree_interior_update_init(c) ||
bch2_fs_io_init(c) ||
bch2_fs_encryption_init(c) ||
bch2_fs_compress_init(c) ||
@@ -855,7 +867,7 @@ int bch2_fs_start(struct bch_fs *c)
unsigned i;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
@@ -905,7 +917,7 @@ int bch2_fs_start(struct bch_fs *c)
print_mount_opts(c);
ret = 0;
out:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
err:
switch (ret) {
@@ -1405,22 +1417,47 @@ int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
{
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = __bch2_dev_set_state(c, ca, new_state, flags);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
/* Device add/removal: */
+int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct btree_trans trans;
+ size_t i;
+ int ret;
+
+ bch2_trans_init(&trans, c, 0, 0);
+
+ for (i = 0; i < ca->mi.nbuckets; i++) {
+ ret = bch2_btree_key_cache_flush(&trans,
+ BTREE_ID_ALLOC, POS(ca->dev_idx, i));
+ if (ret)
+ break;
+ }
+ bch2_trans_exit(&trans);
+
+ if (ret)
+ return ret;
+
+ return bch2_btree_delete_range(c, BTREE_ID_ALLOC,
+ POS(ca->dev_idx, 0),
+ POS(ca->dev_idx + 1, 0),
+ NULL);
+}
+
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
struct bch_sb_field_members *mi;
unsigned dev_idx = ca->dev_idx, data;
int ret = -EINVAL;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
/*
* We consume a reference to ca->ref, regardless of whether we succeed
@@ -1447,10 +1484,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
goto err;
}
- ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC,
- POS(ca->dev_idx, 0),
- POS(ca->dev_idx + 1, 0),
- NULL);
+ ret = bch2_dev_remove_alloc(c, ca);
if (ret) {
bch_err(ca, "Remove failed, error deleting alloc info");
goto err;
@@ -1510,13 +1544,13 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_RW &&
!percpu_ref_is_zero(&ca->io_ref))
__bch2_dev_read_write(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
@@ -1592,7 +1626,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
dev_usage_clear(ca);
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
mutex_lock(&c->sb_lock);
err = "insufficient space in new superblock";
@@ -1653,12 +1687,12 @@ have_slot:
goto err_late;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err_unlock:
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err:
if (ca)
bch2_dev_free(ca);
@@ -1681,11 +1715,11 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
const char *err;
int ret;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
ret = bch2_read_super(path, &opts, &sb);
if (ret) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
@@ -1716,10 +1750,10 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
bch2_free_super(&sb);
bch_err(c, "error bringing %s online: %s", path, err);
return -EINVAL;
@@ -1727,23 +1761,23 @@ err:
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
{
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (!bch2_dev_is_online(ca)) {
bch_err(ca, "Already offline");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
bch_err(ca, "Cannot offline required disk");
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return -EINVAL;
}
__bch2_dev_offline(c, ca);
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return 0;
}
@@ -1752,7 +1786,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
struct bch_member *mi;
int ret = 0;
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
if (nbuckets < ca->mi.nbuckets) {
bch_err(ca, "Cannot shrink yet");
@@ -1783,7 +1817,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
bch2_recalc_capacity(c);
err:
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
return ret;
}
@@ -1862,13 +1896,13 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
goto err;
err = "bch2_dev_online() error";
- mutex_lock(&c->state_lock);
+ down_write(&c->state_lock);
for (i = 0; i < nr_devices; i++)
if (bch2_dev_attach_bdev(c, &sb[i])) {
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
goto err_print;
}
- mutex_unlock(&c->state_lock);
+ up_write(&c->state_lock);
err = "insufficient devices";
if (!bch2_fs_may_start(c))
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index d78ffcc0e8a4..c169d282a1f9 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -14,6 +14,7 @@
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
@@ -134,7 +135,6 @@ do { \
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
-write_attribute(trigger_alloc_write);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
@@ -166,6 +166,8 @@ read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);
+read_attribute(btree_key_cache);
+read_attribute(btree_transactions);
read_attribute(internal_uuid);
@@ -402,6 +404,20 @@ SHOW(bch2_fs)
if (attr == &sysfs_dirty_btree_nodes)
return bch2_dirty_btree_nodes_print(c, buf);
+ if (attr == &sysfs_btree_key_cache) {
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
+
+ bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
+ return out.pos - buf;
+ }
+
+ if (attr == &sysfs_btree_transactions) {
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
+
+ bch2_btree_trans_to_text(&out, c);
+ return out.pos - buf;
+ }
+
if (attr == &sysfs_compression_stats)
return bch2_compression_stats(c, buf);
@@ -420,7 +436,7 @@ SHOW(bch2_fs)
return 0;
}
-STORE(__bch2_fs)
+STORE(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
@@ -478,13 +494,17 @@ STORE(__bch2_fs)
if (attr == &sysfs_trigger_btree_coalesce)
bch2_coalesce(c);
- if (attr == &sysfs_trigger_gc)
+ if (attr == &sysfs_trigger_gc) {
+ /*
+ * Full gc is currently incompatible with btree key cache:
+ */
+#if 0
+ down_read(&c->state_lock);
bch2_gc(c, NULL, false, false);
-
- if (attr == &sysfs_trigger_alloc_write) {
- bool wrote;
-
- bch2_alloc_write(c, 0, &wrote);
+ up_read(&c->state_lock);
+#else
+ bch2_gc_gens(c);
+#endif
}
if (attr == &sysfs_prune_cache) {
@@ -494,6 +514,7 @@ STORE(__bch2_fs)
sc.nr_to_scan = strtoul_or_return(buf);
c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
}
+
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
@@ -515,17 +536,6 @@ STORE(__bch2_fs)
#endif
return size;
}
-
-STORE(bch2_fs)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- mutex_lock(&c->state_lock);
- size = __bch2_fs_store(kobj, attr, buf, size);
- mutex_unlock(&c->state_lock);
-
- return size;
-}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
@@ -571,6 +581,8 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_journal_pins,
&sysfs_btree_updates,
&sysfs_dirty_btree_nodes,
+ &sysfs_btree_key_cache,
+ &sysfs_btree_transactions,
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,
@@ -579,7 +591,6 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_journal_flush,
&sysfs_trigger_btree_coalesce,
&sysfs_trigger_gc,
- &sysfs_trigger_alloc_write,
&sysfs_prune_cache,
&sysfs_copy_gc_enabled,
@@ -835,6 +846,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
" meta: %llu\n"
" user: %llu\n"
" cached: %llu\n"
+ " erasure coded: %llu\n"
" fragmented: %llu\n"
" copygc threshold: %llu\n"
"freelist_wait: %s\n"
@@ -861,6 +873,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
stats.sectors[BCH_DATA_BTREE],
stats.sectors[BCH_DATA_USER],
stats.sectors[BCH_DATA_CACHED],
+ stats.sectors_ec,
stats.sectors_fragmented,
ca->copygc_threshold,
c->freelist_wait.list.first ? "waiting" : "empty",
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 2b19a0038045..0128daba5970 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -664,35 +664,6 @@ static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
memset(s + bytes, c, rem);
}
-static inline struct bio_vec next_contig_bvec(struct bio *bio,
- struct bvec_iter *iter)
-{
- struct bio_vec bv = bio_iter_iovec(bio, *iter);
-
- bio_advance_iter(bio, iter, bv.bv_len);
-#ifndef CONFIG_HIGHMEM
- while (iter->bi_size) {
- struct bio_vec next = bio_iter_iovec(bio, *iter);
-
- if (page_address(bv.bv_page) + bv.bv_offset + bv.bv_len !=
- page_address(next.bv_page) + next.bv_offset)
- break;
-
- bv.bv_len += next.bv_len;
- bio_advance_iter(bio, iter, next.bv_len);
- }
-#endif
- return bv;
-}
-
-#define __bio_for_each_contig_segment(bv, bio, iter, start) \
- for (iter = (start); \
- (iter).bi_size && \
- ((bv = next_contig_bvec((bio), &(iter))), 1);)
-
-#define bio_for_each_contig_segment(bv, bio, iter) \
- __bio_for_each_contig_segment(bv, bio, iter, (bio)->bi_iter)
-
void sort_cmp_size(void *base, size_t num, size_t size,
int (*cmp_func)(const void *, const void *, size_t),
void (*swap_func)(void *, void *, size_t));
diff --git a/include/linux/six.h b/include/linux/six.h
index 0fb1b2f49345..a16e94f482e9 100644
--- a/include/linux/six.h
+++ b/include/linux/six.h
@@ -115,6 +115,8 @@ struct six_lock {
#endif
};
+typedef int (*six_lock_should_sleep_fn)(struct six_lock *lock, void *);
+
static __always_inline void __six_lock_init(struct six_lock *lock,
const char *name,
struct lock_class_key *key)
@@ -141,7 +143,7 @@ do { \
#define __SIX_LOCK(type) \
bool six_trylock_##type(struct six_lock *); \
bool six_relock_##type(struct six_lock *, u32); \
-void six_lock_##type(struct six_lock *); \
+int six_lock_##type(struct six_lock *, six_lock_should_sleep_fn, void *);\
void six_unlock_##type(struct six_lock *);
__SIX_LOCK(read)
@@ -167,14 +169,15 @@ static inline bool six_trylock_type(struct six_lock *lock, enum six_lock_type ty
}
static inline bool six_relock_type(struct six_lock *lock, enum six_lock_type type,
- unsigned seq)
+ unsigned seq)
{
SIX_LOCK_DISPATCH(type, six_relock, lock, seq);
}
-static inline void six_lock_type(struct six_lock *lock, enum six_lock_type type)
+static inline int six_lock_type(struct six_lock *lock, enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
{
- SIX_LOCK_DISPATCH(type, six_lock, lock);
+ SIX_LOCK_DISPATCH(type, six_lock, lock, should_sleep_fn, p);
}
static inline void six_unlock_type(struct six_lock *lock, enum six_lock_type type)
@@ -189,4 +192,6 @@ bool six_trylock_convert(struct six_lock *, enum six_lock_type,
void six_lock_increment(struct six_lock *, enum six_lock_type);
+void six_lock_wakeup_all(struct six_lock *);
+
#endif /* _LINUX_SIX_H */
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 01a9cc736cab..bafbccafae30 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -144,8 +144,8 @@ DECLARE_EVENT_CLASS(btree_node,
TP_fast_assign(
memcpy(__entry->uuid, c->sb.user_uuid.b, 16);
- __entry->level = b->level;
- __entry->id = b->btree_id;
+ __entry->level = b->c.level;
+ __entry->id = b->c.btree_id;
__entry->inode = b->key.k.p.inode;
__entry->offset = b->key.k.p.offset;
),
@@ -262,7 +262,7 @@ TRACE_EVENT(btree_insert_key,
),
TP_fast_assign(
- __entry->id = b->btree_id;
+ __entry->id = b->c.btree_id;
__entry->inode = k->k.p.inode;
__entry->offset = k->k.p.offset;
__entry->size = k->k.size;
diff --git a/kernel/locking/six.c b/kernel/locking/six.c
index 9fa58b6fadc9..49d46ed2e18e 100644
--- a/kernel/locking/six.c
+++ b/kernel/locking/six.c
@@ -15,7 +15,7 @@
#endif
#define six_acquire(l, t) lock_acquire(l, 0, t, 0, 0, NULL, _RET_IP_)
-#define six_release(l) lock_release(l, 0, _RET_IP_)
+#define six_release(l) lock_release(l, _RET_IP_)
struct six_lock_vals {
/* Value we add to the lock in order to take the lock: */
@@ -108,7 +108,8 @@ static bool __six_trylock_type(struct six_lock *lock, enum six_lock_type type)
if (!do_six_trylock_type(lock, type))
return false;
- six_acquire(&lock->dep_map, 1);
+ if (type != SIX_LOCK_write)
+ six_acquire(&lock->dep_map, 1);
return true;
}
@@ -130,7 +131,8 @@ static bool __six_relock_type(struct six_lock *lock, enum six_lock_type type,
old.v + l[type].lock_val)) != old.v);
six_set_owner(lock, type, old);
- six_acquire(&lock->dep_map, 1);
+ if (type != SIX_LOCK_write)
+ six_acquire(&lock->dep_map, 1);
return true;
}
@@ -265,15 +267,21 @@ static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type
#endif
noinline
-static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type)
+static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
{
const struct six_lock_vals l[] = LOCK_VALS;
union six_lock_state old, new;
struct six_lock_waiter wait;
+ int ret = 0;
u64 v;
+ ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
+ if (ret)
+ return ret;
+
if (six_optimistic_spin(lock, type))
- return;
+ return 0;
lock_contended(&lock->dep_map, _RET_IP_);
@@ -290,6 +298,10 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
raw_spin_unlock(&lock->wait_lock);
}
+ ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
+ if (ret)
+ break;
+
v = READ_ONCE(lock->state.v);
do {
new.v = old.v = v;
@@ -309,7 +321,8 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
schedule();
}
- six_set_owner(lock, type, old);
+ if (!ret)
+ six_set_owner(lock, type, old);
__set_current_state(TASK_RUNNING);
@@ -318,17 +331,28 @@ static void __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type t
list_del_init(&wait.list);
raw_spin_unlock(&lock->wait_lock);
}
+
+ return ret;
}
__always_inline
-static void __six_lock_type(struct six_lock *lock, enum six_lock_type type)
+static int __six_lock_type(struct six_lock *lock, enum six_lock_type type,
+ six_lock_should_sleep_fn should_sleep_fn, void *p)
{
- six_acquire(&lock->dep_map, 0);
+ int ret;
- if (!do_six_trylock_type(lock, type))
- __six_lock_type_slowpath(lock, type);
+ if (type != SIX_LOCK_write)
+ six_acquire(&lock->dep_map, 0);
+
+ ret = do_six_trylock_type(lock, type) ? 0
+ : __six_lock_type_slowpath(lock, type, should_sleep_fn, p);
- lock_acquired(&lock->dep_map, _RET_IP_);
+ if (ret && type != SIX_LOCK_write)
+ six_release(&lock->dep_map);
+ if (!ret)
+ lock_acquired(&lock->dep_map, _RET_IP_);
+
+ return ret;
}
static inline void six_lock_wakeup(struct six_lock *lock,
@@ -382,7 +406,8 @@ static void __six_unlock_type(struct six_lock *lock, enum six_lock_type type)
EBUG_ON(type == SIX_LOCK_write &&
!(lock->state.v & __SIX_LOCK_HELD_intent));
- six_release(&lock->dep_map);
+ if (type != SIX_LOCK_write)
+ six_release(&lock->dep_map);
if (type == SIX_LOCK_intent) {
EBUG_ON(lock->owner != current);
@@ -413,9 +438,10 @@ bool six_relock_##type(struct six_lock *lock, u32 seq) \
} \
EXPORT_SYMBOL_GPL(six_relock_##type); \
\
-void six_lock_##type(struct six_lock *lock) \
+int six_lock_##type(struct six_lock *lock, \
+ six_lock_should_sleep_fn should_sleep_fn, void *p) \
{ \
- __six_lock_type(lock, SIX_LOCK_##type); \
+ return __six_lock_type(lock, SIX_LOCK_##type, should_sleep_fn, p);\
} \
EXPORT_SYMBOL_GPL(six_lock_##type); \
\
@@ -510,3 +536,18 @@ void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
}
}
EXPORT_SYMBOL_GPL(six_lock_increment);
+
+void six_lock_wakeup_all(struct six_lock *lock)
+{
+ struct six_lock_waiter *w;
+
+ raw_spin_lock(&lock->wait_lock);
+
+ list_for_each_entry(w, &lock->wait_list[0], list)
+ wake_up_process(w->task);
+ list_for_each_entry(w, &lock->wait_list[1], list)
+ wake_up_process(w->task);
+
+ raw_spin_unlock(&lock->wait_lock);
+}
+EXPORT_SYMBOL_GPL(six_lock_wakeup_all);
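
six_lock_wakeup_all() pairs with the should_sleep_fn hook: after changing whatever condition the callbacks check, waking every waiter lets them re-run the callback and abort. Continuing the hypothetical shutdown-flag sketch from the six.h notes above:

#include <linux/compiler.h>
#include <linux/six.h>

static void shutdown_and_kick_waiters_sketch(struct six_lock *lock,
					     bool *shutting_down)
{
	/* set the condition the should_sleep_fn callbacks test ... */
	WRITE_ONCE(*shutting_down, true);

	/* ... then wake all read/intent/write waiters so they re-check it: */
	six_lock_wakeup_all(lock);
}
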