author    Kent Overstreet <kent.overstreet@linux.dev>  2025-07-14 10:18:56 -0400
committer Kent Overstreet <kent.overstreet@linux.dev>  2025-07-14 10:18:56 -0400
commit    e1e87f53cdfeb048747b1e6fa1d1a44b4a85376e (patch)
tree      b12305759419a953d75bda42519091a816265350
parent    b261da891a7d3bd8b9081b5814bfe9b7661e8ec7 (diff)
Update bcachefs sources to 4b5105c627f4 workqueue: Basic memory allocation profiling support
-rw-r--r--  .bcachefs_revision        |  2
-rw-r--r--  include/linux/kmemleak.h  |  4
-rw-r--r--  libbcachefs/bset.c        | 66
-rw-r--r--  libbcachefs/btree_io.c    | 10
-rw-r--r--  libbcachefs/fsck.c        |  2
-rw-r--r--  libbcachefs/io_read.c     |  4
-rw-r--r--  libbcachefs/io_write.c    |  9
-rw-r--r--  libbcachefs/movinggc.c    |  2

8 files changed, 60 insertions, 39 deletions
diff --git a/.bcachefs_revision b/.bcachefs_revision
index 6766254f..1d699419 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-c241a5bf54ed4aeb29d029d8f1dae1dd592cdda4
+4b5105c627f4f1490e9bc4267c8096926de367b5
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 93a73c07..fbd424b2 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -28,6 +28,7 @@ extern void kmemleak_update_trace(const void *ptr) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
extern void kmemleak_transient_leak(const void *ptr) __ref;
extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
extern void kmemleak_no_scan(const void *ptr) __ref;
extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
@@ -97,6 +98,9 @@ static inline void kmemleak_not_leak(const void *ptr)
static inline void kmemleak_transient_leak(const void *ptr)
{
}
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
static inline void kmemleak_ignore(const void *ptr)
{
}
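
As context for the new declaration and no-op stub above, a minimal, hypothetical usage sketch assuming the kernel's usual alloc_percpu() allocator: a caller marks a per-CPU allocation so kmemleak will not report it, and with kmemleak compiled out (as in this shim header) the call compiles away entirely.

#include <linux/percpu.h>
#include <linux/kmemleak.h>

/* Hypothetical caller, for illustration only: allocate a per-CPU counter
 * and tell kmemleak to ignore it. With the stub above,
 * kmemleak_ignore_percpu() is a no-op.
 */
static u64 __percpu *alloc_ignored_counter(void)
{
	u64 __percpu *p = alloc_percpu(u64);

	if (p)
		kmemleak_ignore_percpu(p);
	return p;
}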
diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c
index 32841f76..90fd1574 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -362,27 +362,6 @@ static struct bkey_float *bkey_float(const struct btree *b,
return ro_aux_tree_base(b, t)->f + idx;
}
-static void __bset_aux_tree_verify(struct btree *b)
-{
- for_each_bset(b, t) {
- if (t->aux_data_offset == U16_MAX)
- continue;
-
- BUG_ON(t != b->set &&
- t[-1].aux_data_offset == U16_MAX);
-
- BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
- BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
- BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
- }
-}
-
-static inline void bset_aux_tree_verify(struct btree *b)
-{
- if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
- __bset_aux_tree_verify(b);
-}
-
void bch2_btree_keys_init(struct btree *b)
{
unsigned i;
@@ -538,6 +517,51 @@ static inline void bch2_bset_verify_rw_aux_tree(struct btree *b,
__bch2_bset_verify_rw_aux_tree(b, t);
}
+static void __bset_aux_tree_verify_ro(struct btree *b, struct bset_tree *t)
+{
+ struct bkey_packed *k = btree_bkey_first(b, t);
+
+ eytzinger1_for_each(j, t->size - 1) {
+ while (tree_to_bkey(b, t, j) > k &&
+ k != btree_bkey_last(b, t))
+ k = bkey_p_next(k);
+
+ BUG_ON(tree_to_bkey(b, t, j) != k);
+ }
+}
+
+static void __bset_aux_tree_verify(struct btree *b)
+{
+ for_each_bset(b, t) {
+ if (t->aux_data_offset == U16_MAX)
+ continue;
+
+ BUG_ON(t != b->set &&
+ t[-1].aux_data_offset == U16_MAX);
+
+ BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
+ BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
+ BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
+
+ switch (bset_aux_tree_type(t)) {
+ case BSET_RO_AUX_TREE:
+ __bset_aux_tree_verify_ro(b, t);
+ break;
+ case BSET_RW_AUX_TREE:
+ __bch2_bset_verify_rw_aux_tree(b, t);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static inline void bset_aux_tree_verify(struct btree *b)
+{
+ if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
+ __bset_aux_tree_verify(b);
+}
+
/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
struct bset_tree *t,
diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index 2fd3f1ed..064627a2 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -1302,9 +1302,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
- if (updated_range)
- bch2_btree_node_drop_keys_outside_node(b);
-
i = &b->data->keys;
for (k = i->start; k != vstruct_last(i);) {
struct bkey tmp;
@@ -1336,16 +1333,15 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
k = bkey_p_next(k);
}
- for (k = i->start; k != vstruct_last(i);) {
- BUG_ON(!k->u64s);
- }
-
bch2_bset_build_aux_tree(b, b->set, false);
set_needs_whiteout(btree_bset_first(b), true);
btree_node_reset_sib_u64s(b);
+ if (updated_range)
+ bch2_btree_node_drop_keys_outside_node(b);
+
/*
* XXX:
*
diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c
index 471e93a3..9d06f32b 100644
--- a/libbcachefs/fsck.c
+++ b/libbcachefs/fsck.c
@@ -3209,6 +3209,8 @@ static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
if (ret)
return ret;
+ thr->c->recovery_task = current;
+
ret = bch2_fs_start(thr->c);
if (ret)
goto err;
diff --git a/libbcachefs/io_read.c b/libbcachefs/io_read.c
index 3d484f1f..e854adea 100644
--- a/libbcachefs/io_read.c
+++ b/libbcachefs/io_read.c
@@ -87,7 +87,7 @@ void bch2_dev_congested_to_text(struct printbuf *out, struct bch_dev *ca)
prt_printf(out, "read latency threshold:\t");
bch2_pr_time_units(out,
- ca->io_latency[READ].quantiles.entries[QUANTILE_IDX(1)].m * 2);
+ ca->io_latency[READ].quantiles.entries[QUANTILE_IDX(1)].m << 2);
prt_newline(out);
prt_printf(out, "median read latency:\t");
@@ -97,7 +97,7 @@ void bch2_dev_congested_to_text(struct printbuf *out, struct bch_dev *ca)
prt_printf(out, "write latency threshold:\t");
bch2_pr_time_units(out,
- ca->io_latency[WRITE].quantiles.entries[QUANTILE_IDX(1)].m * 3);
+ ca->io_latency[WRITE].quantiles.entries[QUANTILE_IDX(1)].m << 3);
prt_newline(out);
prt_printf(out, "median write latency:\t");
diff --git a/libbcachefs/io_write.c b/libbcachefs/io_write.c
index fa077341..aedbea63 100644
--- a/libbcachefs/io_write.c
+++ b/libbcachefs/io_write.c
@@ -55,14 +55,9 @@ static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
s64 latency_over = io_latency - latency_threshold;
if (latency_threshold && latency_over > 0) {
- /*
- * bump up congested by approximately latency_over * 4 /
- * latency_threshold - we don't need much accuracy here so don't
- * bother with the divide:
- */
if (atomic_read(&ca->congested) < CONGESTED_MAX)
- atomic_add(latency_over >>
- max_t(int, ilog2(latency_threshold) - 2, 0),
+ atomic_add((u32) min(U32_MAX, io_latency * 2) /
+ (u32) min(U32_MAX, latency_threshold),
&ca->congested);
ca->congested_last = now;
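
For reference, a standalone sketch contrasting the two increments in the hunk above: the old code approximated latency_over * 4 / latency_threshold with a shift, while the new code computes io_latency * 2 / latency_threshold exactly, clamped to 32 bits. Everything here other than the expressions copied from the diff is illustrative.

#include <stdint.h>

/* Tiny ilog2 helper for the sketch (latency_threshold is nonzero here). */
static int sketch_ilog2(uint64_t v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

/* Old: latency_over >> max(ilog2(latency_threshold) - 2, 0),
 * roughly latency_over * 4 / latency_threshold without a divide.
 * Example: threshold 100, latency 250 -> 150 >> 4 = 9.
 */
static uint32_t congested_delta_old(uint64_t io_latency, uint64_t latency_threshold)
{
	int64_t latency_over = (int64_t) (io_latency - latency_threshold);
	int shift = sketch_ilog2(latency_threshold) - 2;

	if (shift < 0)
		shift = 0;
	return latency_over > 0 ? (uint32_t) ((uint64_t) latency_over >> shift) : 0;
}

/* New: exact (io_latency * 2) / latency_threshold, both sides clamped to U32_MAX.
 * Example: threshold 100, latency 250 -> 500 / 100 = 5.
 */
static uint32_t congested_delta_new(uint64_t io_latency, uint64_t latency_threshold)
{
	uint64_t num = io_latency * 2 < UINT32_MAX ? io_latency * 2 : UINT32_MAX;
	uint64_t den = latency_threshold < UINT32_MAX ? latency_threshold : UINT32_MAX;

	return (uint32_t) num / (uint32_t) den;
}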
diff --git a/libbcachefs/movinggc.c b/libbcachefs/movinggc.c
index 27e68d47..5e6de91a 100644
--- a/libbcachefs/movinggc.c
+++ b/libbcachefs/movinggc.c
@@ -71,7 +71,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
if (ret)
return ret;
- struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+ struct bch_dev *ca = bch2_dev_bucket_tryget(c, k.k->p);
if (!ca)
goto out;