-rw-r--r--  fs/bcachefs/backpointers.c    | 72
-rw-r--r--  fs/bcachefs/btree_cache.c     |  2
-rw-r--r--  fs/bcachefs/btree_io.c        |  2
-rw-r--r--  fs/bcachefs/btree_iter.c      |  6
-rw-r--r--  fs/bcachefs/btree_key_cache.c |  1
-rw-r--r--  fs/bcachefs/btree_types.h     |  3
-rw-r--r--  fs/bcachefs/fast_list.h       |  2
-rw-r--r--  fs/bcachefs/fs-io-pagecache.c |  2
-rw-r--r--  fs/bcachefs/journal.c         |  2
-rw-r--r--  fs/bcachefs/snapshot.c        | 40
-rw-r--r--  fs/bcachefs/super-io.c        |  4
11 files changed, 89 insertions, 47 deletions
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index bd26ab3e6812..afdfe991e4dd 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -872,13 +872,27 @@ static int check_bucket_backpointers_to_extents(struct btree_trans *, struct bch
 static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct bkey_s_c alloc_k,
					     bool *had_mismatch,
-					     struct bkey_buf *last_flushed)
+					     struct bkey_buf *last_flushed,
+					     struct bpos *last_pos,
+					     unsigned *nr_iters)
 {
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
	bool need_commit = false;
 
+	if (!bpos_eq(*last_pos, alloc_k.k->p))
+		*nr_iters = 0;
+
+	*last_pos = alloc_k.k->p;
+	*nr_iters += 1;
+
+	if (*nr_iters > 2) {
+		bch_err(c, "%s: looping at %llu:%llu", __func__,
+			last_pos->inode, last_pos->offset);
+		return 0;
+	}
+
	*had_mismatch = false;
 
	if (a->data_type == BCH_DATA_sb ||
@@ -937,19 +951,52 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
	if (sectors[ALLOC_dirty] != a->dirty_sectors ||
	    sectors[ALLOC_cached] != a->cached_sectors ||
	    sectors[ALLOC_stripe] != a->stripe_sectors) {
-		if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) {
-			ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed);
-			if (ret)
-				return ret;
-		}
-
		if (sectors[ALLOC_dirty] > a->dirty_sectors ||
		    sectors[ALLOC_cached] > a->cached_sectors ||
		    sectors[ALLOC_stripe] > a->stripe_sectors) {
+			if (*nr_iters > 1) {
+				ret = 0;
+
+				CLASS(printbuf, buf)();
+				bch2_log_msg_start(c, &buf);
+
+				prt_printf(&buf, "backpointer sectors > bucket sectors, but found no bad backpointers\n"
+					   "bucket %llu:%llu data type %s, counters\n",
+					   alloc_k.k->p.inode,
+					   alloc_k.k->p.offset,
+					   __bch2_data_types[a->data_type]);
+				if (sectors[ALLOC_dirty] > a->dirty_sectors)
+					prt_printf(&buf, "dirty: %u > %u\n",
+						   sectors[ALLOC_dirty], a->dirty_sectors);
+				if (sectors[ALLOC_cached] > a->cached_sectors)
+					prt_printf(&buf, "cached: %u > %u\n",
+						   sectors[ALLOC_cached], a->cached_sectors);
+				if (sectors[ALLOC_stripe] > a->stripe_sectors)
+					prt_printf(&buf, "stripe: %u > %u\n",
+						   sectors[ALLOC_stripe], a->stripe_sectors);
+
+				for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers,
+						bucket_pos_to_bp_start(ca, alloc_k.k->p),
+						bucket_pos_to_bp_end(ca, alloc_k.k->p), 0, bp_k, ret) {
+					bch2_bkey_val_to_text(&buf, c, bp_k);
+					prt_newline(&buf);
+				}
+				bch2_trans_iter_exit(trans, &iter);
+
+				bch2_print_str(c, KERN_ERR, buf.buf);
+				return ret;
+			}
+
			return check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
				bch_err_throw(c, transaction_restart_nested);
		}
 
+		if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) {
+			ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed);
+			if (ret)
+				return ret;
+		}
+
		bool empty = (sectors[ALLOC_dirty] +
			      sectors[ALLOC_stripe] +
			      sectors[ALLOC_cached]) == 0;
@@ -1101,6 +1148,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
	CLASS(btree_trans, trans)(c);
	struct extents_to_bp_state s = { .bp_start = POS_MIN };
+	struct bpos last_pos = POS_MIN;
+	unsigned nr_iters = 0;
 
	bch2_bkey_buf_init(&s.last_flushed);
	bkey_init(&s.last_flushed.k->k);
@@ -1109,7 +1158,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
			POS_MIN, BTREE_ITER_prefetch, k, ({
		bool had_mismatch;
		bch2_fs_going_ro(c) ?:
-		check_bucket_backpointer_mismatch(trans, k, &had_mismatch, &s.last_flushed);
+		check_bucket_backpointer_mismatch(trans, k, &had_mismatch, &s.last_flushed,
+						  &last_pos, &nr_iters);
	}));
	if (ret)
		goto err;
@@ -1178,7 +1228,11 @@ static int check_bucket_backpointer_pos_mismatch(struct btree_trans *trans,
	if (ret)
		return ret;
 
-	ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch, last_flushed);
+	struct bpos last_pos = POS_MIN;
+	unsigned nr_iters = 0;
+	ret = check_bucket_backpointer_mismatch(trans, k, had_mismatch,
+						last_flushed,
+						&last_pos, &nr_iters);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
 }
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 23ed7393f07f..25b01e750880 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -511,7 +511,7 @@ restart:
		if (btree_node_accessed(b)) {
			clear_btree_node_accessed(b);
			bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
-			--touched;;
+			--touched;
		} else if (!btree_node_reclaim(c, b)) {
			__bch2_btree_node_hash_remove(bc, b);
			__btree_node_data_free(b);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 83c836080ea2..8a03cd75a64f 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -2011,7 +2011,7 @@ static void btree_node_scrub_work(struct work_struct *work)
		bch_err_fn_ratelimited(c, ret);
	}
 
-	bch2_bkey_buf_exit(&scrub->key, c);;
+	bch2_bkey_buf_exit(&scrub->key, c);
	btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
	enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
	kfree(scrub);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index a282c3886168..3a677bd91676 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2449,7 +2449,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree
		}
 
		if (bkey_whiteout(k.k) &&
-		    !(iter->flags & BTREE_ITER_key_cache_fill)) {
+		    !(iter->flags & BTREE_ITER_nofilter_whiteouts)) {
			search_key = bkey_successor(iter, k.k->p);
			continue;
		}
@@ -2744,7 +2744,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
		}
 
		/* Extents can straddle iter->pos: */
-		iter->pos = bpos_min(iter->pos, k.k->p);;
+		iter->pos = bpos_min(iter->pos, k.k->p);
		if (iter->flags & BTREE_ITER_filter_snapshots)
			iter->pos.snapshot = iter->snapshot;
@@ -2867,7 +2867,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btre
		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
			     (iter->flags & BTREE_ITER_filter_snapshots) &&
-			     !(iter->flags & BTREE_ITER_key_cache_fill)))
+			     !(iter->flags & BTREE_ITER_nofilter_whiteouts)))
			iter->k.type = KEY_TYPE_deleted;
	} else {
		struct bpos next;
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index d61b782087ce..2317fe5b3c23 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -329,6 +329,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
	bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
			     BTREE_ITER_intent|
+			     BTREE_ITER_nofilter_whiteouts|
			     BTREE_ITER_key_cache_fill|
			     BTREE_ITER_cached_nofill);
	iter.flags &= ~BTREE_ITER_with_journal;
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index e6177b747216..0cd1663c2bef 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -229,6 +229,7 @@ struct btree_node_iter {
	x(snapshot_field)			\
	x(all_snapshots)			\
	x(filter_snapshots)			\
+	x(nofilter_whiteouts)			\
	x(nopreserve)				\
	x(cached_nofill)			\
	x(key_cache_fill)			\
@@ -485,7 +486,7 @@ typedef DARRAY(struct trans_kmalloc_trace) darray_trans_kmalloc_trace;
 struct btree_trans_subbuf {
	u16			base;
	u16			u64s;
-	u16			size;;
+	u16			size;
 };
 
 struct btree_trans {
diff --git a/fs/bcachefs/fast_list.h b/fs/bcachefs/fast_list.h
index 73c9bf591fd6..f67df3f72ee2 100644
--- a/fs/bcachefs/fast_list.h
+++ b/fs/bcachefs/fast_list.h
@@ -9,7 +9,7 @@ struct fast_list_pcpu;
 
 struct fast_list {
	GENRADIX(void *)	items;
-	struct ida		slots_allocated;;
+	struct ida		slots_allocated;
	struct fast_list_pcpu __percpu *buffer;
 };
diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c
index 2a6705186c44..469492f6264a 100644
--- a/fs/bcachefs/fs-io-pagecache.c
+++ b/fs/bcachefs/fs-io-pagecache.c
@@ -635,6 +635,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
		goto out;
	}
 
+	inode->ei_last_dirtied = (unsigned long) current;
+
	bch2_set_folio_dirty(c, inode, folio, &res, offset, len);
	bch2_folio_reservation_put(c, inode, &res);
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index a4f43c550ad7..07869436a964 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1296,7 +1296,7 @@ int bch2_dev_journal_bucket_delete(struct bch_dev *ca, u64 b)
		return -EINVAL;
	}
 
-	u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);;
+	u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!new_buckets)
		return bch_err_throw(c, ENOMEM_set_nr_journal_buckets);
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 8c24e2a93041..5370ccb85d2d 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -1431,38 +1431,22 @@ static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id)
	return i ? i->live_child : 0;
 }
 
-static unsigned __live_child(struct snapshot_table *t, u32 id,
-			     snapshot_id_list *delete_leaves,
-			     interior_delete_list *delete_interior)
-{
-	struct snapshot_t *s = __snapshot_t(t, id);
-	if (!s)
-		return 0;
-
-	for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++)
-		if (s->children[i] &&
-		    !snapshot_list_has_id(delete_leaves, s->children[i]) &&
-		    !interior_delete_has_id(delete_interior, s->children[i]))
-			return s->children[i];
-
-	for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) {
-		u32 live_child = s->children[i]
-			? __live_child(t, s->children[i], delete_leaves, delete_interior)
-			: 0;
-		if (live_child)
-			return live_child;
-	}
-
-	return 0;
-}
-
-static unsigned live_child(struct bch_fs *c, u32 id)
+static unsigned live_child(struct bch_fs *c, u32 start)
 {
	struct snapshot_delete *d = &c->snapshot_delete;
 
	guard(rcu)();
-	return __live_child(rcu_dereference(c->snapshots), id,
-			    &d->delete_leaves, &d->delete_interior);
+	struct snapshot_table *t = rcu_dereference(c->snapshots);
+
+	for (u32 id = bch2_snapshot_tree_next(t, start);
+	     id && id != start;
+	     id = bch2_snapshot_tree_next(t, id))
+		if (bch2_snapshot_is_leaf(c, id) &&
+		    !snapshot_list_has_id(&d->delete_leaves, id) &&
+		    !interior_delete_has_id(&d->delete_interior, id))
+			return id;
+
+	return 0;
 }
 
 static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id)
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 40fa87ce1d09..c88759964575 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -79,7 +79,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
	} else {
		darray_for_each(c->incompat_versions_requested, i)
			if (version == *i)
-				return -BCH_ERR_may_not_use_incompat_feature;
+				return bch_err_throw(c, may_not_use_incompat_feature);
 
		darray_push(&c->incompat_versions_requested, version);
 
		CLASS(printbuf, buf)();
@@ -90,7 +90,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
		prt_printf(&buf, "\n set version_upgrade=incompat to enable");
 
		bch_notice(c, "%s", buf.buf);
-		return -BCH_ERR_may_not_use_incompat_feature;
+		return bch_err_throw(c, may_not_use_incompat_feature);
	}
 }