author     Darrick J. Wong <darrick.wong@oracle.com>	2019-08-30 15:44:44 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>	2019-10-09 09:39:18 -0700
commit     5a4bacba541db5c248e26b2d5980e962b4c018fb
tree       d3c28413c2fcace09f5195d82d9d0fe47829e13e
parent     3c224db1310734711f08159c0e75f3316e0c991d
xfs: remove the for_each_xbitmap_ helpers
Remove the for_each_xbitmap_ macros in favor of proper iterator functions.
We'll soon be switching this data structure over to an interval tree
implementation, which means that we can't allow callers to modify the
bitmap during iteration without telling us.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
-rw-r--r--  fs/xfs/scrub/agheader_repair.c   73
-rw-r--r--  fs/xfs/scrub/bitmap.c            59
-rw-r--r--  fs/xfs/scrub/bitmap.h            22
-rw-r--r--  fs/xfs/scrub/repair.c            60
4 files changed, 148 insertions(+), 66 deletions(-)
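As context for the diff below, here is a minimal sketch of the calling convention the new iterators expect, based only on the xbitmap_iter_set() declaration and the return-code comment this patch adds to scrub/bitmap.h. The count_runs callback, its context structure, and the stop-after-N-runs policy are hypothetical and exist only to illustrate the 0 / -ECANCELED convention:

    #include <linux/errno.h>
    #include "scrub/bitmap.h"

    /* Hypothetical iteration context; not part of this patch. */
    struct count_runs_ctx {
    	uint64_t		nr_runs;	/* runs visited so far */
    	uint64_t		max_runs;	/* stop after this many */
    };

    /*
     * Visit one run of set bits.  Returning 0 continues the walk; returning
     * -ECANCELED stops it, and is safe to use as a "stop" signal because
     * the iterators never generate that code on their own.
     */
    static int
    count_runs(
    	uint64_t		start,
    	uint64_t		len,
    	void			*priv)
    {
    	struct count_runs_ctx	*ctx = priv;

    	ctx->nr_runs++;
    	if (ctx->nr_runs >= ctx->max_runs)
    		return -ECANCELED;
    	return 0;
    }

    /*
     * Callers no longer open-code for_each_xbitmap_extent(); they hand the
     * bitmap a callback and let xbitmap_iter_set() drive the loop.
     */
    static int
    count_some_runs(
    	struct xbitmap		*bitmap)
    {
    	struct count_runs_ctx	ctx = { .max_runs = 16 };
    	int			error;

    	error = xbitmap_iter_set(bitmap, count_runs, &ctx);
    	if (error == -ECANCELED)
    		error = 0;	/* stopped on purpose, not an error */
    	return error;
    }

The same pattern drives xbitmap_iter_set_bits(), which invokes the callback once per set bit rather than once per run; xrep_agfl_fill() and xrep_reap_block() in the patch below are the real conversions of the two former macro users.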
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index f35596cc26fb..145e9d359d2f 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -560,6 +560,40 @@ xrep_agfl_update_agf(
XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
}
+struct xrep_agfl_fill {
+ struct xbitmap used_extents;
+ struct xfs_scrub *sc;
+ __be32 *agfl_bno;
+ xfs_agblock_t flcount;
+ unsigned int fl_off;
+};
+
+/* Fill the AGFL with whatever blocks are in this extent. */
+static int
+xrep_agfl_fill(
+ uint64_t start,
+ uint64_t len,
+ void *priv)
+{
+ struct xrep_agfl_fill *af = priv;
+ struct xfs_scrub *sc = af->sc;
+ xfs_fsblock_t fsbno = start;
+
+ while (fsbno < start + len && af->fl_off < af->flcount)
+ af->agfl_bno[af->fl_off++] =
+ cpu_to_be32(XFS_FSB_TO_AGBNO(sc->mp, fsbno++));
+
+ trace_xrep_agfl_insert(sc->mp, sc->sa.agno,
+ XFS_FSB_TO_AGBNO(sc->mp, start), len);
+
+ xbitmap_set(&af->used_extents, start, fsbno - 1);
+
+ if (af->fl_off == af->flcount)
+ return -ECANCELED;
+
+ return 0;
+}
+
/* Write out a totally new AGFL. */
STATIC void
xrep_agfl_init_header(
@@ -568,13 +602,12 @@ xrep_agfl_init_header(
struct xbitmap *agfl_extents,
xfs_agblock_t flcount)
{
+ struct xrep_agfl_fill af = {
+ .sc = sc,
+ .flcount = flcount,
+ };
struct xfs_mount *mp = sc->mp;
- __be32 *agfl_bno;
- struct xbitmap_range *br;
- struct xbitmap_range *n;
struct xfs_agfl *agfl;
- xfs_agblock_t agbno;
- unsigned int fl_off;
ASSERT(flcount <= xfs_agfl_size(mp));
@@ -593,35 +626,15 @@ xrep_agfl_init_header(
* blocks than fit in the AGFL, they will be freed in a subsequent
* step.
*/
- fl_off = 0;
- agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp);
- for_each_xbitmap_extent(br, n, agfl_extents) {
- agbno = XFS_FSB_TO_AGBNO(mp, br->start);
-
- trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);
-
- while (br->len > 0 && fl_off < flcount) {
- agfl_bno[fl_off] = cpu_to_be32(agbno);
- fl_off++;
- agbno++;
-
- /*
- * We've now used br->start by putting it in the AGFL,
- * so bump br so that we don't reap the block later.
- */
- br->start++;
- br->len--;
- }
-
- if (br->len)
- break;
- list_del(&br->list);
- kmem_free(br);
- }
+ xbitmap_init(&af.used_extents);
+ af.agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agfl_bp),
+ xbitmap_iter_set(agfl_extents, xrep_agfl_fill, &af);
+ xbitmap_disunion(agfl_extents, &af.used_extents);
/* Write new AGFL to disk. */
xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
+ xbitmap_destroy(&af.used_extents);
}
/* Repair the AGFL. */
diff --git a/fs/xfs/scrub/bitmap.c b/fs/xfs/scrub/bitmap.c
index 8b704d7b5855..1041f17f6bb6 100644
--- a/fs/xfs/scrub/bitmap.c
+++ b/fs/xfs/scrub/bitmap.c
@@ -12,6 +12,9 @@
#include "xfs_btree.h"
#include "scrub/bitmap.h"
+#define for_each_xbitmap_extent(bex, n, bitmap) \
+ list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
+
/*
* Set a range of this bitmap. Caller must ensure the range is not set.
*
@@ -311,3 +314,59 @@ xbitmap_hweight(
return ret;
}
+
+/* Call a function for every run of set bits in this bitmap. */
+int
+xbitmap_iter_set(
+ struct xbitmap *bitmap,
+ xbitmap_walk_run_fn fn,
+ void *priv)
+{
+ struct xbitmap_range *bex, *n;
+ int error;
+
+ for_each_xbitmap_extent(bex, n, bitmap) {
+ error = fn(bex->start, bex->len, priv);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+struct xbitmap_walk_bits {
+ xbitmap_walk_bit_fn fn;
+ void *priv;
+};
+
+/* Walk all the bits in a run. */
+static int
+xbitmap_walk_bits_in_run(
+ uint64_t start,
+ uint64_t len,
+ void *priv)
+{
+ struct xbitmap_walk_bits *wb = priv;
+ uint64_t i;
+ int error;
+
+ for (i = start; i < start + len; i++) {
+ error = wb->fn(i, wb->priv);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+/* Call a function for every set bit in this bitmap. */
+int
+xbitmap_iter_set_bits(
+ struct xbitmap *bitmap,
+ xbitmap_walk_bit_fn fn,
+ void *priv)
+{
+ struct xbitmap_walk_bits wb = {.fn = fn, .priv = priv};
+
+ return xbitmap_iter_set(bitmap, xbitmap_walk_bits_in_run, &wb);
+}
diff --git a/fs/xfs/scrub/bitmap.h b/fs/xfs/scrub/bitmap.h
index 900646b72de1..27fde5b4a753 100644
--- a/fs/xfs/scrub/bitmap.h
+++ b/fs/xfs/scrub/bitmap.h
@@ -19,13 +19,6 @@ struct xbitmap {
void xbitmap_init(struct xbitmap *bitmap);
void xbitmap_destroy(struct xbitmap *bitmap);
-#define for_each_xbitmap_extent(bex, n, bitmap) \
- list_for_each_entry_safe((bex), (n), &(bitmap)->list, list)
-
-#define for_each_xbitmap_block(b, bex, n, bitmap) \
- list_for_each_entry_safe((bex), (n), &(bitmap)->list, list) \
- for ((b) = (bex)->start; (b) < (bex)->start + (bex)->len; (b)++)
-
int xbitmap_set(struct xbitmap *bitmap, uint64_t start, uint64_t len);
int xbitmap_disunion(struct xbitmap *bitmap, struct xbitmap *sub);
int xbitmap_set_btcur_path(struct xbitmap *bitmap,
@@ -34,4 +27,19 @@ int xbitmap_set_btblocks(struct xbitmap *bitmap,
struct xfs_btree_cur *cur);
uint64_t xbitmap_hweight(struct xbitmap *bitmap);
+/*
+ * Return codes for the bitmap iterator functions are 0 to continue iterating,
+ * and non-zero to stop iterating. Any non-zero value will be passed up to the
+ * iteration caller. The special value -ECANCELED can be used to stop
+ * iteration, because neither bitmap iterator ever generates that error code on
+ * its own.
+ */
+typedef int (*xbitmap_walk_run_fn)(uint64_t start, uint64_t len, void *priv);
+int xbitmap_iter_set(struct xbitmap *bitmap, xbitmap_walk_run_fn fn,
+ void *priv);
+
+typedef int (*xbitmap_walk_bit_fn)(uint64_t bit, void *priv);
+int xbitmap_iter_set_bits(struct xbitmap *bitmap, xbitmap_walk_bit_fn fn,
+ void *priv);
+
#endif /* __XFS_SCRUB_BITMAP_H__ */
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index d41da4c44f10..588bc054db5c 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -507,15 +507,21 @@ xrep_reap_invalidate_block(
xfs_trans_binval(sc->tp, bp);
}
+struct xrep_reap_block {
+ struct xfs_scrub *sc;
+ const struct xfs_owner_info *oinfo;
+ enum xfs_ag_resv_type resv;
+ unsigned int deferred;
+};
+
/* Dispose of a single block. */
STATIC int
xrep_reap_block(
- struct xfs_scrub *sc,
- xfs_fsblock_t fsbno,
- const struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type resv,
- unsigned int *deferred)
+ uint64_t fsbno,
+ void *priv)
{
+ struct xrep_reap_block *rb = priv;
+ struct xfs_scrub *sc = rb->sc;
struct xfs_btree_cur *cur;
struct xfs_buf *agf_bp = NULL;
xfs_agnumber_t agno;
@@ -527,6 +533,10 @@ xrep_reap_block(
agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
+ ASSERT(sc->ip != NULL || agno == sc->sa.agno);
+
+ trace_xrep_dispose_btree_extent(sc->mp, agno, agbno, 1);
+
/*
* If we are repairing per-inode metadata, we need to read in the AGF
* buffer. Otherwise, we're repairing a per-AG structure, so reuse
@@ -544,7 +554,8 @@ xrep_reap_block(
cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);
/* Can we find any other rmappings? */
- error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
+ error = xfs_rmap_has_other_keys(cur, agbno, 1, rb->oinfo,
+ &has_other_rmap);
xfs_btree_del_cursor(cur, error);
if (error)
goto out_free;
@@ -563,8 +574,9 @@ xrep_reap_block(
* to run xfs_repair.
*/
if (has_other_rmap) {
- error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
- } else if (resv == XFS_AG_RESV_AGFL) {
+ error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1,
+ rb->oinfo);
+ } else if (rb->resv == XFS_AG_RESV_AGFL) {
xrep_reap_invalidate_block(sc, fsbno);
error = xrep_put_freelist(sc, agbno);
} else {
@@ -576,16 +588,16 @@ xrep_reap_block(
* reservation.
*/
xrep_reap_invalidate_block(sc, fsbno);
- __xfs_bmap_add_free(sc->tp, fsbno, 1, oinfo, true);
- (*deferred)++;
- need_roll = *deferred > 100;
+ __xfs_bmap_add_free(sc->tp, fsbno, 1, rb->oinfo, true);
+ rb->deferred++;
+ need_roll = rb->deferred > 100;
}
if (agf_bp != sc->sa.agf_bp)
xfs_trans_brelse(sc->tp, agf_bp);
if (error || !need_roll)
return error;
- *deferred = 0;
+ rb->deferred = 0;
if (sc->ip)
return xfs_trans_roll_inode(&sc->tp, sc->ip);
return xrep_roll_ag_trans(sc);
@@ -604,27 +616,17 @@ xrep_reap_extents(
const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type)
{
- struct xbitmap_range *bmr;
- struct xbitmap_range *n;
- xfs_fsblock_t fsbno;
- unsigned int deferred = 0;
+ struct xrep_reap_block rb = {
+ .sc = sc,
+ .oinfo = oinfo,
+ .resv = type,
+ };
int error = 0;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
- for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
- ASSERT(sc->ip != NULL ||
- XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
- trace_xrep_dispose_btree_extent(sc->mp,
- XFS_FSB_TO_AGNO(sc->mp, fsbno),
- XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);
-
- error = xrep_reap_block(sc, fsbno, oinfo, type, &deferred);
- if (error)
- break;
- }
-
- if (error || deferred == 0)
+ error = xbitmap_iter_set_bits(bitmap, xrep_reap_block, &rb);
+ if (error || rb.deferred == 0)
return error;
if (sc->ip)