summary refs log tree commit diff
diff options
context:
space:
mode:
authorDarrick J. Wong <djwong@kernel.org>2021-09-01 10:40:20 -0700
committerDarrick J. Wong <djwong@kernel.org>2021-09-17 18:54:50 -0700
commit2fbdb9683fb8c1dab149e11dcfca3a08ffc67e89 (patch)
tree8cea18405ccc6660361836772fb1061d1056a87e
parent7a5c53636670cf8ec83bec3730817260d616d83b (diff)
xfs: log EFIs for all btree blocks being used to stage a btree (tag: repair-prep-for-bulk-loading_2021-09-17)
We need to log EFIs for every extent that we allocate for the purpose of staging a new btree so that if we fail then the blocks will be freed during log recovery. Add a function to relog the EFIs, so that repair can relog them all every time it creates a new btree block, which will help us to avoid pinning the log tail. Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-rw-r--r-- fs/xfs/scrub/repair.c | 103
-rw-r--r-- fs/xfs/scrub/repair.h | 3
2 files changed, 102 insertions, 4 deletions
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index e9346dde608f..7331d54eaf55 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -13,6 +13,7 @@
#include "xfs_btree_staging.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
+#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
@@ -31,6 +32,8 @@
#include "xfs_da_btree.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
+#include "xfs_defer.h"
+#include "xfs_extfree_item.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -430,12 +433,39 @@ xrep_newbt_init_bare(
XFS_AG_RESV_NONE);
}
+/*
+ * Set up automatic reaping of the blocks reserved for btree reconstruction in
+ * case we crash by logging a deferred free item for each extent we allocate so
+ * that we can get all of the space back if we crash before we can commit the
+ * new btree. This function returns a token that can be used to cancel
+ * automatic reaping if repair is successful.
+ */
+static void
+xrep_newbt_schedule_reap(
+ struct xrep_newbt *xnr,
+ struct xrep_newbt_resv *resv)
+{
+ struct xfs_extent_free_item efi_item = {
+ .xefi_startblock = resv->fsbno,
+ .xefi_blockcount = resv->len,
+ .xefi_oinfo = xnr->oinfo, /* struct copy */
+ .xefi_skip_discard = true,
+ };
+ LIST_HEAD(items);
+
+ INIT_LIST_HEAD(&efi_item.xefi_list);
+ list_add(&efi_item.xefi_list, &items);
+ resv->efi = xfs_extent_free_defer_type.create_intent(xnr->sc->tp,
+ &items, 1, false);
+}
+
/* Designate specific blocks to be used to build our new btree. */
-int
-xrep_newbt_add_blocks(
+static int
+__xrep_newbt_add_blocks(
struct xrep_newbt *xnr,
xfs_fsblock_t fsbno,
- xfs_extlen_t len)
+ xfs_extlen_t len,
+ bool auto_reap)
{
struct xrep_newbt_resv *resv;
@@ -447,10 +477,25 @@ xrep_newbt_add_blocks(
resv->fsbno = fsbno;
resv->len = len;
resv->used = 0;
+ if (auto_reap)
+ xrep_newbt_schedule_reap(xnr, resv);
list_add_tail(&resv->list, &xnr->resv_list);
return 0;
}
+/*
+ * Allow certain callers to add disk space directly to the reservation.
+ * Callers are responsible for cleaning up the reservations.
+ */
+int
+xrep_newbt_add_blocks(
+ struct xrep_newbt *xnr,
+ xfs_fsblock_t fsbno,
+ xfs_extlen_t len)
+{
+ return __xrep_newbt_add_blocks(xnr, fsbno, len, false);
+}
+
/* Allocate disk space for our new btree. */
int
xrep_newbt_alloc_blocks(
@@ -492,7 +537,8 @@ xrep_newbt_alloc_blocks(
XFS_FSB_TO_AGBNO(sc->mp, args.fsbno),
args.len, xnr->oinfo.oi_owner);
- error = xrep_newbt_add_blocks(xnr, args.fsbno, args.len);
+ error = __xrep_newbt_add_blocks(xnr, args.fsbno, args.len,
+ true);
if (error)
return error;
@@ -508,6 +554,35 @@ xrep_newbt_alloc_blocks(
}
/*
+ * Relog the EFIs attached to a staging btree so that we don't pin the log
+ * tail. Same logic as xfs_defer_relog.
+ */
+int
+xrep_newbt_relog_efis(
+ struct xrep_newbt *xnr)
+{
+ struct xrep_newbt_resv *resv;
+ struct xfs_trans *tp = xnr->sc->tp;
+
+ list_for_each_entry(resv, &xnr->resv_list, list) {
+ /*
+ * If the log intent item for this deferred op is in a
+ * different checkpoint, relog it to keep the log tail moving
+ * forward. We're ok with this being racy because an incorrect
+ * decision means we'll be a little slower at pushing the tail.
+ */
+ if (!resv->efi || xfs_log_item_in_current_chkpt(resv->efi))
+ continue;
+
+ resv->efi = xfs_trans_item_relog(resv->efi, tp);
+ }
+
+ if (tp->t_flags & XFS_TRANS_DIRTY)
+ return xrep_roll_trans(xnr->sc);
+ return 0;
+}
+
+/*
* Release blocks that were reserved for a btree repair. If the repair
* succeeded then we log deferred frees for unused blocks. Otherwise, we try
* to free the extents immediately to roll the filesystem back to where it was
@@ -520,6 +595,25 @@ xrep_newbt_destroy_reservation(
bool cancel_repair)
{
struct xfs_scrub *sc = xnr->sc;
+ struct xfs_efd_log_item *efdp;
+ struct xfs_extent *extp;
+ struct xfs_log_item *efd_lip;
+
+ /*
+ * Earlier, we logged EFIs for the extents that we allocated to hold
+ * the new btree so that we could automatically roll back those
+ * allocations if the system crashed. Now we log an EFD to cancel the
+ * EFI, either because the repair succeeded and the new blocks are in
+ * use; or because the repair was cancelled and we're about to free
+ * the extents directly.
+ */
+ efd_lip = xfs_extent_free_defer_type.create_done(sc->tp, resv->efi, 1);
+ efdp = container_of(efd_lip, struct xfs_efd_log_item, efd_item);
+ extp = efdp->efd_format.efd_extents;
+ extp->ext_start = resv->fsbno;
+ extp->ext_len = resv->len;
+ efdp->efd_next_extent++;
+ set_bit(XFS_LI_DIRTY, &efd_lip->li_flags);
if (cancel_repair) {
int error;
@@ -588,6 +682,7 @@ junkit:
* reservations.
*/
list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
+ xfs_extent_free_defer_type.abort_intent(resv->efi);
list_del(&resv->list);
kmem_free(resv);
}
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index f44e08d62b61..e897ccc09464 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -70,6 +70,8 @@ struct xrep_newbt_resv {
/* Link to list of extents that we've reserved. */
struct list_head list;
+ struct xfs_log_item *efi;
+
/* FSB of the block we reserved. */
xfs_fsblock_t fsbno;
@@ -116,6 +118,7 @@ int xrep_newbt_claim_block(struct xfs_btree_cur *cur, struct xrep_newbt *xnr,
union xfs_btree_ptr *ptr);
void xrep_bload_estimate_slack(struct xfs_scrub *sc,
struct xfs_btree_bload *bload);
+int xrep_newbt_relog_efis(struct xrep_newbt *xnr);
#else