author     Darrick J. Wong <djwong@kernel.org>    2021-01-05 17:43:36 -0800
committer  Darrick J. Wong <djwong@kernel.org>    2021-08-25 22:25:54 -0700
commit     fc8f947fa4eaa0152ba0801b4ad82446a4a76776
tree       4823a0f8c3d88cf0fc128f9fb0c37ea24493a551
parent     f33d293dac8d7e61e00407d9591d071fa8e37353
xfs: track quota updates during live quotacheck
Create a shadow dqtrx system in the quotacheck code that hooks the regular dquot counter update code. This will be the means to keep our copy of the dquot counters up to date while the scan runs in real time.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
-rw-r--r--  fs/xfs/Kconfig             |   1
-rw-r--r--  fs/xfs/scrub/quotacheck.c  | 378
-rw-r--r--  fs/xfs/scrub/quotacheck.h  |  16
-rw-r--r--  fs/xfs/xfs_mount.c         |  43
-rw-r--r--  fs/xfs/xfs_mount.h         |  10
-rw-r--r--  fs/xfs/xfs_qm.c            |  18
-rw-r--r--  fs/xfs/xfs_qm.h            |  15
-rw-r--r--  fs/xfs/xfs_quota.h         |  19
-rw-r--r--  fs/xfs/xfs_trans_dquot.c   |  72
9 files changed, 553 insertions(+), 19 deletions(-)
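
[Editor's note, not part of the patch: the hooks added below are built on the kernel's SRCU notifier chains (hence the new "depends on SRCU" in Kconfig), which keep an empty chain cheap so regular quota updates are barely affected while live quotacheck is not running. A minimal, self-contained sketch of that notifier pattern looks roughly like the following; the demo_* names are made up for illustration.]

    /* Minimal SRCU notifier sketch; demo_* names are illustrative only. */
    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/module.h>

    static struct srcu_notifier_head demo_chain;

    /* Runs under srcu_read_lock() each time the chain is called. */
    static int
    demo_listener(
    	struct notifier_block	*nb,
    	unsigned long		action,
    	void			*data)
    {
    	pr_debug("demo hook fired: action=%lu value=%u\n",
    			action, *(unsigned int *)data);
    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
    	.notifier_call	= demo_listener,
    };

    static int __init demo_hook_init(void)
    {
    	unsigned int	value = 42;

    	srcu_init_notifier_head(&demo_chain);

    	/* Listeners register without blocking callers of the chain. */
    	srcu_notifier_chain_register(&demo_chain, &demo_nb);

    	/* The hook point itself; listeners are allowed to sleep. */
    	srcu_notifier_call_chain(&demo_chain, 1, &value);

    	srcu_notifier_chain_unregister(&demo_chain, &demo_nb);
    	return 0;
    }
    module_init(demo_hook_init);
    MODULE_LICENSE("GPL");

[The xfs_hook_init/add/call/del helpers added to xfs_mount.c below are thin wrappers around exactly these calls.]
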
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 7f12b40146b3..85d5de6dc2a6 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -98,6 +98,7 @@ config XFS_ONLINE_SCRUB
default n
depends on XFS_FS
depends on TMPFS && SHMEM
+ depends on SRCU
help
If you say Y here you will be able to check metadata on a
mounted XFS filesystem. This feature is intended to reduce
diff --git a/fs/xfs/scrub/quotacheck.c b/fs/xfs/scrub/quotacheck.c
index 299a6589ad89..ed119b81c3bb 100644
--- a/fs/xfs/scrub/quotacheck.c
+++ b/fs/xfs/scrub/quotacheck.c
@@ -33,16 +33,62 @@
* as the summation of the block usage counts for every file on the filesystem.
* Therefore, we compute the correct icount, bcount, and rtbcount values by
* creating a shadow quota counter structure and walking every inode.
+ *
+ * Because we are scanning a live filesystem, it's possible that another thread
+ * will try to update the quota counters for an inode that we've already
+ * scanned. This will cause our counts to be incorrect. Therefore, we hook
+ * the live transaction code in two places: (1) when the callers update the
+ * per-transaction dqtrx structure to log quota counter updates; and (2) when
+ * transaction commit actually logs those updates to the incore dquot. By
+ * shadowing transaction updates in this manner, live quotacheck can lock the
+ * dquot and the shadow structure to ensure that its own copies are not out
+ * of date.
+ *
+ * Note that we use srcu notifier hooks to minimize the overhead when live
+ * quotacheck is /not/ running.
*/
+/* Track the quota deltas for a dquot in a transaction. */
+struct xqcheck_dqtrx {
+ struct xfs_dquot *dqp;
+ int64_t icount_delta;
+
+ int64_t bcount_delta;
+ int64_t delbcnt_delta;
+
+ int64_t rtbcount_delta;
+ int64_t delrtb_delta;
+};
+
+#define XQCHECK_MAX_NR_DQTRXS (XFS_QM_TRANS_DQTYPES * XFS_QM_TRANS_MAXDQS)
+
+/*
+ * Track the quota deltas for all dquots attached to a transaction if the
+ * quota deltas are being applied to an inode that we already scanned.
+ */
+struct xqcheck_dqacct {
+ struct rhash_head hash;
+ uintptr_t tp;
+ struct xqcheck_dqtrx dqtrx[XQCHECK_MAX_NR_DQTRXS];
+ unsigned int refcount;
+};
+
+/* Free a shadow dquot accounting structure. */
+static void
+xqcheck_dqacct_free(
+ void *ptr,
+ void *arg)
+{
+ struct xqcheck_dqacct *dqa = ptr;
+
+ kmem_free(dqa);
+}
+
/* Set us up to scrub quota counters. */
int
xchk_setup_quotacheck(
struct xfs_scrub *sc)
{
- /* Not ready for general consumption yet. */
- return -EOPNOTSUPP;
-
if (!XFS_IS_QUOTA_ON(sc->mp))
return -ENOENT;
@@ -79,7 +125,7 @@ xqcheck_get_shadow_dquot(
return error;
}
-/* Update an incore dquot information. */
+/* Update the incore dquot information. Caller must hold the xqc lock. */
static int
xqcheck_update_incore(
struct xqcheck *xqc,
@@ -116,6 +162,210 @@ xqcheck_update_incore(
return error;
}
+/* Decide if this is the shadow dquot accounting structure for a transaction. */
+static int
+xqcheck_dqacct_obj_cmpfn(
+ struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const uintptr_t *key = arg->key;
+ const struct xqcheck_dqacct *dqa = obj;
+
+ if (dqa->tp != *key)
+ return 1;
+ return 0;
+}
+
+static const struct rhashtable_params xqcheck_dqacct_hash_params = {
+ .min_size = 32,
+ .key_len = sizeof(uintptr_t),
+ .key_offset = offsetof(struct xqcheck_dqacct, tp),
+ .head_offset = offsetof(struct xqcheck_dqacct, hash),
+ .automatic_shrinking = true,
+ .obj_cmpfn = xqcheck_dqacct_obj_cmpfn,
+};
+
+/* Find a shadow dqtrx slot for the given dquot. */
+STATIC struct xqcheck_dqtrx *
+xqcheck_get_dqtrx(
+ struct xqcheck_dqacct *dqa,
+ struct xfs_dquot *dqp)
+{
+ int i;
+
+ for (i = 0; i < XQCHECK_MAX_NR_DQTRXS; i++) {
+ if (dqa->dqtrx[i].dqp == NULL ||
+ dqa->dqtrx[i].dqp == dqp)
+ return &dqa->dqtrx[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * Create and fill out a quota delta tracking structure to shadow the updates
+ * going on in the regular quota code.
+ */
+static int
+xqcheck_mod_dquot(
+ struct notifier_block *nb,
+ unsigned long arg,
+ void *data)
+{
+ struct xfs_trans_mod_dquot_params *p = data;
+ struct xqcheck *xqc;
+ struct xqcheck_dqacct *dqa;
+ struct xqcheck_dqtrx *dqtrx;
+ int error;
+
+ xqc = container_of(nb, struct xqcheck, mod_hook);
+
+ /* Skip quota reservation fields. */
+ switch (p->field) {
+ case XFS_TRANS_DQ_BCOUNT:
+ case XFS_TRANS_DQ_DELBCOUNT:
+ case XFS_TRANS_DQ_ICOUNT:
+ case XFS_TRANS_DQ_RTBCOUNT:
+ case XFS_TRANS_DQ_DELRTBCOUNT:
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ /* Skip inodes that haven't been scanned yet. */
+ mutex_lock(&xqc->lock);
+ if (xqc->last_ino < p->ip->i_ino || xqc->hook_dead)
+ goto out_unlock;
+
+ /* Make a shadow quota accounting tracker for this transaction. */
+ dqa = rhashtable_lookup_fast(&xqc->shadow_dquot_acct, &p->tp,
+ xqcheck_dqacct_hash_params);
+ if (!dqa) {
+ dqa = kmem_zalloc(sizeof(*dqa), KM_MAYFAIL | KM_NOFS);
+ if (!dqa)
+ goto fail;
+
+ dqa->tp = (uintptr_t)p->tp;
+ error = rhashtable_insert_fast(&xqc->shadow_dquot_acct,
+ &dqa->hash, xqcheck_dqacct_hash_params);
+ if (error) {
+ kmem_free(dqa);
+ goto fail;
+ }
+ }
+
+ /* Find the shadow dqtrx (or an empty slot) here. */
+ dqtrx = xqcheck_get_dqtrx(dqa, p->dqp);
+ if (!dqtrx)
+ goto fail;
+ if (dqtrx->dqp == NULL) {
+ dqtrx->dqp = p->dqp;
+ dqa->refcount++;
+ }
+
+ /* Update counter */
+ switch (p->field) {
+ case XFS_TRANS_DQ_BCOUNT:
+ dqtrx->bcount_delta += p->delta;
+ break;
+ case XFS_TRANS_DQ_DELBCOUNT:
+ dqtrx->delbcnt_delta += p->delta;
+ break;
+ case XFS_TRANS_DQ_ICOUNT:
+ dqtrx->icount_delta += p->delta;
+ break;
+ case XFS_TRANS_DQ_RTBCOUNT:
+ dqtrx->rtbcount_delta += p->delta;
+ break;
+ case XFS_TRANS_DQ_DELRTBCOUNT:
+ dqtrx->delrtb_delta += p->delta;
+ break;
+ }
+
+ goto out_unlock;
+fail:
+ xqc->hook_dead = true;
+out_unlock:
+ mutex_unlock(&xqc->lock);
+ return NOTIFY_DONE;
+}
+
+/*
+ * Apply the transaction quota deltas to our shadow quota accounting info when
+ * the regular quota code is doing the same.
+ */
+static int
+xqcheck_apply_deltas(
+ struct notifier_block *nb,
+ unsigned long arg,
+ void *data)
+{
+ struct xfs_trans_apply_dquot_deltas_params *p = data;
+ struct xqcheck *xqc;
+ struct xqcheck_dqacct *dqa;
+ struct xqcheck_dqtrx *dqtrx;
+ struct xfbma *counts;
+ int error;
+
+ xqc = container_of(nb, struct xqcheck, apply_hook);
+
+ /* Map the dquot type to an incore counter object. */
+ switch (xfs_dquot_type(p->dqp)) {
+ case XFS_DQTYPE_USER:
+ counts = xqc->ucounts;
+ break;
+ case XFS_DQTYPE_GROUP:
+ counts = xqc->gcounts;
+ break;
+ case XFS_DQTYPE_PROJ:
+ counts = xqc->pcounts;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ mutex_lock(&xqc->lock);
+ if (xqc->hook_dead)
+ goto out_unlock;
+
+ /*
+ * Find the shadow dqtrx for this transaction and dquot, if any deltas
+ * need to be applied here.
+ */
+ dqa = rhashtable_lookup_fast(&xqc->shadow_dquot_acct, &p->tp,
+ xqcheck_dqacct_hash_params);
+ if (!dqa)
+ goto out_unlock;
+ dqtrx = xqcheck_get_dqtrx(dqa, p->dqp);
+ if (!dqtrx || dqtrx->dqp == NULL)
+ goto out_unlock;
+
+ /* Update our shadow dquot. */
+ if (arg) {
+ error = xqcheck_update_incore(xqc, counts, p->dqp->q_id,
+ dqtrx->icount_delta,
+ dqtrx->bcount_delta + dqtrx->delbcnt_delta,
+ dqtrx->rtbcount_delta + dqtrx->delrtb_delta);
+ if (error)
+ goto fail;
+ }
+
+ /* Free the shadow accounting structure if that was the last user. */
+ dqa->refcount--;
+ if (dqa->refcount == 0) {
+ error = rhashtable_remove_fast(&xqc->shadow_dquot_acct,
+ &dqa->hash, xqcheck_dqacct_hash_params);
+ if (error)
+ goto fail;
+ xqcheck_dqacct_free(dqa, NULL);
+ }
+
+ goto out_unlock;
+fail:
+ xqc->hook_dead = true;
+out_unlock:
+ mutex_unlock(&xqc->lock);
+ return NOTIFY_DONE;
+}
+
/* Record this inode's quota usage in our shadow quota counter data. */
STATIC int
xqcheck_inode(
@@ -137,26 +387,66 @@ xqcheck_inode(
}
xfs_inode_count_blocks(tp, ip, &nblks, &rtblks);
- /* Update the shadow dquot counters. */
+ /* Update the shadow dquot counters if we haven't already failed. */
+ mutex_lock(&xqc->lock);
+ if (xqc->hook_dead) {
+ xchk_set_incomplete(xqc->sc);
+ error = -ECANCELED;
+ goto out_xqc_lock;
+ }
+
id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_USER);
error = xqcheck_update_incore(xqc, xqc->ucounts, id, 1, nblks, rtblks);
if (error)
- goto out_ilock;
+ goto out_xqc_lock;
id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_GROUP);
error = xqcheck_update_incore(xqc, xqc->gcounts, id, 1, nblks, rtblks);
if (error)
- goto out_ilock;
+ goto out_xqc_lock;
id = xfs_qm_id_for_quotatype(ip, XFS_DQTYPE_PROJ);
error = xqcheck_update_incore(xqc, xqc->pcounts, id, 1, nblks, rtblks);
+ if (error)
+ goto out_xqc_lock;
+
+ /*
+ * Update the quotacheck scan cursor so that the quota hooks will
+ * capture any quota updates made on behalf of this inode after we
+ * unlock it.
+ */
+ xqc->last_ino = ip->i_ino;
+out_xqc_lock:
+ if (error) {
+ xchk_set_incomplete(xqc->sc);
+ xqc->hook_dead = true;
+ }
+ mutex_unlock(&xqc->lock);
out_ilock:
xfs_iunlock(ip, ilock_flags);
return error;
}
/*
+ * Update the quotacheck scan cursor so that the quota hooks will begin to
+ * capture quota updates being made by ongoing transactions. Do this with the
+ * AGI held so that no other threads can create new inodes in this AG.
+ */
+static inline void
+xqcheck_move_cursor(
+ struct xqcheck *xqc,
+ xfs_agnumber_t agno,
+ xfs_agino_t agino,
+ xfs_ino_t *ino)
+{
+ mutex_lock(&xqc->lock);
+ *ino = XFS_AGINO_TO_INO(xqc->sc->mp, agno, agino);
+ xqc->last_ino = *ino - 1;
+ mutex_unlock(&xqc->lock);
+}
+
+/*
* Advance ino to the next inode that the inobt thinks is allocated, being
* careful to jump to the next AG and to skip quota inodes. Advancing ino
* effectively means that we've pushed the quotacheck scan forward, so set the
@@ -193,13 +483,13 @@ next_ino:
if (error)
goto out_buf;
if (agino == NULLAGINO) {
- *ino = XFS_AGINO_TO_INO(sc->mp, agno + 1, 0);
+ xqcheck_move_cursor(xqc, agno + 1, 0, ino);
xfs_trans_brelse(sc->tp, agi_bp);
xfs_perag_put(pag);
goto next_ag;
}
- *ino = XFS_AGINO_TO_INO(sc->mp, agno, agino);
+ xqcheck_move_cursor(xqc, agno, agino, ino);
if (xfs_is_quota_inode(&sc->mp->m_sb, *ino))
goto next_ino;
@@ -280,6 +570,12 @@ xqcheck_compare_dquot(
struct xfbma *counts = xqcheck_counters_for(xqc, dqtype);
int error;
+ mutex_lock(&xqc->lock);
+ if (xqc->hook_dead) {
+ xchk_set_incomplete(xqc->sc);
+ error = -ECANCELED;
+ goto out_unlock;
+ }
error = xqcheck_get_shadow_dquot(counts, dqp->q_id, &xcdq);
if (error)
goto out_unlock;
@@ -299,6 +595,7 @@ xqcheck_compare_dquot(
}
out_unlock:
+ mutex_unlock(&xqc->lock);
return error;
}
@@ -320,9 +617,12 @@ xqcheck_walk_observations(
if (!counts)
return 0;
+ mutex_lock(&xqc->lock);
while (!(error = xfbma_iter_get(counts, &nr, &xcdq))) {
xfs_dqid_t id = nr - 1;
+ mutex_unlock(&xqc->lock);
+
if (xchk_should_terminate(xqc->sc, &error))
return error;
@@ -338,7 +638,10 @@ xqcheck_walk_observations(
xfs_qm_dqput(dqp);
if (error)
return error;
+
+ mutex_lock(&xqc->lock);
}
+ mutex_unlock(&xqc->lock);
/* ENODATA means we hit the end of the array. */
if (error == -ENODATA)
@@ -379,6 +682,30 @@ static void
xqcheck_teardown_scan(
struct xqcheck *xqc)
{
+ struct xfs_quotainfo *qi = xqc->sc->mp->m_quotainfo;
+
+ /* Discourage any hook functions that might be running. */
+ mutex_lock(&xqc->lock);
+ xqc->hook_dead = true;
+ mutex_unlock(&xqc->lock);
+
+ /*
+ * As noted above, the apply hook is responsible for cleaning up the
+ * shadow dquot accounting data when a transaction completes. The mod
+ * hook must be removed before the apply hook so that we don't
+ * mistakenly leave an active shadow account for the mod hook to get
+ * its hands on. No hooks should be running after these functions
+ * return.
+ */
+ xfs_hook_del(&qi->qi_mod_dquot_hooks, &xqc->mod_hook);
+ xfs_hook_del(&qi->qi_apply_dquot_deltas_hooks, &xqc->apply_hook);
+
+ if (xqc->shadow_dquot_acct.key_len) {
+ rhashtable_free_and_destroy(&xqc->shadow_dquot_acct,
+ xqcheck_dqacct_free, NULL);
+ xqc->shadow_dquot_acct.key_len = 0;
+ }
+
if (xqc->pcounts) {
xfbma_destroy(xqc->pcounts);
xqc->pcounts = NULL;
@@ -394,6 +721,7 @@ xqcheck_teardown_scan(
xqc->ucounts = NULL;
}
+ mutex_destroy(&xqc->lock);
xqc->sc = NULL;
}
@@ -407,11 +735,15 @@ xqcheck_setup_scan(
struct xfs_scrub *sc,
struct xqcheck *xqc)
{
+ struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
int error;
ASSERT(xqc->sc == NULL);
xqc->sc = sc;
+ xqc->hook_dead = false;
+ mutex_init(&xqc->lock);
+
error = -ENOMEM;
if (xfs_this_quota_on(sc->mp, XFS_DQTYPE_USER)) {
xqc->ucounts = xfbma_init("user dquots",
@@ -434,6 +766,34 @@ xqcheck_setup_scan(
goto out_teardown;
}
+ /*
+ * Set up hash table to map transactions to our internal shadow dqtrx
+ * structures.
+ */
+ error = rhashtable_init(&xqc->shadow_dquot_acct,
+ &xqcheck_dqacct_hash_params);
+ if (error)
+ goto out_teardown;
+
+ /*
+ * Hook into the quota code. The hook only triggers for inodes that
+ * were already scanned, and the scanner thread takes each inode's
+ * ILOCK, which means that any in-progress inode updates will finish
+ * before we can scan the inode.
+ *
+ * The apply hook (which removes the shadow dquot accounting struct)
+ * must be installed before the mod hook so that we never fail to catch
+ * the end of a quota update sequence and leave stale shadow data.
+ */
+ error = xfs_hook_add(&qi->qi_apply_dquot_deltas_hooks,
+ &xqc->apply_hook, xqcheck_apply_deltas);
+ if (error)
+ goto out_teardown;
+ error = xfs_hook_add(&qi->qi_mod_dquot_hooks, &xqc->mod_hook,
+ xqcheck_mod_dquot);
+ if (error)
+ goto out_teardown;
+
/* Use deferred cleanup to pass the quota count data to repair. */
sc->buf_cleanup = (void (*)(void *))xqcheck_teardown_scan;
return 0;
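
[Editor's note, not part of the patch: the shadow accounting above keys an rhashtable on the transaction pointer and creates entries lazily from the mod hook. A standalone sketch of that lookup-or-insert pattern (again with made-up demo_* names, not the patch's code) boils down to the following.]

    /* Lookup-or-insert on an rhashtable keyed by an opaque pointer. */
    #include <linux/rhashtable.h>
    #include <linux/slab.h>

    struct demo_acct {
    	struct rhash_head	hash;
    	uintptr_t		key;
    	unsigned int		refcount;
    };

    static int
    demo_obj_cmpfn(
    	struct rhashtable_compare_arg	*arg,
    	const void			*obj)
    {
    	const uintptr_t			*key = arg->key;
    	const struct demo_acct		*acct = obj;

    	return acct->key != *key;
    }

    static const struct rhashtable_params demo_hash_params = {
    	.min_size		= 32,
    	.key_len		= sizeof(uintptr_t),
    	.key_offset		= offsetof(struct demo_acct, key),
    	.head_offset		= offsetof(struct demo_acct, hash),
    	.automatic_shrinking	= true,
    	.obj_cmpfn		= demo_obj_cmpfn,
    };

    /* Find the tracking object for @owner, creating it if necessary. */
    static struct demo_acct *
    demo_acct_get(
    	struct rhashtable	*ht,
    	void			*owner)
    {
    	uintptr_t		key = (uintptr_t)owner;
    	struct demo_acct	*acct;

    	acct = rhashtable_lookup_fast(ht, &key, demo_hash_params);
    	if (acct)
    		return acct;

    	acct = kzalloc(sizeof(*acct), GFP_NOFS);
    	if (!acct)
    		return NULL;

    	acct->key = key;
    	if (rhashtable_insert_fast(ht, &acct->hash, demo_hash_params)) {
    		kfree(acct);
    		return NULL;
    	}
    	return acct;
    }

[As in xqcheck_setup_scan()/xqcheck_teardown_scan() above, the table itself is set up once with rhashtable_init() and torn down with rhashtable_free_and_destroy().]
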
diff --git a/fs/xfs/scrub/quotacheck.h b/fs/xfs/scrub/quotacheck.h
index e90c1ceb57a8..190e1014f7b9 100644
--- a/fs/xfs/scrub/quotacheck.h
+++ b/fs/xfs/scrub/quotacheck.h
@@ -30,6 +30,22 @@ struct xqcheck {
struct xfbma *ucounts;
struct xfbma *gcounts;
struct xfbma *pcounts;
+
+ /* Last inode scanned by live quotacheck. */
+ xfs_ino_t last_ino;
+
+ /* Hooks into the quota code. */
+ struct notifier_block mod_hook;
+ struct notifier_block apply_hook;
+
+ /* Lock for the data used to capture live quota counter updates. */
+ struct mutex lock;
+
+ /* Shadow quota delta tracking structure. */
+ struct rhashtable shadow_dquot_acct;
+
+ /* Something failed during live tracking. */
+ bool hook_dead;
};
/* Return the incore counter array for a given quota type. */
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index ff08192d8d2a..2f3bf29f2d3d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1261,3 +1261,46 @@ xfs_mod_delalloc(
percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
XFS_DELALLOC_BATCH);
}
+
+/* Initialize a hook. */
+void
+xfs_hook_init(
+ struct xfs_hook_chain *chain)
+{
+ srcu_init_notifier_head(&chain->head);
+}
+
+/* Make it so a function gets called whenever we hit a certain hook point. */
+int
+xfs_hook_add(
+ struct xfs_hook_chain *chain,
+ struct notifier_block *hook,
+ notifier_fn_t fn)
+{
+ hook->notifier_call = fn;
+ return srcu_notifier_chain_register(&chain->head, hook);
+}
+
+/* Remove a previously installed hook. */
+void
+xfs_hook_del(
+ struct xfs_hook_chain *chain,
+ struct notifier_block *hook)
+{
+ if (!hook->notifier_call)
+ return;
+
+ srcu_notifier_chain_unregister(&chain->head, hook);
+ rcu_barrier();
+ hook->notifier_call = NULL;
+}
+
+/* Call a hook. */
+int
+xfs_hook_call(
+ struct xfs_hook_chain *chain,
+ unsigned long val,
+ void *priv)
+{
+ return srcu_notifier_call_chain(&chain->head, val, priv);
+}
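
[Editor's note, not part of the patch: the new xfs_hook_* helpers are deliberately minimal. A hypothetical consumer, sketched here with demo_* names and assuming the declarations from xfs_mount.h below, would wire a hook up roughly like this; quotacheck is the real consumer in this series.]

    /* Hypothetical consumer of the xfs_hook_* helpers; demo_* names only. */
    static int
    demo_hook_fn(
    	struct notifier_block	*nb,
    	unsigned long		action,
    	void			*data)
    {
    	/* Inspect the payload that the hook site passed to xfs_hook_call(). */
    	return NOTIFY_DONE;
    }

    static int
    demo_attach_hook(
    	struct xfs_hook_chain	*chain,
    	struct notifier_block	*nb)
    {
    	/* Start receiving callbacks from every xfs_hook_call() on @chain. */
    	return xfs_hook_add(chain, nb, demo_hook_fn);
    }

    static void
    demo_detach_hook(
    	struct xfs_hook_chain	*chain,
    	struct notifier_block	*nb)
    {
    	/* Stop callbacks; a no-op if notifier_call was never set. */
    	xfs_hook_del(chain, nb);
    }

[The chain itself is initialized once with xfs_hook_init(), as xfs_qm_init_quotainfo() does for the two quota chains later in this patch.]
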
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b27bd41c7b27..ca9b30b4561e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -56,6 +56,10 @@ struct xfs_error_cfg {
long retry_timeout; /* in jiffies, -1 = infinite */
};
+struct xfs_hook_chain {
+ struct srcu_notifier_head head;
+};
+
/*
* Per-cpu deferred inode inactivation GC lists.
*/
@@ -387,4 +391,10 @@ struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
void xfs_force_summary_recalc(struct xfs_mount *mp);
void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);
+void xfs_hook_init(struct xfs_hook_chain *chain);
+int xfs_hook_add(struct xfs_hook_chain *chain, struct notifier_block *hook,
+ notifier_fn_t fn);
+void xfs_hook_del(struct xfs_hook_chain *chain, struct notifier_block *hook);
+int xfs_hook_call(struct xfs_hook_chain *chain, unsigned long val, void *priv);
+
#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index c20b01cf4199..4f0470feaea9 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -691,6 +691,11 @@ xfs_qm_init_quotainfo(
if (error)
goto out_free_inos;
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+ xfs_hook_init(&qinf->qi_mod_dquot_hooks);
+ xfs_hook_init(&qinf->qi_apply_dquot_deltas_hooks);
+#endif
+
return 0;
out_free_inos:
@@ -1782,12 +1787,12 @@ xfs_qm_vop_chown(
ASSERT(prevdq);
ASSERT(prevdq != newdq);
- xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_nblocks));
- xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, bfield, -(ip->i_nblocks));
+ xfs_trans_mod_ino_dquot(tp, ip, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
/* the sparkling new dquot */
- xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_nblocks);
- xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, bfield, ip->i_nblocks);
+ xfs_trans_mod_ino_dquot(tp, ip, newdq, XFS_TRANS_DQ_ICOUNT, 1);
/*
* Back when we made quota reservations for the chown, we reserved the
@@ -1869,22 +1874,21 @@ xfs_qm_vop_create_dqattach(
ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id);
ip->i_udquot = xfs_qm_dqhold(udqp);
- xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id);
ip->i_gdquot = xfs_qm_dqhold(gdqp);
- xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
ASSERT(ip->i_projid == pdqp->q_id);
ip->i_pdquot = xfs_qm_dqhold(pdqp);
- xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
+
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, 1);
}
/* Decide if this inode's dquot is near an enforcement boundary. */
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 442a0f97a9d4..980bd2ef186d 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -69,6 +69,12 @@ struct xfs_quotainfo {
/* Minimum and maximum quota expiration timestamp values. */
time64_t qi_expiry_min;
time64_t qi_expiry_max;
+
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+ /* online quotacheck stuff */
+ struct xfs_hook_chain qi_mod_dquot_hooks;
+ struct xfs_hook_chain qi_apply_dquot_deltas_hooks;
+#endif
};
static inline struct radix_tree_root *
@@ -105,6 +111,15 @@ xfs_quota_inode(struct xfs_mount *mp, xfs_dqtype_t type)
return NULL;
}
+/* Parameters for xfs_trans_mod_dquot hook. */
+struct xfs_trans_mod_dquot_params {
+ struct xfs_trans *tp;
+ struct xfs_inode *ip;
+ struct xfs_dquot *dqp;
+ uint field;
+ int64_t delta;
+};
+
extern void xfs_trans_mod_dquot(struct xfs_trans *tp, struct xfs_dquot *dqp,
uint field, int64_t delta);
extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index dcc785fdd345..e736231a33a3 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -74,6 +74,15 @@ struct xfs_dqtrx {
int64_t qt_icount_delta; /* dquot inode count changes */
};
+/*
+ * Parameters for xfs_trans_apply_dquot_deltas hook. The hook arg parameter
+ * is 1 to apply and 0 to cancel the update.
+ */
+struct xfs_trans_apply_dquot_deltas_params {
+ struct xfs_trans *tp;
+ struct xfs_dquot *dqp;
+};
+
#ifdef CONFIG_XFS_QUOTA
extern void xfs_trans_dup_dqinfo(struct xfs_trans *, struct xfs_trans *);
extern void xfs_trans_free_dqinfo(struct xfs_trans *);
@@ -180,4 +189,14 @@ xfs_quota_unreserve_blkres(struct xfs_inode *ip, int64_t blocks)
extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
+#if IS_ENABLED(CONFIG_XFS_QUOTA) && IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+extern void xfs_trans_mod_ino_dquot(struct xfs_trans *tp, struct xfs_inode *ip,
+ struct xfs_dquot *dqp, uint field, int64_t delta);
+#elif IS_ENABLED(CONFIG_XFS_QUOTA)
+# define xfs_trans_mod_ino_dquot(tp, ip, dqp, field, delta) \
+ xfs_trans_mod_dquot((tp), (dqp), (field), (delta))
+#else
+# define xfs_trans_mod_ino_dquot(tp, ip, dqp, field, delta)
+#endif
+
#endif /* __XFS_QUOTA_H__ */
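
[Editor's note, not part of the patch: the hook payload above carries only the transaction and dquot, and the notifier argument distinguishes apply (1) from cancel (0), as the comment notes. The hook sites are added to xfs_trans_apply_dquot_deltas() and xfs_trans_unreserve_and_mod_dquots() in the next file. A listener for this chain is shaped roughly like the sketch below; it is illustrative only, and xqcheck_apply_deltas earlier in the patch is the real implementation.]

    /* Illustrative listener shape for qi_apply_dquot_deltas_hooks. */
    static int
    demo_apply_listener(
    	struct notifier_block	*nb,
    	unsigned long		action,
    	void			*data)
    {
    	struct xfs_trans_apply_dquot_deltas_params	*p = data;

    	/*
    	 * action == 1: commit is applying the deltas, so fold the shadow
    	 * deltas recorded for (p->tp, p->dqp) into our own counters.
    	 * action == 0: the transaction is being cancelled, so discard them.
    	 * Either way, drop the per-transaction tracking keyed on p->tp.
    	 */
    	pr_debug("%s shadow deltas for dquot id %u\n",
    			action == 1 ? "applying" : "discarding",
    			p->dqp->q_id);
    	return NOTIFY_DONE;
    }
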
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 6560f498fdc7..a2282b4cb936 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -121,6 +121,31 @@ xfs_trans_dup_dqinfo(
}
}
+/* Schedule a transactional dquot update on behalf of an inode. */
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+void
+xfs_trans_mod_ino_dquot(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip,
+ struct xfs_dquot *dqp,
+ uint field,
+ int64_t delta)
+{
+ struct xfs_trans_mod_dquot_params p;
+ struct xfs_quotainfo *qi;
+
+ xfs_trans_mod_dquot(tp, dqp, field, delta);
+
+ p.tp = tp;
+ p.ip = ip;
+ p.dqp = dqp;
+ p.field = field;
+ p.delta = delta;
+ qi = tp->t_mountp->m_quotainfo;
+ xfs_hook_call(&qi->qi_mod_dquot_hooks, 0, &p);
+}
+#endif
+
/*
* Wrap around mod_dquot to account for both user and group quotas.
*/
@@ -138,11 +163,11 @@ xfs_trans_mod_dquot_byino(
return;
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
- (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
+ xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
- (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
+ xfs_trans_mod_ino_dquot(tp, ip, ip->i_gdquot, field, delta);
if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
- (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
+ xfs_trans_mod_ino_dquot(tp, ip, ip->i_pdquot, field, delta);
}
STATIC struct xfs_dqtrx *
@@ -322,6 +347,24 @@ xfs_apply_quota_reservation_deltas(
}
}
+/* Call downstream hooks now that it's time to apply dquot deltas. */
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+static inline void
+xfs_trans_apply_dquot_deltas_hook(
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
+{
+ struct xfs_trans_apply_dquot_deltas_params p;
+ struct xfs_quotainfo *qi = tp->t_mountp->m_quotainfo;
+
+ p.tp = tp;
+ p.dqp = dqp;
+ xfs_hook_call(&qi->qi_apply_dquot_deltas_hooks, 1, &p);
+}
+#else
+# define xfs_trans_apply_dquot_deltas_hook(tp, dqp)
+#endif
+
/*
* Called by xfs_trans_commit() and similar in spirit to
* xfs_trans_apply_sb_deltas().
@@ -367,6 +410,8 @@ xfs_trans_apply_dquot_deltas(
ASSERT(XFS_DQ_IS_LOCKED(dqp));
+ xfs_trans_apply_dquot_deltas_hook(tp, dqp);
+
/*
* adjust the actual number of blocks used
*/
@@ -466,6 +511,24 @@ xfs_trans_apply_dquot_deltas(
}
}
+/* Call downstream hooks now that it's time to cancel dquot deltas. */
+#if IS_ENABLED(CONFIG_XFS_ONLINE_SCRUB)
+static inline void
+xfs_trans_unreserve_and_mod_dquots_hook(
+ struct xfs_trans *tp,
+ struct xfs_dquot *dqp)
+{
+ struct xfs_trans_apply_dquot_deltas_params p;
+ struct xfs_quotainfo *qi = tp->t_mountp->m_quotainfo;
+
+ p.tp = tp;
+ p.dqp = dqp;
+ xfs_hook_call(&qi->qi_apply_dquot_deltas_hooks, 0, &p);
+}
+#else
+# define xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp)
+#endif
+
/*
* Release the reservations, and adjust the dquots accordingly.
* This is called only when the transaction is being aborted. If by
@@ -496,6 +559,9 @@ xfs_trans_unreserve_and_mod_dquots(
*/
if ((dqp = qtrx->qt_dquot) == NULL)
break;
+
+ xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp);
+
/*
* Unreserve the original reservation. We don't care
* about the number of blocks used field, or deltas.