summaryrefslogtreecommitdiff
path: root/fs/xfs/scrub/iscan.c
blob: 7b16d221b5e748f0367e0b6de0845ff51bffe11b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2021 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_bit.h"
#include "xfs_icache.h"
#include "scrub/scrub.h"
#include "scrub/iscan.h"
#include "scrub/trace.h"

/*
 * Live File Scan
 * ==============
 *
 * Live file scans walk every inode in a live filesystem.  This is more or
 * less like a regular iwalk, except that when we're advancing the scan cursor,
 * we must ensure that inodes cannot be added or deleted anywhere between the
 * old cursor value and the new cursor value.  If we're advancing the cursor
 * by one inode, the caller must hold that inode; if we're finding the next
 * inode to scan, we must grab the AGI and hold it until we've updated the
 * scan cursor.
 *
 * Callers are expected to use this code to scan all files in the filesystem to
 * construct a new metadata index of some kind.  The scan races against other
 * live updates, which means there must be a provision to update the new index
 * when updates are made to inodes that have already been scanned.  The iscan
 * lock
 * can be used in live update hook code to stop the scan and protect this data
 * structure.
 *
 * To keep the new index up to date with other metadata updates being made to
 * the live filesystem, it is assumed that the caller will add hooks as needed
 * to be notified when a metadata update occurs.  The inode scanner must tell
 * the hook code when an inode has been visited with xchk_iscan_mark_visited.
 * Hook functions can use xchk_iscan_want_live_update to decide if the
 * scanner's observations must be updated.
 */

/*
 * Mark as "free" every inode in @irec that comes before @agino, so that the
 * walk over allocated inodes skips them.  This is how we resume an inode
 * scan that was interrupted partway through an inode chunk.
 */
STATIC void
xchk_iscan_adjust_start(
	xfs_agino_t			agino,	/* starting inode of chunk */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				nr_skip;	/* inodes to mask off */

	/* Distance from the start of the chunk to the resume point. */
	nr_skip = agino - irec->ir_startino;
	irec->ir_free |= xfs_inobt_maskn(0, nr_skip);

	/* Keep the free count in sync with the adjusted free mask. */
	irec->ir_freecount = hweight64(irec->ir_free);
}

/*
 * Set *cursor to the next allocated inode after whatever it's set to now.
 * If there are no more inodes in this AG, cursor is set to NULLAGINO.
 *
 * The caller passes in @agi_bp, the locked AGI buffer for @pag, which is
 * what prevents inodes from being created or freed while we walk the inobt.
 * Returns 0 on success (including the out-of-inodes case) or a negative
 * errno; -EFSCORRUPTED if the inobt records are inconsistent.
 */
STATIC int
xchk_iscan_find_next(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp,
	struct xfs_perag	*pag,
	xfs_agino_t		*cursor)
{
	struct xfs_inobt_rec_incore	rec;
	struct xfs_btree_cur	*cur;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	xfs_agnumber_t		agno = pag->pag_agno;
	xfs_agino_t		lastino = NULLAGINO;
	xfs_agino_t		first, last;
	xfs_agino_t		agino = *cursor;
	int			has_rec;
	int			error;

	/* If the cursor is beyond the end of this AG, move to the next one. */
	xfs_agino_range(mp, agno, &first, &last);
	if (agino > last) {
		*cursor = NULLAGINO;
		return 0;
	}

	/*
	 * Look up the inode chunk for the current cursor position.  If there
	 * is no chunk here, we want the next one.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agi_bp, pag, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &has_rec);
	if (!error && !has_rec)
		error = xfs_btree_increment(cur, 0, &has_rec);
	for (; !error; error = xfs_btree_increment(cur, 0, &has_rec)) {
		/*
		 * If we've run out of inobt records in this AG, move the
		 * cursor on to the next AG and exit.  The caller can try
		 * again with the next AG.
		 */
		if (!has_rec) {
			*cursor = NULLAGINO;
			break;
		}

		error = xfs_inobt_get_rec(cur, &rec, &has_rec);
		if (error)
			break;
		if (!has_rec) {
			error = -EFSCORRUPTED;
			break;
		}

		/* Make sure that we always move forward. */
		if (lastino != NULLAGINO &&
		    XFS_IS_CORRUPT(mp, lastino >= rec.ir_startino)) {
			error = -EFSCORRUPTED;
			break;
		}
		lastino = rec.ir_startino + XFS_INODES_PER_CHUNK - 1;

		/*
		 * If this record only covers inodes that come before the
		 * cursor, advance to the next record.
		 */
		if (rec.ir_startino + XFS_INODES_PER_CHUNK <= agino)
			continue;

		/*
		 * If the incoming lookup put us in the middle of an inobt
		 * record, mark it and the previous inodes "free" so that the
		 * search for allocated inodes will start at the cursor.  Use
		 * funny math to avoid overflowing the bit shift.
		 */
		if (agino >= rec.ir_startino)
			xchk_iscan_adjust_start(agino + 1, &rec);

		/*
		 * If there are allocated inodes in this chunk, find them,
		 * and update the cursor.
		 */
		if (rec.ir_freecount < XFS_INODES_PER_CHUNK) {
			int	next = xfs_lowbit64(~rec.ir_free);

			*cursor = rec.ir_startino + next;
			break;
		}
	}

	/* Tear down the cursor; a nonzero error marks it stale. */
	xfs_btree_del_cursor(cur, error);
	return error;
}

/*
 * Prepare to return agno/agino to the iscan caller by moving the lastino
 * cursor to the previous inode.  Do this while we still hold the AGI so that
 * no other threads can create or delete inodes in this AG.
 */
static inline void
xchk_iscan_move_cursor(
	struct xfs_scrub	*sc,
	struct xchk_iscan	*iscan,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino)
{
	xfs_ino_t		cursor;

	cursor = XFS_AGINO_TO_INO(sc->mp, agno, agino);

	/* Hold the iscan lock so live-update hooks see a consistent pair. */
	mutex_lock(&iscan->lock);
	iscan->cursor_ino = cursor;
	iscan->__visited_ino = cursor - 1;
	trace_xchk_iscan_move_cursor(sc->mp, iscan);
	mutex_unlock(&iscan->lock);
}

/*
 * Advance ino to the next inode that the inobt thinks is allocated, being
 * careful to jump to the next AG when the current one runs out of inodes.
 * Advancing ino effectively means that we've pushed the inode scan forward,
 * so set the visited cursor to (ino - 1) so that live update hooks will
 * track inode allocations in that range once we release the AGI buffer.
 *
 * Returns 1 if there's a new inode to examine, 0 if we've run out of inodes,
 * -ECANCELED if the live scan aborted, or the usual negative errno.
 */
int
xchk_iscan_advance(
	struct xfs_scrub	*sc,
	struct xchk_iscan	*iscan)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agi_bp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agino_t		agino;
	int			ret;

	/* The visited cursor must never be ahead of the scan cursor. */
	ASSERT(iscan->cursor_ino >= iscan->__visited_ino);

	/* Reset the iget retry budget for the inode we're about to find. */
	iscan->__cursor_tries = iscan->iget_tries;

next_ag:
	/* Ran past the last AG?  Mark the whole scan complete. */
	agno = XFS_INO_TO_AGNO(mp, iscan->cursor_ino);
	if (agno >= mp->m_sb.sb_agcount) {
		xchk_iscan_move_cursor(sc, iscan, agno, 0);
		iscan->cursor_ino = NULLFSINO;
		return 0;
	}
	agino = XFS_INO_TO_AGINO(mp, iscan->cursor_ino);

	/* Hold the AGI so nobody can create or free inodes in this AG. */
	pag = xfs_perag_get(mp, agno);
	ret = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
	if (ret)
		goto out_pag;

	ret = xchk_iscan_find_next(sc, agi_bp, pag, &agino);
	if (ret)
		goto out_buf;
	if (agino == NULLAGINO) {
		/* This AG is exhausted; drop its AGI and try the next AG. */
		xchk_iscan_move_cursor(sc, iscan, agno + 1, 0);
		xfs_trans_brelse(sc->tp, agi_bp);
		xfs_perag_put(pag);
		goto next_ag;
	}

	/* Found an allocated inode; publish it and return 1 to the caller. */
	xchk_iscan_move_cursor(sc, iscan, agno, agino);
	ret = 1;
out_buf:
	xfs_trans_brelse(sc->tp, agi_bp);
out_pag:
	xfs_perag_put(pag);
	/* An abort raised by a hook overrides whatever we found. */
	if (xchk_iscan_aborted(iscan))
		return -ECANCELED;
	return ret;
}

/*
 * Grabbing the inode failed, so we need to back up the scan and ask the caller
 * to try to _advance the scan again.  Returns -ECANCELED if we've run out of
 * retry opportunities or -EAGAIN if we have not.
 */
static int
xchk_iscan_iget_retry(
	struct xchk_iscan	*iscan,
	bool			wait)
{
	ASSERT(iscan->cursor_ino == iscan->__visited_ino + 1);

	/* Step the cursor back so _advance will land on this inode again. */
	iscan->cursor_ino--;

	/* Burn one retry; give up once the budget hits zero. */
	if (--iscan->__cursor_tries == 0)
		return -ECANCELED;

	if (wait && iscan->iget_retry_delay) {
		/*
		 * Sleep for some number of jiffies.  An early return means
		 * someone sent a kill signal to the calling process.
		 */
		if (schedule_timeout_killable(iscan->iget_retry_delay))
			return -ECANCELED;
		if (xchk_iscan_aborted(iscan))
			return -ECANCELED;
	}

	return -EAGAIN;
}

/*
 * Grab an inode as part of an inode scan.  While scanning this inode, the
 * caller must ensure that no other threads can modify the inode until a call
 * to xchk_iscan_visit succeeds.
 *
 * Returns 0 and an incore inode; -EAGAIN if the caller should call again
 * xchk_iscan_advance; -ECANCELED if we couldn't grab an inode; or some other
 * negative errno.
 */
int
xchk_iscan_iget(
	struct xfs_scrub	*sc,
	struct xchk_iscan	*iscan,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * UNTRUSTED because the cursor came from the inobt, not from a
	 * caller-supplied inode number.
	 */
	error = xfs_iget(mp, sc->tp, iscan->cursor_ino, XFS_IGET_UNTRUSTED, 0,
			ipp);

	trace_xchk_iscan_iget(mp, iscan, error);

	if (error == -ENOENT) {
		/*
		 * It's possible that this inode has lost all of its links but
		 * hasn't yet been inactivated.  Push the inactivation workers
		 * to clear the pending work, go to sleep long enough for
		 * inactivation to wake up, and try again.
		 */
		xfs_inodegc_flush(mp);
		return xchk_iscan_iget_retry(iscan, true);
	}

	if (error == -EINVAL) {
		/*
		 * We thought the inode was allocated, but the inode btree
		 * lookup failed, which means that it was freed since the last
		 * time we advanced the cursor.  Back up and try again.
		 */
		return xchk_iscan_iget_retry(iscan, false);
	}

	return error;
}

/* Release inode scan resources. */
void
xchk_iscan_finish(
	struct xchk_iscan	*iscan)
{
	/* Poison both cursors so stale uses are obvious. */
	iscan->cursor_ino = NULLFSINO;
	iscan->__visited_ino = NULLFSINO;
	mutex_destroy(&iscan->lock);
}

/* Set ourselves up to start an inode scan. */
void
xchk_iscan_start(
	struct xchk_iscan	*iscan)
{
	clear_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);

	/* Every inode gets at least one iget attempt. */
	if (iscan->iget_tries == 0)
		iscan->iget_tries = 1;

	/* Start the scan from inode zero. */
	iscan->cursor_ino = 0;
	iscan->__visited_ino = 0;
	mutex_init(&iscan->lock);
}

/*
 * Mark this inode as having been visited.  Callers must hold a sufficiently
 * exclusive lock on the inode to prevent concurrent modifications.
 */
void
xchk_iscan_mark_visited(
	struct xchk_iscan	*iscan,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	/* Serialize against live update hooks reading the visited cursor. */
	mutex_lock(&iscan->lock);
	iscan->__visited_ino = ip->i_ino;
	trace_xchk_iscan_visit(mp, iscan);
	mutex_unlock(&iscan->lock);
}

/*
 * Do we need a live update for this inode?  This is true if the scanner thread
 * has visited this inode and the scan hasn't been aborted due to errors.
 * Callers must hold a sufficiently exclusive lock on the inode to prevent
 * scanners from reading any inode metadata.
 */
bool
xchk_iscan_want_live_update(
	struct xchk_iscan	*iscan,
	xfs_ino_t		ino)
{
	bool			ret;

	/* An aborted scan discards its observations, so no updates needed. */
	if (xchk_iscan_aborted(iscan))
		return false;

	/* Only inodes at or before the visited cursor have been scanned. */
	mutex_lock(&iscan->lock);
	ret = iscan->__visited_ino >= ino;
	mutex_unlock(&iscan->lock);

	return ret;
}