path: root/tests/xfs/432
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2017, Oracle and/or its affiliates.  All Rights Reserved.
#
# FS QA Test No. 432
#
# Ensure that metadump copies large directory extents
#
# Metadump helpfully discards directory (and xattr) extents that are
# longer than 1000 blocks.  This is a little silly since a hardlink farm
# can easily create such a monster.
#
# Now that we've upped metadump's default too-long-extent discard
# threshold to 2^21 blocks, make sure we never do that again.
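# (2^21 blocks is 2097152 blocks -- roughly 2GiB of directory extent at the
# 1k block size this test formats with, far more than the hardlink farm
# below ever creates.)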
#
. ./common/preamble
_begin_fstest auto quick dir metadata metadump

# Override the default cleanup function.
_cleanup()
{
	cd /
	rm -f "$tmp".* $metadump_file $metadump_img
}

# Import common functions.
. ./common/filter

# real QA test starts here
_supported_fs xfs
_require_command "$XFS_MDRESTORE_PROG" "xfs_mdrestore"
_require_scratch

rm -f "$seqres.full"

echo "Format and mount"
# We need to create a directory with a huuuge extent record.  Normally
# a rapidly expanding directory gets its blocks allocated in lockstep --
# physically we end up writing (a couple of dir data blocks) followed by
# (a da btree block) over and over.
#
# Therefore, we crank the directory block size up to maximum and the
# filesystem down to minimum so that we have to allocate 64 blocks at
# a time, trying to end up with the result that we have > 1000 blocks
# allocated in a single extent.
#
# In theory the math works out here -- ~65500 bytes for a da leaf block /
# 8 bytes per da leaf entry == ~8187 hash entries for a da node.  65500
# bytes for a dir data block / 264 bytes per dirent == ~248 dirents per
# block.  8187 hashes/dablk / 248 dirents/dirblock = ~33 dirblocks per
# dablock.  33 dirblocks * 64k means that we can expand a directory by
# 2112k before we have to allocate another da btree block.
_scratch_mkfs -b size=1k -n size=64k > "$seqres.full" 2>&1
_scratch_mount >> "$seqres.full" 2>&1
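# As a purely illustrative aside, the directory block size mkfs actually
# picked can be double-checked on the mounted fs, assuming xfs_info is
# installed and its output still carries a "naming ... bsize=" line:
#
#	xfs_info "$SCRATCH_MNT" | grep naming >> "$seqres.full"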

metadump_file="$TEST_DIR/meta-$seq"
metadump_img="$TEST_DIR/img-$seq"
rm -f $metadump_file $metadump_img
testdir="$SCRATCH_MNT/test-$seq"
max_fname_len=255
blksz=$(_get_block_size $SCRATCH_MNT)

# Try to create a directory w/ an extent longer than 1000 blocks
blocks=1050
names=$((blocks * (blksz / max_fname_len)))
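# blksz / max_fname_len approximates how many maximum-length dirents fit in
# one fs block; with the 1k blocks created above that is 1024 / 255 = 4
# (integer division), so about 1050 * 4 = 4200 names get created to cover
# the 1050-block target.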
echo "Create huge dir"
mkdir -p $testdir
touch $SCRATCH_MNT/a
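# All of the names below are hard links to this one file, so growing the
# directory never allocates new inodes or file data -- only directory and
# da btree blocks.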
seq 0 $names | while read f; do
	name="$testdir/$(printf "%0${max_fname_len}d" $f)"
	ln $SCRATCH_MNT/a $name
done
dir_inum=$(stat -c %i $testdir)

echo "Check for > 1000 block extent?"
_scratch_unmount
check_for_long_extent() {
	inum=$1

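	# Each bmap output line should look roughly like
	# "data offset 0 startblock 24 (0/24) count 1050 flag 0"; sed keeps
	# just the block count and awk flags any extent longer than 1000
	# blocks.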
	_scratch_xfs_db -x -c "inode $inum" -c bmap | \
		sed -e 's/^.*count \([0-9]*\) flag.*$/\1/g' | \
		awk '{if ($1 > 1000) { printf("yes, %d\n", $1); } }'
}
extlen="$(check_for_long_extent $dir_inum)"
echo "qualifying extent: $extlen blocks" >> $seqres.full
test -n "$extlen" || _notrun "could not create dir extent > 1000 blocks"

echo "Try to metadump"
_scratch_xfs_metadump $metadump_file -w
SCRATCH_DEV=$metadump_img _scratch_xfs_mdrestore $metadump_file
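# Outside of the fstests helpers, the round trip above corresponds roughly
# to the following (the exact options the helpers pass may differ):
#
#	xfs_metadump -w "$SCRATCH_DEV" "$metadump_file"
#	xfs_mdrestore "$metadump_file" "$metadump_img"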

echo "Check restored metadump image"
SCRATCH_DEV=$metadump_img _scratch_xfs_repair -n &>> $seqres.full || \
	echo "xfs_repair on restored fs returned $?"

# success, all done
status=0
exit