#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2019 SUSE Linux Products GmbH. All Rights Reserved.
#
# FSQA Test No. 201
#
# Test that when we have the no-holes feature enabled and a specific metadata
# layout, if we punch a hole in a file and fsync it, the hole is persisted:
# after replaying the log the hole still exists and the file has the expected
# content. We test both a hole that starts at file offset 0 and holes punched
# in the middle of the file.
#
. ./common/preamble
_begin_fstest auto quick punch log

# Override the default cleanup function.
_cleanup()
{
	_cleanup_flakey
	cd /
	rm -f $tmp.*
}

. ./common/attr
. ./common/filter
. ./common/dmflakey

_supported_fs btrfs
_require_scratch
_require_dm_target flakey
_require_attrs
_require_xfs_io_command "fpunch"
_require_btrfs_fs_feature "no_holes"
_require_btrfs_mkfs_feature "no-holes"
_require_odirect

run_test_leading_hole()
{
    # We create the filesystem with a node size of 64KiB because we need to
    # create a specific metadata layout in order to trigger the bug we are
    # testing. At the moment the node size cannot be smaller than the system's
    # page size, so given that the largest possible page size is 64KiB and that
    # by default the node size is set to the system's page size, we explicitly
    # create a filesystem with a 64KiB node size, so that the test can run
    # reliably regardless of the system's page size.
    _scratch_mkfs -O no-holes -n $((64 * 1024)) >>$seqres.full 2>&1
    _require_metadata_journaling $SCRATCH_DEV
    _init_flakey
    _mount_flakey

    # Create our first file, which is used just to fill space in a leaf. Its
    # items occupy most of the first leaf. We use a large xattr since it's an
    # easy and fast way to fill a lot of leaf space.
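    # (The xattr value below is built with printf: '%0.sX' consumes each
    # argument produced by seq as a zero-length string and prints a literal
    # 'X', so the result is a string of 63617 'X' characters.)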
    touch $SCRATCH_MNT/foo
    $SETFATTR_PROG -n 'user.x1' -v $(printf '%0.sX' $(seq 1 63617)) \
        $SCRATCH_MNT/foo

    # Create our second file, which we will use to test if fsync persists a
    # hole punch operation against it. Create several extent items for the
    # file, all with a size of 64KiB. The first extent item of this file is
    # located within the first leaf of the fs tree, as its last item, and all
    # the remaining extent items in another leaf.
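    # (The loop below runs for i = 0..10, writing 11 extents at offsets 0,
    # 64KiB, ..., 640KiB, so file bar ends up 704KiB long.)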
    local offset=0
    for ((i = 0; i <= 10; i++)); do
        $XFS_IO_PROG -f -d -c "pwrite -S 0xab -b 64K $offset 64K" \
            $SCRATCH_MNT/bar >/dev/null
        offset=$(($offset + 64 * 1024))
    done

    # Make sure everything done so far is durably persisted. We also want to
    # start a new transaction and bump the filesystem generation. We don't
    # fsync because we want to keep the 'full sync' flag in the inode of file
    # 'bar', so that the fsync after the hole punch operation uses the slow
    # path, which is necessary to trigger the bug we are testing.
    sync

    # Now punch a hole that covers only the first extent item of file bar. That
    # is the only extent item in the first leaf of the fs tree, so the hole
    # punch operation will drop it and will not touch the second leaf, which
    # contains the remaining extent items. These conditions are necessary to
    # trigger the bug we are testing.
    $XFS_IO_PROG -c "fpunch 0 64K" -c "fsync" $SCRATCH_MNT/bar

   echo "File digest before power failure:"
   md5sum $SCRATCH_MNT/bar | _filter_scratch
   # Simulate a power failure and mount the filesystem to check that replaying the
   # log tree succeeds and our file bar has the expected content.
   _flakey_drop_and_remount
   echo "File digest after power failure and log replay:"
   md5sum $SCRATCH_MNT/bar | _filter_scratch

   _unmount_flakey
   _cleanup_flakey
}

run_test_middle_hole()
{
    local hole_offset=$1
    local hole_len=$2

    # We create the filesystem with a node size of 64KiB because we need to
    # create a specific metadata layout in order to trigger the bug we are
    # testing. At the moment the node size cannot be smaller than the system's
    # page size, so given that the largest possible page size is 64KiB and that
    # by default the node size is set to the system's page size, we explicitly
    # create a filesystem with a 64KiB node size, so that the test can run
    # reliably regardless of the system's page size.
    _scratch_mkfs -O no-holes -n $((64 * 1024)) >>$seqres.full 2>&1
    _require_metadata_journaling $SCRATCH_DEV
    _init_flakey
    _mount_flakey

    # Create our first file, which is used just to fill space in a leaf. Its
    # items occupy most of the first leaf. We use a large xattr since it's an
    # easy and fast way to fill a lot of leaf space.
    touch $SCRATCH_MNT/foo
    $SETFATTR_PROG -n 'user.x1' -v $(printf '%0.sX' $(seq 1 63600)) \
        $SCRATCH_MNT/foo

    # Create our second file, which we will use to test if fsync persists a hole
    # punch operation against it. Create several extent items for the file, all
    # with a size of 64KiB. The goal is to have the items of this file span 5
    # btree leaves.
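    # (The loop below runs for i = 0..2000, writing 2001 extents of 64KiB each,
    # so file bar ends up about 125MiB long.)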
    local offset=0
    for ((i = 0; i <= 2000; i++)); do
        $XFS_IO_PROG -f -d -c "pwrite -S 0xab -b 64K $offset 64K" \
            $SCRATCH_MNT/bar >/dev/null
        offset=$(($offset + 64 * 1024))
    done

    # Make sure everything done so far is durably persisted. We also want to
    # start a new transaction and bump the filesystem generation. We don't fsync
    # because we want to keep the 'full sync' flag in the inode of file 'bar',
    # so that the fsync after the hole punch operation uses the slow path, which
    # is necessary to trigger the bug we are testing.
    sync

    # Now punch a hole that covers the entire 3rd leaf. This results in deleting
    # the entire leaf, without touching the 2nd and 4th leaves. The first leaf is
    # touched because the inode item needs to be updated (bytes used, ctime,
    # mtime, etc). After that we modify the last extent, located in the 5th leaf,
    # and then fsync the file. We want to verify that both the hole and the new
    # data are correctly persisted by the fsync.
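    # (The write offset used below, 131072000 bytes, is 2000 * 64KiB, i.e. the
    # offset of the last extent written by the loop above.)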
    $XFS_IO_PROG -c "fpunch $hole_offset $hole_len" \
                 -c "pwrite -S 0xf1 131072000 64K" \
                 -c "fsync" $SCRATCH_MNT/bar >/dev/null

    echo "File digest before power failure:"
    md5sum $SCRATCH_MNT/bar | _filter_scratch
    # Simulate a power failure and mount the filesystem to check that replaying
    # the log tree succeeds and our file bar has the expected content.
    _flakey_drop_and_remount
    echo "File digest after power failure and log replay:"
    md5sum $SCRATCH_MNT/bar | _filter_scratch

    _unmount_flakey
    _cleanup_flakey
}

echo "Testing with hole offset 0 hole length 65536"
run_test_leading_hole

# Now test cases where file extent items span many leaves and we punch a large
# hole in the middle of the file, with the goal of getting an entire leaf
# deleted during the hole punch operation. We test 3 different ranges because
# depending on whether SELinux is enabled or not, the layout of the items in
# the leaves varies slightly.
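# For reference: the hole offsets below are 12, 11 and 10 times 64KiB (786432,
# 720896 and 655360 bytes), and the hole length of 54919168 bytes is 838 times
# 64KiB, so each hole covers a whole number of the 64KiB extents created above.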

echo
echo "Testing with hole offset 786432 hole length 54919168"
run_test_middle_hole 786432 54919168

echo
echo "Testing with hole offset 720896 hole length 54919168"
run_test_middle_hole 720896 54919168

echo
echo "Testing with hole offset 655360 hole length 54919168"
run_test_middle_hole 655360 54919168

status=0
exit