#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018, Jeff Layton <jlayton@redhat.com>
#
# FS QA Test No. 160
#
# Open a file, write to it, and fsync. Then flip the data device to throw
# errors, write to it again, and call sync. Close the file, reopen it, and
# then call fsync on it. Is the error reported?
#
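# This exercises the kernel's write-back error reporting: the failed sync
# records the error against the inode, and the expectation (with errseq_t
# style tracking, where an error nobody has seen yet is still handed to the
# next opener) is that even a descriptor opened *after* the error occurred
# gets EIO back from fsync.
#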
. ./common/preamble
_begin_fstest auto quick eio

# Override the default cleanup function.
_cleanup()
{
	cd /
	rm -f $tmp.*
	_dmerror_cleanup
}

. ./common/filter
. ./common/dmerror

_supported_fs btrfs
_require_scratch_dev_pool

_require_dm_target error

# bring up dmerror device
_dmerror_init
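# (_dmerror_init, from common/dmerror, roughly sets up a device-mapper device
# named error-test on top of $SCRATCH_DEV with a working "linear" table and
# exports it as $DMERROR_DEV, e.g. /dev/mapper/error-test.)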

# Replace first device with error-test device
old_SCRATCH_DEV=$SCRATCH_DEV
SCRATCH_DEV_POOL=`echo $SCRATCH_DEV_POOL | perl -pe "s#$SCRATCH_DEV#$DMERROR_DEV#"`
SCRATCH_DEV=$DMERROR_DEV
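# e.g. with SCRATCH_DEV=/dev/sdb and SCRATCH_DEV_POOL="/dev/sdb /dev/sdc"
# (device names purely illustrative), the pool becomes
# "/dev/mapper/error-test /dev/sdc": only the first device will throw
# errors, the rest keep working.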

echo "Format and mount"
_scratch_pool_mkfs "-d raid0 -m raid1" > $seqres.full 2>&1
_scratch_mount
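# data is striped (raid0) over every pool member so a large enough write is
# bound to touch the error device, while metadata is mirrored (raid1) so the
# injected errors should hit data writeback rather than take the fs down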

# How much do we need to write? We need to hit all of the stripes. btrfs uses
# a fixed 64K stripe size, so write enough to hit each one. With compression
# enabled, each 128K chunk of input data is compressed down to 4K (the data
# written is all duplicate characters), so it takes (128K * 16) = 2048K of
# input per device to make sure every stripe can be hit.
number_of_devices=`echo $SCRATCH_DEV_POOL | wc -w`
write_kb=$(($number_of_devices * 2048))
_require_fs_space $SCRATCH_MNT $write_kb
datalen=$((($write_kb * 1024)-1))
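# e.g. a 4-device pool (count purely illustrative) gives
# write_kb = 4 * 2048 = 8192 KiB and datalen = 8192 * 1024 - 1 = 8388607,
# one byte short of write_kb KiB (presumably kept unaligned on purpose)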

# use fd 5 to hold file open
testfile=$SCRATCH_MNT/fsync-open-after-err
exec 5>$testfile
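# holding fd 5 open across the whole test pins the inode, so the write-back
# error recorded below can't be dropped by inode eviction before the file is
# reopened and fsynced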

# write some data to file and fsync it out
$XFS_IO_PROG -c "pwrite -q 0 $datalen" -c fsync $testfile

# flip device to non-working mode
_dmerror_load_error_table
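# (roughly: the helper swaps in a dm table built on the "error" target, e.g.
# dmsetup suspend/load/resume with a table like "0 <device-sectors> error",
# after which every I/O to the device fails)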

# rewrite the data and call sync so it gets written back and hits the error;
# sync(2) doesn't report per-file errors, so this must not scrape (consume)
# the recorded error: a later fsync should still see it
$XFS_IO_PROG -c "pwrite -q 0 $datalen" -c sync $testfile

# heal the device error
_dmerror_load_working_table

# open again and call fsync
echo "The following fsync should fail with EIO:"
$XFS_IO_PROG -c fsync $testfile
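# xfs_io opens $testfile anew, so the fsync above runs on a descriptor opened
# *after* the error was recorded; it should still fail, with xfs_io's error
# message (presumably "fsync: Input/output error") landing in the golden
# output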
echo "done"

# close file
exec 5>&-

# success, all done
_dmerror_cleanup

status=0
exit