#ifndef _BCACHE_IO_TYPES_H
#define _BCACHE_IO_TYPES_H

#include "btree_types.h"
#include "buckets_types.h"
#include "keylist_types.h"

#include <linux/llist.h>
#include <linux/workqueue.h>

struct bch_read_bio {
	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which union member is live:
	 */
	union {
		struct bch_read_bio	*parent;
		bio_end_io_t		*orig_bi_end_io;
	};

	/*
	 * Saved copy of parent->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	parent_iter;

	unsigned		submit_time_us;
	u16			flags;
	u8			bounce:1,
				split:1;

	struct bch_fs		*c;
	struct bch_dev		*ca;
	struct bch_extent_ptr	ptr;
	struct bch_extent_crc128 crc;
	struct bversion		version;

	struct cache_promote_op	*promote;

	/*
	 * If we have to retry the read (IO error, checksum failure, or stale
	 * data read - we raced with the allocator), we retry the portion of
	 * the parent bio that failed (i.e. this bio's portion, parent_iter).
	 *
	 * But we need to stash the inode somewhere:
	 */
	u64			inode;

	struct work_struct	work;

	struct bio		bio;
};

static inline struct bch_read_bio *
bch_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}
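
/*
 * For illustration only: a minimal usage sketch. The helper below is an
 * assumption, not part of the original interface. Completion and retry
 * paths want the original request, so they go through bch_rbio_parent()
 * rather than reading @parent directly - @parent is only the live union
 * member when @split is set:
 */
static inline struct bio *bch_rbio_orig_bio(struct bch_read_bio *rbio)
{
	/* If split, the bio embedded one level up is the one we split from */
	return &bch_rbio_parent(rbio)->bio;
}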

struct bch_write_bio {
	struct bch_fs		*c;
	struct bch_dev		*ca;
	union {
		struct bio	*orig;
		struct closure	*cl;
	};

	unsigned		submit_time_us;
	unsigned		split:1,
				bounce:1,
				put_bio:1;

	/* Only for btree writes: */
	unsigned		used_mempool:1;
	u8			order;

	struct bio		bio;
};
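
/*
 * For illustration, one plausible shape of a write completion; the name and
 * exact flow here are assumptions, not the original endio. @split selects
 * the live union member, and @put_bio records that this bio was allocated
 * separately and must be freed here:
 */
static inline void bch_wbio_example_done(struct bch_write_bio *wbio)
{
	if (wbio->split)
		bio_endio(wbio->orig);	/* complete the bio we split from */
	else
		closure_put(wbio->cl);	/* drop the submitter's ref */

	if (wbio->put_bio)
		bio_put(&wbio->bio);
}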

struct bch_replace_info {
	struct extent_insert_hook	hook;

	/* How many insertions succeeded */
	unsigned			successes;

	/* How many insertions failed */
	unsigned			failures;

	BKEY_PADDED(key);
};
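
/*
 * A hedged usage sketch: the hook is embedded so that an extent-insert
 * callback, which is handed only the extent_insert_hook, can recover the
 * containing bch_replace_info and bump @successes or @failures for each
 * overlapping extent it decides on. The helper name is an assumption:
 */
static inline struct bch_replace_info *
bch_replace_info_of(struct extent_insert_hook *hook)
{
	return container_of(hook, struct bch_replace_info, hook);
}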

struct bch_write_op {
	struct closure		cl;
	struct bch_fs		*c;
	struct workqueue_struct	*io_wq;
	struct bch_write_bio	*bio;

	unsigned		written; /* sectors */

	short			error;

	u16			flags;
	unsigned		csum_type:4;
	unsigned		compression_type:4;
	unsigned		nr_replicas:4;
	unsigned		alloc_reserve:4;
	unsigned		nonce:14;

	struct bpos		pos;
	struct bversion		version;

	/* For BCH_WRITE_DATA_COMPRESSED: */
	struct bch_extent_crc128 crc;
	unsigned		size;

	struct disk_reservation	res;

	struct write_point	*wp;

	union {
		u8		open_buckets[16];
		struct {
			struct bch_write_op	*next;
			unsigned long		expires;
		};
	};

	/*
	 * If the caller wants to flush but hasn't passed us a journal_seq ptr,
	 * we still need to stash the journal_seq somewhere:
	 */
	union {
		u64		*journal_seq_p;
		u64		journal_seq;
	};

	int			(*index_update_fn)(struct bch_write_op *);

	struct keylist		insert_keys;
	u64			inline_keys[BKEY_EXTENT_U64s_MAX * 2];
};
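
/*
 * A hedged sketch of consuming the journal_seq union above: a flag in
 * @flags (the name BCH_WRITE_JOURNAL_SEQ_PTR is assumed here, which is why
 * this stays a comment) would record which member is live, so completion
 * can always store the journal sequence number through a single pointer,
 * whether or not the caller supplied one:
 *
 *	static inline u64 *op_journal_seq(struct bch_write_op *op)
 *	{
 *		return (op->flags & BCH_WRITE_JOURNAL_SEQ_PTR)
 *			? op->journal_seq_p : &op->journal_seq;
 *	}
 */
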
#endif /* _BCACHE_IO_TYPES_H */