#ifndef _BCACHE_TOOLS_ONDISK_H
#define _BCACHE_TOOLS_ONDISK_H
/*
* Bcache on disk data structures
*/
#ifdef __cplusplus
typedef bool _Bool;
extern "C" {
#endif
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uuid.h>
#define LE32_BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ \
return (__le32_to_cpu(k->field) >> offset) & \
~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
__u64 new = __le32_to_cpu(k->field); \
\
new &= ~(~(~0ULL << (end - offset)) << offset); \
new |= (v & ~(~0ULL << (end - offset))) << offset; \
k->field = __cpu_to_le32(new); \
}
#define LE64_BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ \
return (__le64_to_cpu(k->field) >> offset) & \
~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
__u64 new = __le64_to_cpu(k->field); \
\
new &= ~(~(~0ULL << (end - offset)) << offset); \
new |= (v & ~(~0ULL << (end - offset))) << offset; \
k->field = __cpu_to_le64(new); \
}
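/*
 * Example (illustrative): a declaration such as
 *
 *	LE64_BITMASK(CACHE_DISCARD, struct cache_member, f1, 30, 31)
 *
 * (used below) generates the constants CACHE_DISCARD_OFFSET, _BITS and _MAX,
 * plus the accessors
 *
 *	__u64 CACHE_DISCARD(const struct cache_member *k);
 *	void SET_CACHE_DISCARD(struct cache_member *k, __u64 v);
 *
 * which read and update bit 30 of the little endian field f1.
 */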
struct bkey_format {
__u8 key_u64s;
__u8 nr_fields;
/* One unused slot for now: */
__u8 bits_per_field[6];
__le64 field_offset[6];
};
/* Btree keys - all units are in sectors */
struct bpos {
/* Word order matches machine byte order */
#if defined(__LITTLE_ENDIAN)
__u32 snapshot;
__u64 offset;
__u64 inode;
#elif defined(__BIG_ENDIAN)
__u64 inode;
__u64 offset; /* Points to end of extent - sectors */
__u32 snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX ((__u32)~0U)
static inline struct bpos POS(__u64 inode, __u64 offset)
{
struct bpos ret;
ret.inode = inode;
ret.offset = offset;
ret.snapshot = 0;
return ret;
}
#define POS_MIN POS(0, 0)
#define POS_MAX POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
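/*
 * Illustrative sketch (not part of the on-disk format): a bpos orders as a
 * single large integer, with inode most significant, then offset, then
 * snapshot:
 */
static inline int bpos_cmp(struct bpos l, struct bpos r)
{
	if (l.inode != r.inode)
		return l.inode < r.inode ? -1 : 1;
	if (l.offset != r.offset)
		return l.offset < r.offset ? -1 : 1;
	if (l.snapshot != r.snapshot)
		return l.snapshot < r.snapshot ? -1 : 1;
	return 0;
}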
/* Empty placeholder struct, for container_of() */
struct bch_val {
__u64 __nothing[0];
};
struct bversion {
#if defined(__LITTLE_ENDIAN)
__u64 low;
__u32 high;
#elif defined(__BIG_ENDIAN)
__u32 high;
__u64 low;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
__u64 _data[0];
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
__u8 format;
/* Type of the value */
__u8 type;
#if defined(__LITTLE_ENDIAN)
__u8 pad[1];
struct bversion version;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif defined(__BIG_ENDIAN)
struct bpos p;
__u32 size; /* extent size, in sectors */
struct bversion version;
__u8 pad[1];
#endif
} __attribute__((packed, aligned(8)));
struct bkey_packed {
__u64 _data[0];
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
__u8 format;
/* Type of the value */
__u8 type;
__u8 key_start[0];
/*
* We copy bkeys with struct assignment in various places, and while
* that shouldn't be done with packed bkeys we can't disallow it in C,
* and it's legal to cast a bkey to a bkey_packed - so padding it out
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START 24
#define KEY_SIZE_MAX ((__u32)~0U)
#define KEY_FORMAT_LOCAL_BTREE 0
#define KEY_FORMAT_CURRENT 1
enum bch_bkey_fields {
BKEY_FIELD_INODE,
BKEY_FIELD_OFFSET,
BKEY_FIELD_SNAPSHOT,
BKEY_FIELD_SIZE,
BKEY_FIELD_VERSION_HIGH,
BKEY_FIELD_VERSION_LOW,
BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field) \
[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
#define BKEY_FORMAT_CURRENT \
((struct bkey_format) { \
.key_u64s = BKEY_U64s, \
.nr_fields = BKEY_NR_FIELDS, \
.bits_per_field = { \
bkey_format_field(INODE, p.inode), \
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
bkey_format_field(VERSION_HIGH, version.high), \
bkey_format_field(VERSION_LOW, version.low), \
}, \
})
/* bkey with inline value */
struct bkey_i {
struct bkey k;
struct bch_val v;
};
#ifndef __cplusplus
#define KEY(_inode, _offset, _size) \
((struct bkey) { \
.u64s = BKEY_U64s, \
.format = KEY_FORMAT_CURRENT, \
.p = POS(_inode, _offset), \
.size = _size, \
})
#else
static inline struct bkey KEY(__u64 inode, __u64 offset, __u64 size)
{
struct bkey ret;
memset(&ret, 0, sizeof(ret));
ret.u64s = BKEY_U64s;
ret.format = KEY_FORMAT_CURRENT;
ret.p.inode = inode;
ret.p.offset = offset;
ret.size = size;
return ret;
}
#endif
static inline void bkey_init(struct bkey *k)
{
*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
{
memcpy(dst, src, bkey_bytes(&src->k));
}
#define __BKEY_PADDED(key, pad) \
struct { struct bkey_i key; __u64 key ## _pad[pad]; }
#define BKEY_VAL_TYPE(name, nr) \
struct bkey_i_##name { \
union { \
struct bkey k; \
struct bkey_i k_i; \
}; \
struct bch_##name v; \
}
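/*
 * e.g. BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE) below defines struct
 * bkey_i_cookie: a bkey with an inline struct bch_cookie value.
 */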
/*
* - DELETED keys are used internally to mark keys that should be ignored but
* override keys in composition order. Their version number is ignored.
*
* - DISCARDED keys indicate that the data is all 0s because it has been
* discarded. DISCARDs may have a version; if the version is nonzero the key
* will be persistent, otherwise the key will be dropped whenever the btree
* node is rewritten (like DELETED keys).
*
* - ERROR: any read of the data returns a read error, as the data was lost due
* to a failing device. Like DISCARDED keys, they can be removed (overridden)
* by new writes or cluster-wide GC. Node repair can also overwrite them with
* the same or a more recent version number, but not with an older version
* number.
*/
#define KEY_TYPE_DELETED 0
#define KEY_TYPE_DISCARD 1
#define KEY_TYPE_ERROR 2
#define KEY_TYPE_COOKIE 3
#define KEY_TYPE_GENERIC_NR 128
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
/* Extents */
/*
* In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
* preceded by checksum/compression information (bch_extent_crc32 or
* bch_extent_crc64).
*
* One major determining factor in the format of extents is how we handle and
* represent extents that have been partially overwritten and thus trimmed:
*
* If an extent is not checksummed or compressed, when the extent is trimmed we
* don't have to remember the extent we originally allocated and wrote: we can
 * merely adjust ptr->offset to point to the start of the data that
* is currently live. The size field in struct bkey records the current (live)
* size of the extent, and is also used to mean "size of region on disk that we
* point to" in this case.
*
* Thus an extent that is not checksummed or compressed will consist only of a
* list of bch_extent_ptrs, with none of the fields in
* bch_extent_crc32/bch_extent_crc64.
*
* When an extent is checksummed or compressed, it's not possible to read only
* the data that is currently live: we have to read the entire extent that was
* originally written, and then return only the part of the extent that is
* currently live.
*
* Thus, in addition to the current size of the extent in struct bkey, we need
* to store the size of the originally allocated space - this is the
* compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
* when the extent is trimmed, instead of modifying the offset field of the
* pointer, we keep a second smaller offset field - "offset into the original
* extent of the currently live region".
*
* The other major determining factor is replication and data migration:
*
* Each pointer may have its own bch_extent_crc32/64. When doing a replicated
* write, we will initially write all the replicas in the same format, with the
* same checksum type and compression format - however, when copygc runs later (or
* tiering/cache promotion, anything that moves data), it is not in general
* going to rewrite all the pointers at once - one of the replicas may be in a
* bucket on one device that has very little fragmentation while another lives
* in a bucket that has become heavily fragmented, and thus is being rewritten
* sooner than the rest.
*
* Thus it will only move a subset of the pointers (or in the case of
* tiering/cache promotion perhaps add a single pointer without dropping any
* current pointers), and if the extent has been partially overwritten it must
* write only the currently live portion (or copygc would not be able to reduce
* fragmentation!) - which necessitates a different bch_extent_crc format for
* the new pointer.
*
* But in the interests of space efficiency, we don't want to store one
* bch_extent_crc for each pointer if we don't have to.
*
* Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
* bch_extent_ptrs appended arbitrarily one after the other. We determine the
 * type of a given entry with a scheme similar to UTF-8 (except we're encoding a
 * type, not a size), encoding the type in the position of the first set bit:
*
* bch_extent_crc32 - 0b1
* bch_extent_ptr - 0b10
* bch_extent_crc64 - 0b100
*
* We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
* bch_extent_crc64 is the least constrained).
*
* Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
* until the next bch_extent_crc32/64.
*
* If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
* is neither checksummed nor compressed.
*/
enum bch_extent_entry_type {
BCH_EXTENT_ENTRY_crc32 = 0,
BCH_EXTENT_ENTRY_ptr = 1,
BCH_EXTENT_ENTRY_crc64 = 2,
};
#define BCH_EXTENT_ENTRY_MAX 3
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 type:1,
offset:7,
compressed_size:8,
uncompressed_size:8,
csum_type:4,
compression_type:4;
__u32 csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u32 csum;
__u32 compression_type:4,
csum_type:4,
uncompressed_size:8,
compressed_size:8,
offset:7,
type:1;
#endif
} __attribute__((packed, aligned(8)));
#define CRC32_EXTENT_SIZE_MAX (1U << 7)
/* 64k, in 512 byte sectors: */
#define BCH_COMPRESSED_EXTENT_MAX 128
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:3,
compressed_size:10,
uncompressed_size:10,
offset:10,
nonce:23,
csum_type:4,
compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 compression_type:4,
csum_type:4,
nonce:23,
offset:10,
uncompressed_size:10,
compressed_size:10,
type:3;
#endif
__u64 csum;
} __attribute__((packed, aligned(8)));
#define CRC64_EXTENT_SIZE_MAX (1U << 10) /* inclusive */
#define CRC64_NONCE_MAX (1U << 23) /* exclusive */
/*
* @reservation - pointer hasn't been written to, just reserved
*/
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:2,
erasure_coded:1,
reservation:1,
offset:44, /* 8 petabytes */
dev:8,
gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 gen:8,
dev:8,
offset:44,
reservation:1,
erasure_coded:1,
type:2;
#endif
} __attribute__((packed, aligned(8)));
union bch_extent_entry {
#if defined(__LITTLE_ENDIAN__) || BITS_PER_LONG == 64
unsigned long type;
#elif BITS_PER_LONG == 32
struct {
unsigned long pad;
unsigned long type;
};
#endif
struct bch_extent_crc32 crc32;
struct bch_extent_crc64 crc64;
struct bch_extent_ptr ptr;
};
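/*
 * Illustrative sketch (assumes GCC-style builtins): because the entry type
 * is encoded in the position of the first set bit, it can be recovered with
 * a count-trailing-zeroes on the type word:
 */
static inline enum bch_extent_entry_type
bch_extent_entry_type(const union bch_extent_entry *e)
{
	return (enum bch_extent_entry_type) __builtin_ctzl(e->type);
}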
enum {
BCH_EXTENT = 128,
/*
* This is kind of a hack, we're overloading the type for a boolean that
* really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
* have the same value type:
*/
BCH_EXTENT_CACHED = 129,
/*
* Persistent reservation:
*/
BCH_RESERVATION = 130,
};
struct bch_extent {
struct bch_val v;
union bch_extent_entry start[0];
__u64 _data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX \
	((sizeof(struct bch_extent_crc64) + \
	  sizeof(struct bch_extent_ptr)) / sizeof(__u64))
/* Maximum possible size of an entire extent value: */
#if 0
/* There's a hack in the keylist code that needs to be fixed.. */
#define BKEY_EXTENT_VAL_U64s_MAX \
(BKEY_EXTENT_PTR_U64s_MAX * BCH_REPLICAS_MAX)
#else
#define BKEY_EXTENT_VAL_U64s_MAX 8
#endif
/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
#define BKEY_BTREE_PTR_VAL_U64s_MAX BCH_REPLICAS_MAX
#define BKEY_BTREE_PTR_U64s_MAX (BKEY_U64s + BCH_REPLICAS_MAX)
/* Inodes */
#define BLOCKDEV_INODE_MAX 4096
#define BCACHE_ROOT_INO 4096
enum bch_inode_types {
BCH_INODE_FS = 128,
BCH_INODE_BLOCKDEV = 129,
};
struct bch_inode {
struct bch_val v;
__le16 i_mode;
__le16 pad;
__le32 i_flags;
/* Nanoseconds */
__le64 i_atime;
__le64 i_ctime;
__le64 i_mtime;
__le64 i_size;
__le64 i_sectors;
__le32 i_uid;
__le32 i_gid;
__le32 i_nlink;
__le32 i_dev;
__le64 i_hash_seed;
} __attribute__((packed));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);
enum {
/*
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
* flags)
*/
__BCH_INODE_SYNC = 0,
__BCH_INODE_IMMUTABLE = 1,
__BCH_INODE_APPEND = 2,
__BCH_INODE_NODUMP = 3,
__BCH_INODE_NOATIME = 4,
__BCH_INODE_I_SIZE_DIRTY= 5,
__BCH_INODE_I_SECTORS_DIRTY= 6,
/* not implemented yet: */
__BCH_INODE_HAS_XATTRS = 7, /* has xattrs in xattr btree */
};
LE32_BITMASK(INODE_STR_HASH_TYPE, struct bch_inode, i_flags, 28, 32);
#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS (1 << __BCH_INODE_HAS_XATTRS)
struct bch_inode_blockdev {
struct bch_val v;
__le64 i_size;
__le64 i_flags;
/* Seconds: */
__le64 i_ctime;
__le64 i_mtime;
uuid_le i_uuid;
__u8 i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);
/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1)
/* Dirents */
/*
* Dirents (and xattrs) have to implement string lookups; since our b-tree
* doesn't support arbitrary length strings for the key, we instead index by a
* 64 bit hash (currently truncated sha1) of the string, stored in the offset
* field of the key - using linear probing to resolve hash collisions. This also
* provides us with the readdir cookie posix requires.
*
* Linear probing requires us to use whiteouts for deletions, in the event of a
* collision:
*/
enum {
BCH_DIRENT = 128,
BCH_DIRENT_WHITEOUT = 129,
};
struct bch_dirent {
struct bch_val v;
/* Target inode number: */
__le64 d_inum;
/*
* Copy of mode bits 12-15 from the target inode - so userspace can get
* the filetype without having to do a stat()
*/
__u8 d_type;
__u8 d_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);
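/*
 * Illustrative example: a dirent's key position is built from the directory
 * inode and a hash of the name (the hash in use is selected by
 * CACHE_SET_STR_HASH_TYPE, defined below):
 *
 *	struct bpos p = POS(dir_inum, hash_of(d_name));
 *
 * where hash_of() stands in for whichever 64 bit string hash is configured;
 * collisions are resolved by probing subsequent offsets linearly.
 */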
/* Xattrs */
enum {
BCH_XATTR = 128,
BCH_XATTR_WHITEOUT = 129,
};
#define BCH_XATTR_INDEX_USER 0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS 1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT 2
#define BCH_XATTR_INDEX_TRUSTED 3
#define BCH_XATTR_INDEX_SECURITY 4
struct bch_xattr {
struct bch_val v;
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(xattr, BCH_XATTR);
/* Superblock */
/* Version 0: Cache device
* Version 1: Backing device
* Version 2: Seed pointer into btree node checksum
* Version 3: Cache device with new UUID format
* Version 4: Backing device with data offset
* Version 5: All the incompat changes
* Version 6: Cache device UUIDs all in superblock, another incompat bset change
*/
#define BCACHE_SB_VERSION_CDEV_V0 0
#define BCACHE_SB_VERSION_BDEV 1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2 5
#define BCACHE_SB_VERSION_CDEV_V3 6
#define BCACHE_SB_VERSION_CDEV 6
#define BCACHE_SB_MAX_VERSION 6
#define SB_SECTOR 8
#define SB_LABEL_SIZE 32
#define MAX_CACHES_PER_SET 64
#define BDEV_DATA_START_DEFAULT 16 /* sectors */
struct cache_member {
uuid_le uuid;
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
__le32 pad;
__le64 last_mount; /* time_t */
__le64 f1;
__le64 f2;
};
LE64_BITMASK(CACHE_STATE, struct cache_member, f1, 0, 4)
#define CACHE_ACTIVE 0U
#define CACHE_RO 1U
#define CACHE_FAILED 2U
#define CACHE_SPARE 3U
#define CACHE_STATE_NR 4U
LE64_BITMASK(CACHE_TIER, struct cache_member, f1, 4, 8)
#define CACHE_TIERS 4U
LE64_BITMASK(CACHE_REPLICATION_SET, struct cache_member, f1, 8, 16)
LE64_BITMASK(CACHE_HAS_METADATA, struct cache_member, f1, 24, 25)
LE64_BITMASK(CACHE_HAS_DATA, struct cache_member, f1, 25, 26)
LE64_BITMASK(CACHE_REPLACEMENT, struct cache_member, f1, 26, 30)
#define CACHE_REPLACEMENT_LRU 0U
#define CACHE_REPLACEMENT_FIFO 1U
#define CACHE_REPLACEMENT_RANDOM 2U
#define CACHE_REPLACEMENT_NR 3U
LE64_BITMASK(CACHE_DISCARD, struct cache_member, f1, 30, 31);
LE64_BITMASK(CACHE_NR_READ_ERRORS, struct cache_member, f2, 0, 20);
LE64_BITMASK(CACHE_NR_WRITE_ERRORS, struct cache_member, f2, 20, 40);
struct cache_sb {
__le64 csum;
__le64 offset; /* sector where this sb was written */
__le64 version; /* of on disk format */
uuid_le magic; /* bcache superblock UUID */
/* Identifies this disk within the cache set: */
uuid_le disk_uuid;
/*
* Internal cache set UUID - xored with various magic numbers and thus
* must never change:
*/
union {
uuid_le set_uuid;
__le64 set_magic;
};
__u8 label[SB_LABEL_SIZE];
__le64 flags;
/* Incremented each time superblock is written: */
__le64 seq;
	/*
	 * User visible UUID for identifying the cache set; unlike set_uuid,
	 * the user is allowed to change it:
	 */
uuid_le user_uuid;
__le64 flags2;
__le64 encryption_key[5];
/* Number of cache_member entries: */
__u8 nr_in_set;
/*
* Index of this device - for PTR_DEV(), and also this device's
* slot in the cache_member array:
*/
__u8 nr_this_dev;
__le16 pad2[3];
__le16 block_size; /* sectors */
__le16 pad3[6];
__le16 u64s; /* size of variable length portion */
union {
struct cache_member members[0];
/*
* Journal buckets also in the variable length portion, after
* the member info:
*/
__le64 _data[0];
};
};
/* XXX: rename CACHE_SET -> BCH_FS or something? */
LE64_BITMASK(CACHE_SET_SYNC, struct cache_sb, flags, 0, 1);
LE64_BITMASK(CACHE_SET_ERROR_ACTION, struct cache_sb, flags, 1, 4);
#define BCH_ON_ERROR_CONTINUE 0U
#define BCH_ON_ERROR_RO 1U
#define BCH_ON_ERROR_PANIC 2U
#define BCH_NR_ERROR_ACTIONS 3U
LE64_BITMASK(CACHE_SET_META_REPLICAS_WANT,struct cache_sb, flags, 4, 8);
LE64_BITMASK(CACHE_SET_DATA_REPLICAS_WANT,struct cache_sb, flags, 8, 12);
#define BCH_REPLICAS_MAX 4U
LE64_BITMASK(CACHE_SB_CSUM_TYPE, struct cache_sb, flags, 12, 16);
LE64_BITMASK(CACHE_SET_META_CSUM_TYPE,struct cache_sb, flags, 16, 20);
#define BCH_CSUM_NONE 0U
#define BCH_CSUM_CRC32C 1U
#define BCH_CSUM_CRC64 2U
#define BCH_CSUM_NR 3U
LE64_BITMASK(CACHE_SET_BTREE_NODE_SIZE, struct cache_sb, flags, 20, 36);
LE64_BITMASK(CACHE_SET_META_REPLICAS_HAVE,struct cache_sb, flags, 36, 40);
LE64_BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,struct cache_sb, flags, 40, 44);
LE64_BITMASK(CACHE_SET_STR_HASH_TYPE,struct cache_sb, flags, 44, 48);
enum bch_str_hash_type {
BCH_STR_HASH_CRC32C = 0,
BCH_STR_HASH_CRC64 = 1,
BCH_STR_HASH_SIPHASH = 2,
BCH_STR_HASH_SHA1 = 3,
};
#define BCH_STR_HASH_NR 4
LE64_BITMASK(CACHE_SET_DATA_CSUM_TYPE, struct cache_sb, flags, 48, 52);
LE64_BITMASK(CACHE_SET_COMPRESSION_TYPE, struct cache_sb, flags, 52, 56);
enum {
BCH_COMPRESSION_NONE = 0,
BCH_COMPRESSION_LZ4 = 1,
BCH_COMPRESSION_GZIP = 2,
};
#define BCH_COMPRESSION_NR 3U
/* Limit inode numbers to 32 bits: */
LE64_BITMASK(CACHE_INODE_32BIT, struct cache_sb, flags, 56, 57);
LE64_BITMASK(CACHE_SET_GC_RESERVE, struct cache_sb, flags, 57, 63);
LE64_BITMASK(CACHE_SET_ROOT_RESERVE, struct cache_sb, flags2, 0, 6);
/*
* Did we shut down cleanly? Just a hint, doesn't affect behaviour of
* mount/recovery path:
*/
LE64_BITMASK(CACHE_SET_CLEAN, struct cache_sb, flags2, 6, 7);
LE64_BITMASK(CACHE_SET_JOURNAL_ENTRY_SIZE, struct cache_sb, flags2, 7, 15);
/* options: */
/**
* CACHE_SET_OPT(name, choices, min, max, sb_option, sysfs_writeable)
*
* @name - name of mount option, sysfs attribute, and struct cache_set_opts
* member
*
 * @choices - array of strings that the user can select from - the option's
 * value is the index into the array
 *
 * Booleans are special cased: if @choices is bch_bool_opt, the mount
 * options "name" and "noname" will work as expected.
 *
 * @min, @max - range of valid integer values for the option (@max exclusive)
*
* @sb_option - name of corresponding superblock option
*
* @sysfs_writeable - if true, option will be modifiable at runtime via sysfs
*/
#define CACHE_SET_SB_OPTS() \
CACHE_SET_OPT(errors, \
bch_error_actions, \
0, BCH_NR_ERROR_ACTIONS, \
CACHE_SET_ERROR_ACTION, \
true) \
CACHE_SET_OPT(metadata_replicas, \
bch_uint_opt, \
0, BCH_REPLICAS_MAX, \
CACHE_SET_META_REPLICAS_WANT, \
false) \
CACHE_SET_OPT(data_replicas, \
bch_uint_opt, \
0, BCH_REPLICAS_MAX, \
CACHE_SET_DATA_REPLICAS_WANT, \
false) \
CACHE_SET_OPT(metadata_checksum, \
bch_csum_types, \
0, BCH_CSUM_NR, \
CACHE_SET_META_CSUM_TYPE, \
true) \
CACHE_SET_OPT(data_checksum, \
bch_csum_types, \
0, BCH_CSUM_NR, \
CACHE_SET_DATA_CSUM_TYPE, \
true) \
CACHE_SET_OPT(compression, \
bch_compression_types, \
0, BCH_COMPRESSION_NR, \
CACHE_SET_COMPRESSION_TYPE, \
true) \
CACHE_SET_OPT(str_hash, \
bch_str_hash_types, \
0, BCH_STR_HASH_NR, \
CACHE_SET_STR_HASH_TYPE, \
true) \
CACHE_SET_OPT(inodes_32bit, \
bch_bool_opt, 0, 2, \
CACHE_INODE_32BIT, \
true) \
CACHE_SET_OPT(gc_reserve_percent, \
bch_uint_opt, \
5, 21, \
CACHE_SET_GC_RESERVE, \
false) \
CACHE_SET_OPT(root_reserve_percent, \
bch_uint_opt, \
0, 21, \
CACHE_SET_ROOT_RESERVE, \
false)
/* backing device specific stuff: */
struct backingdev_sb {
__le64 csum;
__le64 offset; /* sector where this sb was written */
__le64 version; /* of on disk format */
uuid_le magic; /* bcache superblock UUID */
uuid_le disk_uuid;
/*
* Internal cache set UUID - xored with various magic numbers and thus
* must never change:
*/
union {
uuid_le set_uuid;
__le64 set_magic;
};
__u8 label[SB_LABEL_SIZE];
__le64 flags;
/* Incremented each time superblock is written: */
__le64 seq;
	/*
	 * User visible UUID for identifying the cache set; unlike set_uuid,
	 * the user is allowed to change it:
	 *
	 * XXX hooked up?
	 */
uuid_le user_uuid;
__le64 pad1[6];
__le64 data_offset;
__le16 block_size; /* sectors */
__le16 pad2[3];
__le32 last_mount; /* time_t */
__le16 pad3;
/* size of variable length portion - always 0 for backingdev superblock */
__le16 u64s;
__u64 _data[0];
};
LE64_BITMASK(BDEV_CACHE_MODE, struct backingdev_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH 0U
#define CACHE_MODE_WRITEBACK 1U
#define CACHE_MODE_WRITEAROUND 2U
#define CACHE_MODE_NONE 3U
LE64_BITMASK(BDEV_STATE, struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_NONE 0U
#define BDEV_STATE_CLEAN 1U
#define BDEV_STATE_DIRTY 2U
#define BDEV_STATE_STALE 3U
static inline unsigned bch_journal_buckets_offset(struct cache_sb *sb)
{
return sb->nr_in_set * (sizeof(struct cache_member) / sizeof(__u64));
}
static inline unsigned bch_nr_journal_buckets(struct cache_sb *sb)
{
return __le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
}
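/*
 * Illustrative sketch: journal buckets live in the variable length portion
 * immediately after the cache_member array, so journal bucket @i is:
 */
static inline __u64 bch_journal_bucket(struct cache_sb *sb, unsigned i)
{
	return __le64_to_cpu(sb->_data[bch_journal_buckets_offset(sb) + i]);
}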
static inline _Bool __SB_IS_BDEV(__u64 version)
{
return version == BCACHE_SB_VERSION_BDEV
|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}
static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
{
return __SB_IS_BDEV(sb->version);
}
/*
* Magic numbers
*
* The various other data structures have their own magic numbers, which are
* xored with the first part of the cache set's UUID
*/
#define BCACHE_MAGIC \
UUID_LE(0xf67385c6, 0x1a4e, 0xca45, \
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCACHE_STATFS_MAGIC 0xca451a4e
#define BCACHE_SB_MAGIC 0xca451a4ef67385c6ULL
#define BCACHE_SB_MAGIC2 0x816dba487ff56582ULL
#define JSET_MAGIC 0x245235c1a3625032ULL
#define PSET_MAGIC 0x6750e15f87337f91ULL
#define BSET_MAGIC 0x90135c78b99e07f5ULL
static inline __u64 jset_magic(struct cache_sb *sb)
{
return __le64_to_cpu(sb->set_magic) ^ JSET_MAGIC;
}
static inline __u64 pset_magic(struct cache_sb *sb)
{
return __le64_to_cpu(sb->set_magic) ^ PSET_MAGIC;
}
static inline __u64 bset_magic(struct cache_sb *sb)
{
return __le64_to_cpu(sb->set_magic) ^ BSET_MAGIC;
}
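/*
 * Illustrative example: validating e.g. a journal entry read from disk
 * amounts to comparing its magic field against this cache set's derived
 * value:
 *
 *	if (__le64_to_cpu(j->magic) != jset_magic(sb))
 *		goto bad;
 */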
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
__le64 lo;
__le64 hi;
};
/* Journal */
#define BCACHE_JSET_VERSION_UUIDv1 1
#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS 2
#define BCACHE_JSET_VERSION 2
struct jset_entry {
__le16 u64s;
__u8 btree_id;
__u8 level;
__le32 flags; /* designates what this jset holds */
union {
struct bkey_i start[0];
__u64 _data[0];
};
};
#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
LE32_BITMASK(JOURNAL_ENTRY_TYPE, struct jset_entry, flags, 0, 8);
enum {
JOURNAL_ENTRY_BTREE_KEYS = 0,
JOURNAL_ENTRY_BTREE_ROOT = 1,
JOURNAL_ENTRY_PRIO_PTRS = 2,
/*
* Journal sequence numbers can be blacklisted: bsets record the max
* sequence number of all the journal entries they contain updates for,
* so that on recovery we can ignore those bsets that contain index
 * updates newer than what made it into the journal.
*
* This means that we can't reuse that journal_seq - we have to skip it,
* and then record that we skipped it so that the next time we crash and
* recover we don't think there was a missing journal entry.
*/
JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED = 3,
};
/*
* On disk format for a journal entry:
* seq is monotonically increasing; every journal entry has its own unique
* sequence number.
*
* last_seq is the oldest journal entry that still has keys the btree hasn't
* flushed to disk yet.
*
* version is for on disk format changes.
*/
struct jset {
struct bch_csum csum;
__le64 magic;
__le32 version;
__le32 flags;
	/* Monotonically increasing sequence number of this entry: */
	__le64			seq;
	/* Sequence number of oldest dirty journal entry: */
	__le64			last_seq;
__le16 read_clock;
__le16 write_clock;
	__le32			u64s; /* size of _data[] in u64s */
union {
struct jset_entry start[0];
__u64 _data[0];
};
};
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
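/*
 * Illustrative note: entries with sequence numbers in [last_seq, seq] may
 * still contain unflushed keys, so recovery has to replay the journal from
 * last_seq onwards.
 */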
#define BCH_JOURNAL_BUCKETS_MIN 20
/* Bucket prios/gens */
struct prio_set {
struct bch_csum csum;
__le64 magic;
__le32 version;
__le32 flags;
__le64 next_bucket;
struct bucket_disk {
__le16 read_prio;
__le16 write_prio;
__u8 gen;
} __attribute__((packed)) data[];
};
LE32_BITMASK(PSET_CSUM_TYPE, struct prio_set, flags, 0, 4);
/* Btree: */
#define DEFINE_BCH_BTREE_IDS() \
DEF_BTREE_ID(EXTENTS, 0, "extents") \
DEF_BTREE_ID(INODES, 1, "inodes") \
DEF_BTREE_ID(DIRENTS, 2, "dirents") \
DEF_BTREE_ID(XATTRS, 3, "xattrs")
#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
enum btree_id {
DEFINE_BCH_BTREE_IDS()
BTREE_ID_NR
};
#undef DEF_BTREE_ID
#define BTREE_MAX_DEPTH 4
/* Btree nodes */
/* Version 1: Seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM 1
#define BCACHE_BSET_KEY_v1 2
#define BCACHE_BSET_JOURNAL_SEQ 3
#define BCACHE_BSET_VERSION 3
/*
* Btree nodes
*
* On disk a btree node is a list/log of these; within each set the keys are
* sorted
*/
struct bset {
__le64 seq;
/*
* Highest journal entry this bset contains keys for.
* If on recovery we don't see that journal entry, this bset is ignored:
* this allows us to preserve the order of all index updates after a
* crash, since the journal records a total order of all index updates
* and anything that didn't make it to the journal doesn't get used.
*/
__le64 journal_seq;
__le32 flags;
__le16 version;
	__le16			u64s; /* count of _data[] in u64s */
union {
struct bkey_packed start[0];
__u64 _data[0];
};
} __attribute__((packed));
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
/* Only used in first bset */
LE32_BITMASK(BSET_BTREE_LEVEL, struct bset, flags, 4, 8);
LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 8, 9);
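/*
 * Illustrative sketch (not part of the on-disk format): keys within a bset
 * are laid out back to back, each prefixed by its size in u64s, so walking
 * them is pointer arithmetic on the u64s field:
 */
static inline const struct bkey_packed *
bkey_packed_next(const struct bkey_packed *k)
{
	return (const struct bkey_packed *) ((const __u64 *) k + k->u64s);
}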
struct btree_node {
struct bch_csum csum;
__le64 magic;
/* Closed interval: */
struct bpos min_key;
struct bpos max_key;
struct bkey_format format;
struct bset keys;
} __attribute__((packed));
struct btree_node_entry {
struct bch_csum csum;
struct bset keys;
} __attribute__((packed));
/* Crypto: */
struct nonce {
__le32 d[4];
};
#define BCACHE_MASTER_KEY_HEADER "bch**key"
#define BCACHE_MASTER_KEY_NONCE ((struct nonce) \
{{ __cpu_to_le32(1), __cpu_to_le32(2), \
__cpu_to_le32(3), __cpu_to_le32(4) }})
/* OBSOLETE */
#define BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ return (k->field >> offset) & ~(~0ULL << (end - offset)); } \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
k->field &= ~(~(~0ULL << (end - offset)) << offset); \
k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
}
struct bkey_v0 {
__u64 high;
__u64 low;
__u64 ptr[];
};
#define KEY0_FIELD(name, field, offset, size) \
BITMASK(name, struct bkey_v0, field, offset, size)
KEY0_FIELD(KEY0_PTRS, high, 60, 63)
KEY0_FIELD(KEY0_CSUM, high, 56, 58)
KEY0_FIELD(KEY0_DIRTY, high, 36, 37)
KEY0_FIELD(KEY0_SIZE, high, 20, 36)
KEY0_FIELD(KEY0_INODE, high, 0, 20)
static inline unsigned long bkey_v0_u64s(const struct bkey_v0 *k)
{
return (sizeof(struct bkey_v0) / sizeof(__u64)) + KEY0_PTRS(k);
}
static inline struct bkey_v0 *bkey_v0_next(const struct bkey_v0 *k)
{
__u64 *d = (__u64 *) k;
return (struct bkey_v0 *) (d + bkey_v0_u64s(k));
}
struct jset_v0 {
__u64 csum;
__u64 magic;
__u64 seq;
__u32 version;
__u32 keys;
__u64 last_seq;
__BKEY_PADDED(uuid_bucket, 4);
__BKEY_PADDED(btree_root, 4);
__u16 btree_level;
__u16 pad[3];
__u64 prio_bucket[MAX_CACHES_PER_SET];
union {
struct bkey start[0];
__u64 d[0];
};
};
/* UUIDS - per backing device/flash only volume metadata */
struct uuid_entry_v0 {
uuid_le uuid;
__u8 label[32];
__u32 first_reg;
__u32 last_reg;
__u32 invalidated;
__u32 pad;
};
struct uuid_entry {
union {
struct {
uuid_le uuid;
__u8 label[32];
__u32 first_reg;
__u32 last_reg;
__u32 invalidated;
__u32 flags;
/* Size of flash only volumes */
__u64 sectors;
};
__u8 pad[128];
};
};
BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
#define SB_SIZE 4096
#define SB_JOURNAL_BUCKETS 256U
struct cache_sb_v0 {
__u64 csum;
__u64 offset; /* sector where this sb was written */
__u64 version;
uuid_le magic; /* bcache superblock UUID */
uuid_le uuid;
union {
uuid_le set_uuid;
__u64 set_magic;
};
__u8 label[SB_LABEL_SIZE];
__u64 flags;
__u64 seq;
__u64 pad[8];
union {
struct {
/* Cache devices */
__u64 nbuckets; /* device size */
__u16 block_size; /* sectors */
__u16 bucket_size; /* sectors */
__u16 nr_in_set;
__u16 nr_this_dev;
};
struct {
/* Backing devices */
__u64 data_offset;
/*
* block_size from the cache device section is still used by
* backing devices, so don't add anything here until we fix
* things to not need it for backing devices anymore
*/
};
};
__u32 last_mount; /* time_t */
__u16 first_bucket;
__u16 u64s;
__u64 _data[SB_JOURNAL_BUCKETS]; /* journal buckets */
};
#ifdef __cplusplus
}
#endif
#endif /* _BCACHE_TOOLS_ONDISK_H */
/* vim: set foldnestmax=2: */