path: root/libbcache/blockdev_types.h
#ifndef _BCACHE_BLOCKDEV_TYPES_H
#define _BCACHE_BLOCKDEV_TYPES_H

#include "keybuf_types.h"
#include "stats_types.h"
#include "super_types.h"
#include "util.h"

struct bcache_device {
	struct closure		cl;

	struct kobject		kobj;

	struct bch_fs		*c;

	struct rb_node		node;
	struct bkey_i_inode_blockdev inode;
	struct mutex		inode_lock;

#define BCACHEDEVNAME_SIZE	12
	char			name[BCACHEDEVNAME_SIZE];

	struct gendisk		*disk;

	unsigned long		flags;
#define BCACHE_DEV_CLOSING	0
#define BCACHE_DEV_DETACHING	1
#define BCACHE_DEV_UNLINK_DONE	2

	unsigned		nr_stripes;
	unsigned		stripe_size;
	atomic_t		*stripe_sectors_dirty;
	unsigned long		*full_dirty_stripes;

	struct bio_set		bio_split;

	unsigned		data_csum:1;

	int (*ioctl)(struct bcache_device *, fmode_t, unsigned, unsigned long);
};
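
/*
 * Illustrative sketch, not part of this header: the BCACHE_DEV_* values
 * above are bit numbers in ->flags and are meant for the kernel's atomic
 * bitops (test_bit()/set_bit()/clear_bit()). bcache_dev_is_closing() below
 * is a hypothetical helper shown only as an example of that usage.
 */
static inline bool bcache_dev_is_closing(struct bcache_device *d)
{
	return test_bit(BCACHE_DEV_CLOSING, &d->flags);
}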

struct io {
	/* Used to track sequential IO so it can be skipped */
	struct hlist_node	hash;
	struct list_head	lru;

	unsigned long		last_io;
	unsigned		sequential;
	sector_t		last;
};
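
/*
 * Illustrative sketch, not part of this header: bch_io_note() is a
 * hypothetical helper showing how a struct io is typically updated when a
 * new request either continues or restarts the stream it tracks. A request
 * starting at ->last continues the stream, so its size is accumulated into
 * ->sequential; otherwise the counter restarts.
 */
static inline void bch_io_note(struct io *i, sector_t sector, unsigned sectors)
{
	if (i->last == sector)
		i->sequential += sectors << 9;	/* sectors to bytes */
	else
		i->sequential = sectors << 9;

	i->last		= sector + sectors;
	i->last_io	= jiffies;
}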

struct cached_dev {
	struct list_head	list;
	struct bcache_device	disk;

	//struct backingdev_sb		sb;

	struct {
		struct backingdev_sb	*sb;
		struct block_device	*bdev;
		struct bio		*bio;
		unsigned		page_order;
	} disk_sb;
	struct closure		sb_write;
	struct semaphore	sb_write_mutex;

	/*
	 * Refcount on the cache set. Always nonzero when we're caching; see
	 * the sketch after this struct.
	 */
	atomic_t		count;
	struct work_struct	detach;

	/*
	 * Device might not be running if it's dirty and the cache set hasn't
	 * shown up yet.
	 */
	atomic_t		running;

	/*
	 * Writes take a shared lock from start to finish; scanning for dirty
	 * data to refill the rb tree requires an exclusive lock.
	 */
	struct rw_semaphore	writeback_lock;

	/*
	 * Nonzero, and writeback has a refcount (d->count), iff there is dirty
	 * data in the cache. Protected by writeback_lock; must hold a shared
	 * lock to set and an exclusive lock to clear.
	 */
	atomic_t		has_dirty;

	/* for dynamic rate control of writeback */
	struct bch_pd_controller writeback_pd;
	struct delayed_work	writeback_pd_update;
	unsigned		writeback_pd_update_seconds;

	struct task_struct	*writeback_thread;
	struct keybuf		writeback_keys;
	mempool_t		writeback_io_pool;
	mempool_t		writeback_page_pool;

	/* For tracking sequential IO */
#define RECENT_IO_BITS	7
#define RECENT_IO	(1 << RECENT_IO_BITS)
	struct io		io[RECENT_IO];
	struct hlist_head	io_hash[RECENT_IO + 1];
	struct list_head	io_lru;
	spinlock_t		io_lock;

	struct cache_accounting	accounting;

	/* The rest of this all shows up in sysfs */
	unsigned		sequential_cutoff;
	unsigned		readahead;

	unsigned		verify:1;
	unsigned		bypass_torture_test:1;

	unsigned		partial_stripes_expensive:1;
	unsigned		writeback_metadata:1;
	unsigned		writeback_running:1;
	unsigned char		writeback_percent;
};
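
/*
 * Illustrative sketch, not part of this header: cached_dev_tryget() and
 * cached_dev_put() are hypothetical helpers showing the usual lifecycle of
 * ->count above. A reference is only taken while the count is still nonzero,
 * so a device already being detached cannot be resurrected, and dropping the
 * last reference queues the ->detach work.
 */
static inline bool cached_dev_tryget(struct cached_dev *dc)
{
	return atomic_inc_not_zero(&dc->count);
}

static inline void cached_dev_put(struct cached_dev *dc)
{
	if (atomic_dec_and_test(&dc->count))
		schedule_work(&dc->detach);
}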

#endif /* _BCACHE_BLOCKDEV_TYPES_H */