summaryrefslogtreecommitdiff
path: root/libbcache/blockdev.h
blob: 0fc0ed1b80c4387f562c2f4fbed810c86b259ccc (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
#ifndef _BCACHE_BLOCKDEV_H
#define _BCACHE_BLOCKDEV_H

#include "blockdev_types.h"
#include "io_types.h"

/* Write the backing device's superblock; completion signalled via the closure. */
void bch_write_bdev_super(struct cached_dev *, struct closure *);

/* kobject release hooks for the two block device flavours below. */
void bch_cached_dev_release(struct kobject *);
void bch_blockdev_volume_release(struct kobject *);

/* Attach a backing device to a cache set (and scan for candidates to attach). */
int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
void bch_attach_backing_devs(struct cache_set *);

/* Lifecycle: detach from cache set, start the device, stop it. */
void bch_cached_dev_detach(struct cached_dev *);
void bch_cached_dev_run(struct cached_dev *);
void bch_blockdev_stop(struct bcache_device *);

/* Register a backing device from its on-disk superblock; returns an error
 * string on failure, NULL on success — NOTE(review): inferred from the
 * const char * return convention used elsewhere in bcache; confirm. */
bool bch_is_open_backing_dev(struct block_device *);
const char *bch_backing_dev_register(struct bcache_superblock *);

/* Flash-only volumes: create one with the given size, start all volumes. */
int bch_blockdev_volume_create(struct cache_set *, u64);
int bch_blockdev_volumes_start(struct cache_set *);

/* Stop every block device belonging to @c. */
void bch_blockdevs_stop(struct cache_set *);

/* Module-wide init/teardown for the blockdev subsystem. */
void bch_blockdev_exit(void);
int bch_blockdev_init(void);
/*
 * Drop a reference on @dc.  When the final reference is released, the
 * detach work item is queued to tear the device down asynchronously.
 */
static inline void cached_dev_put(struct cached_dev *dc)
{
	if (!atomic_dec_and_test(&dc->count))
		return;

	schedule_work(&dc->detach);
}

/*
 * Try to take a reference on @dc; fails (returns false) once the
 * refcount has already dropped to zero.
 */
static inline bool cached_dev_get(struct cached_dev *dc)
{
	bool got = atomic_inc_not_zero(&dc->count) != 0;

	if (got) {
		/* Paired with the mb in cached_dev_attach */
		smp_mb__after_atomic();
	}

	return got;
}

/* Inode number of @d, read from the key of its embedded inode. */
static inline u64 bcache_dev_inum(struct bcache_device *d)
{
	return d->inode.k.p.inode;
}

/*
 * Look up the block device with inode number @inode in @c's device
 * radix tree; returns NULL if no such device is registered.
 */
static inline struct bcache_device *bch_dev_find(struct cache_set *c, u64 inode)
{
	return radix_tree_lookup(&c->devices, inode);
}

/*
 * Per-request state for I/O submitted to a bcache block device; lives for
 * the duration of one bio and is freed when the request completes.
 */
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	/* A request is either a read or a write, never both — share storage. */
	union {
	struct bch_read_bio	rbio;
	struct bch_write_bio	wbio;
	};
	/* Not modified */
	struct bio		*orig_bio;
	struct bcache_device	*d;

	unsigned		inode;
	unsigned		write:1;

	/* Flags only used for reads */
	unsigned		recoverable:1;
	unsigned		read_dirty_data:1;
	unsigned		cache_miss:1;

	/*
	 * For reads:  bypass read from cache and insertion into cache
	 * For writes: discard key range from cache, sending the write to
	 *             the backing device (if there is a backing device)
	 */
	unsigned		bypass:1;

	/* When the request was started; presumably jiffies, for latency
	 * accounting — TODO confirm against the code that sets it. */
	unsigned long		start_time;

	/*
	 * Mostly only used for writes. For reads, we still make use of
	 * some trivial fields:
	 * - c
	 * - error
	 */
	struct bch_write_op	iop;
};

/* Slab cache for struct search allocations. */
extern struct kmem_cache *bch_search_cache;

/* sysfs kobject types for cached devices and flash-only volumes. */
extern struct kobj_type bch_cached_dev_ktype;
extern struct kobj_type bch_blockdev_volume_ktype;

#endif /* _BCACHE_BLOCKDEV_H */