/*
 * Bucket/sector allocator interface for bcache.
 */
#ifndef _BCACHE_ALLOC_H
#define _BCACHE_ALLOC_H
#include "alloc_types.h"
/* Forward declarations — users of this header only need pointers: */
struct bkey;
struct bucket;
struct cache;
struct cache_set;
struct cache_group;
/* Bucket priority/generation bookkeeping (per-device): */
int bch_prio_read(struct cache *);
void bch_recalc_min_prio(struct cache *, int);
/* Release a reference to an open bucket: */
void bch_open_bucket_put(struct cache_set *, struct open_bucket *);
/*
 * Sector allocation against a write point.  The start/done pair lets a
 * caller do work between reserving space and committing it;
 * bch_alloc_sectors() presumably combines the two — NOTE(review):
 * confirm against the definitions in alloc.c.
 */
struct open_bucket *bch_alloc_sectors_start(struct cache_set *,
					    struct write_point *,
					    struct bkey_i_extent *,
					    unsigned, struct closure *);
void bch_alloc_sectors_done(struct cache_set *, struct write_point *,
			    struct bkey_i_extent *, unsigned,
			    struct open_bucket *, unsigned);
struct open_bucket *bch_alloc_sectors(struct cache_set *, struct write_point *,
				      struct bkey_i_extent *, unsigned,
				      struct closure *);
/*
 * Kick the device's allocator thread (if one is running) and wake
 * anything blocked waiting for buckets to become available.
 *
 * ca->alloc_thread is sampled once under rcu_read_lock(); NOTE(review):
 * this presumes the allocator task struct is protected by RCU against
 * concurrent teardown — confirm against bch_cache_allocator_stop().
 */
static inline void bch_wake_allocator(struct cache *ca)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = ACCESS_ONCE(ca->alloc_thread);
	if (tsk)
		wake_up_process(tsk);
	rcu_read_unlock();

	closure_wake_up(&ca->set->buckets_available_wait);
}
/*
 * Advance _ptr through _ob's pointer array until PTR_CACHE() returns a
 * non-NULL device for it; sets _ca to that device and evaluates to it,
 * or to NULL when the array is exhausted.  Helper for the foreach macro
 * below.  NOTE: arguments are evaluated multiple times — pass only
 * side-effect-free expressions.
 */
#define __open_bucket_next_online_device(_c, _ob, _ptr, _ca)		\
({									\
	(_ca) = NULL;							\
									\
	while ((_ptr) < (_ob)->ptrs + (_ob)->nr_ptrs &&			\
	       !((_ca) = PTR_CACHE(_c, _ptr)))				\
		(_ptr)++;						\
	(_ca);								\
})
/*
 * Iterate over each pointer in open bucket _ob whose device is online
 * (i.e. PTR_CACHE() is non-NULL), binding the pointer to _ptr and the
 * device to _ca on each pass.  Pointers to offline devices are skipped.
 */
#define open_bucket_for_each_online_device(_c, _ob, _ptr, _ca)		\
	for ((_ptr) = (_ob)->ptrs;					\
	     ((_ca) = __open_bucket_next_online_device(_c, _ob, _ptr, _ca));\
	     (_ptr)++)
/* Per-device allocator thread lifecycle (see ca->alloc_thread): */
void bch_cache_allocator_stop(struct cache *);
int bch_cache_allocator_start(struct cache *);
/* Returns an error string on failure, NULL on success: */
const char *bch_cache_allocator_start_once(struct cache *);
/* One-time cache-set level initialization of open bucket state: */
void bch_open_buckets_init(struct cache_set *);
#endif /* _BCACHE_ALLOC_H */