summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2017-03-09 08:20:05 -0900
committerKent Overstreet <kent.overstreet@gmail.com>2017-03-17 19:49:22 -0800
commit04a21c62bcb995f21e1aa80462e5e8aaa80928c5 (patch)
tree1171650d4b9cdce47647f29df4be576e27465225
parent9a7f8bafc903da1aacc11cbca36c6b424d2f0828 (diff)
bcachefs: More renaming
bucket_stats_cache -> bch_dev_usage
bucket_stats_cache_set -> bch_fs_usage
-rw-r--r--fs/bcachefs/alloc.c2
-rw-r--r--fs/bcachefs/bcache.h8
-rw-r--r--fs/bcachefs/btree_gc.c10
-rw-r--r--fs/bcachefs/btree_types.h1
-rw-r--r--fs/bcachefs/btree_update.c10
-rw-r--r--fs/bcachefs/buckets.c63
-rw-r--r--fs/bcachefs/buckets.h28
-rw-r--r--fs/bcachefs/buckets_types.h4
-rw-r--r--fs/bcachefs/extents.c2
-rw-r--r--fs/bcachefs/super.c4
-rw-r--r--fs/bcachefs/sysfs.c6
11 files changed, 69 insertions, 69 deletions
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index a9ccbdee0c47..acd76b373cf1 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -138,7 +138,7 @@ static void pd_controllers_update(struct work_struct *work)
-1);
group_for_each_cache_rcu(ca, &c->tiers[i].devs, iter) {
- struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
+ struct bch_dev_usage stats = bch_dev_usage_read(ca);
unsigned bucket_bits = ca->bucket_bits + 9;
u64 size = (ca->mi.nbuckets -
diff --git a/fs/bcachefs/bcache.h b/fs/bcachefs/bcache.h
index a18a0ba45cf5..542474bb6683 100644
--- a/fs/bcachefs/bcache.h
+++ b/fs/bcachefs/bcache.h
@@ -416,8 +416,8 @@ struct cache {
* second contains a saved copy of the stats from the beginning
* of GC.
*/
- struct bucket_stats_cache __percpu *bucket_stats_percpu;
- struct bucket_stats_cache bucket_stats_cached;
+ struct bch_dev_usage __percpu *bucket_stats_percpu;
+ struct bch_dev_usage bucket_stats_cached;
atomic_long_t saturated_count;
size_t inc_gen_needs_gc;
@@ -651,8 +651,8 @@ struct cache_set {
atomic64_t sectors_available;
- struct bucket_stats_cache_set __percpu *bucket_stats_percpu;
- struct bucket_stats_cache_set bucket_stats_cached;
+ struct bch_fs_usage __percpu *bucket_stats_percpu;
+ struct bch_fs_usage bucket_stats_cached;
struct lglock bucket_stats_lock;
struct mutex bucket_lock;
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index f474e8db0c50..bbd9b29fae86 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -333,7 +333,7 @@ static void bch_mark_metadata(struct cache_set *c)
/* Also see bch_pending_btree_node_free_insert_done() */
static void bch_mark_pending_btree_node_frees(struct cache_set *c)
{
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
struct btree_interior_update *as;
struct pending_btree_node_free *d;
@@ -407,17 +407,17 @@ void bch_gc(struct cache_set *c)
/* Save a copy of the existing bucket stats while we recompute them: */
for_each_cache(ca, c, i) {
- ca->bucket_stats_cached = __bch_bucket_stats_read_cache(ca);
+ ca->bucket_stats_cached = __bch_dev_usage_read(ca);
for_each_possible_cpu(cpu) {
- struct bucket_stats_cache *p =
+ struct bch_dev_usage *p =
per_cpu_ptr(ca->bucket_stats_percpu, cpu);
memset(p, 0, sizeof(*p));
}
}
- c->bucket_stats_cached = __bch_bucket_stats_read_cache_set(c);
+ c->bucket_stats_cached = __bch_fs_usage_read(c);
for_each_possible_cpu(cpu) {
- struct bucket_stats_cache_set *p =
+ struct bch_fs_usage *p =
per_cpu_ptr(c->bucket_stats_percpu, cpu);
memset(p->s, 0, sizeof(p->s));
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index f5e8743a18fe..cce353de9f33 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -271,7 +271,6 @@ struct btree_root {
*/
struct btree_iter;
-struct bucket_stats_cache_set;
struct btree_node_iter;
enum extent_insert_hook_ret {
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 534284a50572..d67fcd7682c7 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -94,7 +94,7 @@ bool bch_btree_node_format_fits(struct cache_set *c, struct btree *b,
*/
static void bch_btree_node_free_index(struct cache_set *c, struct btree *b,
enum btree_id id, struct bkey_s_c k,
- struct bucket_stats_cache_set *stats)
+ struct bch_fs_usage *stats)
{
struct btree_interior_update *as;
struct pending_btree_node_free *d;
@@ -140,7 +140,7 @@ found:
* moving this reference from, hence one comparison here:
*/
if (gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
- struct bucket_stats_cache_set tmp = { 0 };
+ struct bch_fs_usage tmp = { 0 };
bch_mark_key(c, bkey_i_to_s_c(&d->key),
-c->sb.btree_node_size, true, b
@@ -208,7 +208,7 @@ void bch_btree_node_free_inmem(struct btree_iter *iter, struct btree *b)
static void bch_btree_node_free_ondisk(struct cache_set *c,
struct pending_btree_node_free *pending)
{
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
BUG_ON(!pending->index_update_done);
@@ -374,7 +374,7 @@ static void bch_btree_set_root_inmem(struct cache_set *c, struct btree *b,
* bch_btree_root_read()) - do marking while holding
* btree_root_lock:
*/
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
bch_mark_key(c, bkey_i_to_s_c(&b->key),
c->sb.btree_node_size, true,
@@ -633,7 +633,7 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
struct disk_reservation *disk_res)
{
struct cache_set *c = iter->c;
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
struct bkey_packed *k;
struct bkey tmp;
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index b07bf6424d4f..0dde6fb0c6eb 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -78,8 +78,8 @@
static void bch_fs_stats_verify(struct cache_set *c)
{
- struct bucket_stats_cache_set stats =
- __bch_bucket_stats_read_cache_set(c);
+ struct bch_fs_usage stats =
+ __bch_fs_usage_read(c);
if ((s64) stats.sectors_dirty < 0)
panic("sectors_dirty underflow: %lli\n", stats.sectors_dirty);
@@ -162,26 +162,26 @@ do { \
_ret; \
})
-struct bucket_stats_cache __bch_bucket_stats_read_cache(struct cache *ca)
+struct bch_dev_usage __bch_dev_usage_read(struct cache *ca)
{
return bucket_stats_read_raw(ca->bucket_stats_percpu);
}
-struct bucket_stats_cache bch_bucket_stats_read_cache(struct cache *ca)
+struct bch_dev_usage bch_dev_usage_read(struct cache *ca)
{
return bucket_stats_read_cached(ca->set,
ca->bucket_stats_cached,
ca->bucket_stats_percpu);
}
-struct bucket_stats_cache_set
-__bch_bucket_stats_read_cache_set(struct cache_set *c)
+struct bch_fs_usage
+__bch_fs_usage_read(struct cache_set *c)
{
return bucket_stats_read_raw(c->bucket_stats_percpu);
}
-struct bucket_stats_cache_set
-bch_bucket_stats_read_cache_set(struct cache_set *c)
+struct bch_fs_usage
+bch_fs_usage_read(struct cache_set *c)
{
return bucket_stats_read_cached(c,
c->bucket_stats_cached,
@@ -205,7 +205,7 @@ static inline int is_cached_bucket(struct bucket_mark m)
}
void bch_fs_stats_apply(struct cache_set *c,
- struct bucket_stats_cache_set *stats,
+ struct bch_fs_usage *stats,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
{
@@ -251,11 +251,11 @@ static bool bucket_became_unavailable(struct cache_set *c,
}
static void bucket_stats_update(struct cache *ca,
- struct bucket_mark old, struct bucket_mark new,
- struct bucket_stats_cache_set *bch_alloc_stats)
+ struct bucket_mark old, struct bucket_mark new,
+ struct bch_fs_usage *bch_alloc_stats)
{
struct cache_set *c = ca->set;
- struct bucket_stats_cache *cache_stats;
+ struct bch_dev_usage *cache_stats;
bch_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
@@ -305,7 +305,7 @@ static void bucket_stats_update(struct cache *ca,
#define bucket_data_cmpxchg(ca, g, new, expr) \
({ \
- struct bucket_stats_cache_set _stats = { 0 }; \
+ struct bch_fs_usage _stats = { 0 }; \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
bucket_stats_update(ca, _old, new, &_stats); \
@@ -314,7 +314,7 @@ static void bucket_stats_update(struct cache *ca,
void bch_invalidate_bucket(struct cache *ca, struct bucket *g)
{
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
struct bucket_mark old, new;
old = bucket_cmpxchg(g, new, ({
@@ -441,18 +441,18 @@ static unsigned __compressed_sectors(const union bch_extent_crc *crc, unsigned s
*/
static void bch_mark_pointer(struct cache_set *c,
struct bkey_s_c_extent e,
- struct cache *ca,
const union bch_extent_crc *crc,
const struct bch_extent_ptr *ptr,
s64 sectors, enum s_alloc type,
bool may_make_unavailable,
- struct bucket_stats_cache_set *stats,
+ struct bch_fs_usage *stats,
bool gc_will_visit, u64 journal_seq)
{
struct bucket_mark old, new;
unsigned saturated;
- struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
- u64 v = READ_ONCE(g->_mark.counter);
+ struct cache *ca;
+ struct bucket *g;
+ u64 v;
unsigned old_sectors, new_sectors;
int disk_sectors, compressed_sectors;
@@ -469,6 +469,12 @@ static void bch_mark_pointer(struct cache_set *c,
compressed_sectors = -__compressed_sectors(crc, old_sectors)
+ __compressed_sectors(crc, new_sectors);
+ ca = PTR_CACHE(c, ptr);
+ if (!ca)
+ goto out;
+
+ g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
+
if (gc_will_visit) {
if (journal_seq)
bucket_cmpxchg(g, new, new.journal_seq = journal_seq);
@@ -476,6 +482,7 @@ static void bch_mark_pointer(struct cache_set *c,
goto out;
}
+ v = READ_ONCE(g->_mark.counter);
do {
new.counter = old.counter = v;
saturated = 0;
@@ -548,33 +555,29 @@ out:
static void bch_mark_extent(struct cache_set *c, struct bkey_s_c_extent e,
s64 sectors, bool metadata,
bool may_make_unavailable,
- struct bucket_stats_cache_set *stats,
+ struct bch_fs_usage *stats,
bool gc_will_visit, u64 journal_seq)
{
const struct bch_extent_ptr *ptr;
const union bch_extent_crc *crc;
- struct cache *ca;
enum s_alloc type = metadata ? S_META : S_DIRTY;
BUG_ON(metadata && bkey_extent_is_cached(e.k));
BUG_ON(!sectors);
rcu_read_lock();
- extent_for_each_online_device_crc(c, e, crc, ptr, ca) {
- trace_bcache_mark_bucket(ca, e.k, ptr, sectors, !ptr->cached);
-
- bch_mark_pointer(c, e, ca, crc, ptr, sectors,
+ extent_for_each_ptr_crc(e, ptr, crc)
+ bch_mark_pointer(c, e, crc, ptr, sectors,
ptr->cached ? S_CACHED : type,
may_make_unavailable,
stats, gc_will_visit, journal_seq);
- }
rcu_read_unlock();
}
static void __bch_mark_key(struct cache_set *c, struct bkey_s_c k,
s64 sectors, bool metadata,
bool may_make_unavailable,
- struct bucket_stats_cache_set *stats,
+ struct bch_fs_usage *stats,
bool gc_will_visit, u64 journal_seq)
{
switch (k.k->type) {
@@ -595,7 +598,7 @@ static void __bch_mark_key(struct cache_set *c, struct bkey_s_c k,
void __bch_gc_mark_key(struct cache_set *c, struct bkey_s_c k,
s64 sectors, bool metadata,
- struct bucket_stats_cache_set *stats)
+ struct bch_fs_usage *stats)
{
__bch_mark_key(c, k, sectors, metadata, true, stats, false, 0);
}
@@ -603,7 +606,7 @@ void __bch_gc_mark_key(struct cache_set *c, struct bkey_s_c k,
void bch_gc_mark_key(struct cache_set *c, struct bkey_s_c k,
s64 sectors, bool metadata)
{
- struct bucket_stats_cache_set stats = { 0 };
+ struct bch_fs_usage stats = { 0 };
__bch_gc_mark_key(c, k, sectors, metadata, &stats);
@@ -614,7 +617,7 @@ void bch_gc_mark_key(struct cache_set *c, struct bkey_s_c k,
void bch_mark_key(struct cache_set *c, struct bkey_s_c k,
s64 sectors, bool metadata, struct gc_pos gc_pos,
- struct bucket_stats_cache_set *stats, u64 journal_seq)
+ struct bch_fs_usage *stats, u64 journal_seq)
{
/*
* synchronization w.r.t. GC:
@@ -693,7 +696,7 @@ int bch_disk_reservation_add(struct cache_set *c,
struct disk_reservation *res,
unsigned sectors, int flags)
{
- struct bucket_stats_cache_set *stats;
+ struct bch_fs_usage *stats;
u64 old, new, v;
s64 sectors_available;
int ret;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 6d70103efb42..37a664341e3e 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -157,11 +157,11 @@ static inline unsigned bucket_sectors_used(struct bucket *g)
/* Per device stats: */
-struct bucket_stats_cache __bch_bucket_stats_read_cache(struct cache *);
-struct bucket_stats_cache bch_bucket_stats_read_cache(struct cache *);
+struct bch_dev_usage __bch_dev_usage_read(struct cache *);
+struct bch_dev_usage bch_dev_usage_read(struct cache *);
static inline u64 __buckets_available_cache(struct cache *ca,
- struct bucket_stats_cache stats)
+ struct bch_dev_usage stats)
{
return max_t(s64, 0,
ca->mi.nbuckets - ca->mi.first_bucket -
@@ -175,11 +175,11 @@ static inline u64 __buckets_available_cache(struct cache *ca,
*/
static inline u64 buckets_available_cache(struct cache *ca)
{
- return __buckets_available_cache(ca, bch_bucket_stats_read_cache(ca));
+ return __buckets_available_cache(ca, bch_dev_usage_read(ca));
}
static inline u64 __buckets_free_cache(struct cache *ca,
- struct bucket_stats_cache stats)
+ struct bch_dev_usage stats)
{
return __buckets_available_cache(ca, stats) +
fifo_used(&ca->free[RESERVE_NONE]) +
@@ -188,21 +188,19 @@ static inline u64 __buckets_free_cache(struct cache *ca,
static inline u64 buckets_free_cache(struct cache *ca)
{
- return __buckets_free_cache(ca, bch_bucket_stats_read_cache(ca));
+ return __buckets_free_cache(ca, bch_dev_usage_read(ca));
}
/* Cache set stats: */
-struct bucket_stats_cache_set __bch_bucket_stats_read_cache_set(struct cache_set *);
-struct bucket_stats_cache_set bch_bucket_stats_read_cache_set(struct cache_set *);
-void bch_fs_stats_apply(struct cache_set *,
- struct bucket_stats_cache_set *,
- struct disk_reservation *,
- struct gc_pos);
+struct bch_fs_usage __bch_fs_usage_read(struct cache_set *);
+struct bch_fs_usage bch_fs_usage_read(struct cache_set *);
+void bch_fs_stats_apply(struct cache_set *, struct bch_fs_usage *,
+ struct disk_reservation *, struct gc_pos);
static inline u64 __bch_fs_sectors_used(struct cache_set *c)
{
- struct bucket_stats_cache_set stats = __bch_bucket_stats_read_cache_set(c);
+ struct bch_fs_usage stats = __bch_fs_usage_read(c);
u64 reserved = stats.persistent_reserved +
stats.online_reserved;
@@ -256,10 +254,10 @@ void bch_mark_metadata_bucket(struct cache *, struct bucket *,
enum bucket_data_type, bool);
void __bch_gc_mark_key(struct cache_set *, struct bkey_s_c, s64, bool,
- struct bucket_stats_cache_set *);
+ struct bch_fs_usage *);
void bch_gc_mark_key(struct cache_set *, struct bkey_s_c, s64, bool);
void bch_mark_key(struct cache_set *, struct bkey_s_c, s64, bool,
- struct gc_pos, struct bucket_stats_cache_set *, u64);
+ struct gc_pos, struct bch_fs_usage *, u64);
void bch_recalc_sectors_available(struct cache_set *);
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index f42e09d8a0b9..1856db93d639 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -65,7 +65,7 @@ struct bucket {
};
};
-struct bucket_stats_cache {
+struct bch_dev_usage {
u64 buckets_dirty;
u64 buckets_cached;
u64 buckets_meta;
@@ -89,7 +89,7 @@ enum s_compressed {
S_COMPRESSED_NR,
};
-struct bucket_stats_cache_set {
+struct bch_fs_usage {
/* all fields are in units of 512 byte sectors: */
u64 s[S_COMPRESSED_NR][S_ALLOC_NR];
u64 persistent_reserved;
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index d126a8a5cc60..4b7c9b364ab5 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -941,7 +941,7 @@ struct extent_insert_state {
struct btree_insert *trans;
struct btree_insert_entry *insert;
struct bpos committed;
- struct bucket_stats_cache_set stats;
+ struct bch_fs_usage stats;
/* for deleting: */
struct bkey_i whiteout;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 4468a20e147f..57b6a0a42491 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -616,7 +616,7 @@ static struct cache_set *bch_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
c->sb.btree_node_size,
BCH_ENCODED_EXTENT_MAX) /
PAGE_SECTORS, 0) ||
- !(c->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache_set)) ||
+ !(c->bucket_stats_percpu = alloc_percpu(struct bch_fs_usage)) ||
lg_lock_init(&c->bucket_stats_lock) ||
mempool_init_page_pool(&c->btree_bounce_pool, 1,
ilog2(btree_pages(c))) ||
@@ -1211,7 +1211,7 @@ static const char *bch_dev_alloc(struct bcache_superblock *sb,
!(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
2, GFP_KERNEL)) ||
!(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
- !(ca->bucket_stats_percpu = alloc_percpu(struct bucket_stats_cache)) ||
+ !(ca->bucket_stats_percpu = alloc_percpu(struct bch_dev_usage)) ||
!(ca->bio_prio = bio_kmalloc(GFP_NOIO, bucket_pages(ca))) ||
bioset_init(&ca->replica_set, 4,
offsetof(struct bch_write_bio, bio)) ||
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 644d7e827967..56df0089467a 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -552,7 +552,7 @@ static unsigned bch_average_key_size(struct cache_set *c)
static ssize_t show_fs_alloc_debug(struct cache_set *c, char *buf)
{
- struct bucket_stats_cache_set stats = bch_bucket_stats_read_cache_set(c);
+ struct bch_fs_usage stats = bch_fs_usage_read(c);
return scnprintf(buf, PAGE_SIZE,
"capacity:\t\t%llu\n"
@@ -1127,7 +1127,7 @@ static ssize_t show_reserve_stats(struct cache *ca, char *buf)
static ssize_t show_dev_alloc_debug(struct cache *ca, char *buf)
{
struct cache_set *c = ca->set;
- struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
+ struct bch_dev_usage stats = bch_dev_usage_read(ca);
return scnprintf(buf, PAGE_SIZE,
"free_inc: %zu/%zu\n"
@@ -1171,7 +1171,7 @@ SHOW(bch_dev)
{
struct cache *ca = container_of(kobj, struct cache, kobj);
struct cache_set *c = ca->set;
- struct bucket_stats_cache stats = bch_bucket_stats_read_cache(ca);
+ struct bch_dev_usage stats = bch_dev_usage_read(ca);
sysfs_printf(uuid, "%pU\n", ca->uuid.b);