author    Kent Overstreet <kent.overstreet@gmail.com>    2017-03-12 05:55:40 -0800
committer Kent Overstreet <kent.overstreet@gmail.com>    2017-03-17 19:49:24 -0800
commit    ba8bf9cdf090d8180f7398196550e93b868b3c2a (patch)
tree      9debbe009284ba2cb849544afaf112b37a26efe5
parent    a5b2efedf8485ee4a36c736cf6cfe907c0db91c5 (diff)
bcachefs: Refactor some disk usage accounting stuff
-rw-r--r--  fs/bcachefs/alloc.c            7
-rw-r--r--  fs/bcachefs/btree_update.c     4
-rw-r--r--  fs/bcachefs/buckets.c        112
-rw-r--r--  fs/bcachefs/buckets.h          2
-rw-r--r--  fs/bcachefs/buckets_types.h   24
-rw-r--r--  fs/bcachefs/extents.c          6
-rw-r--r--  fs/bcachefs/sysfs.c            8
7 files changed, 66 insertions, 97 deletions
diff --git a/fs/bcachefs/alloc.c b/fs/bcachefs/alloc.c
index 5bd6de9fb05c..299795d117f2 100644
--- a/fs/bcachefs/alloc.c
+++ b/fs/bcachefs/alloc.c
@@ -154,11 +154,10 @@ static void pd_controllers_update(struct work_struct *work)
s64 fragmented = ((stats.buckets_dirty +
stats.buckets_cached) <<
bucket_bits) -
- ((stats.sectors_dirty +
- stats.sectors_cached) << 9);
+ ((stats.sectors[S_DIRTY] +
+ stats.sectors[S_CACHED]) << 9);
- if (fragmented < 0)
- fragmented = 0;
+ fragmented = max(0LL, fragmented);
bch_pd_controller_update(&ca->moving_gc_pd,
free, fragmented, -1);
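
[Note: the clamp rewrite above is behaviour-preserving: max(0LL, fragmented) yields exactly what the removed two-line if did, with the 0LL literal keeping the comparison in long long / s64. A minimal userspace sketch of the same clamp; the max() macro here is a simplified stand-in for the kernel's type-checked version, and the sample value is invented:

#include <stdio.h>

/* Simplified stand-in for the kernel's max(); the real one type-checks. */
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	/* Invented example: cached data exceeds the bucket footprint. */
	long long fragmented = -4096;

	/* Equivalent to the removed: if (fragmented < 0) fragmented = 0; */
	fragmented = max(0LL, fragmented);

	printf("fragmented = %lld\n", fragmented);	/* prints 0 */
	return 0;
}
]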
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index 9da1c9c6fdc0..96348ac6bd36 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -385,7 +385,7 @@ static void bch_btree_set_root_inmem(struct bch_fs *c, struct btree *b,
bch_btree_node_free_index(c, NULL, old->btree_id,
bkey_i_to_s_c(&old->key),
&stats);
- bch_fs_stats_apply(c, &stats, &btree_reserve->disk_res,
+ bch_fs_usage_apply(c, &stats, &btree_reserve->disk_res,
gc_pos_btree_root(b->btree_id));
}
@@ -655,7 +655,7 @@ static void bch_insert_fixup_btree_ptr(struct btree_iter *iter,
bkey_disassemble(b, k, &tmp),
&stats);
- bch_fs_stats_apply(c, &stats, disk_res, gc_pos_btree_node(b));
+ bch_fs_usage_apply(c, &stats, disk_res, gc_pos_btree_node(b));
bch_btree_bset_insert_key(iter, b, node_iter, insert);
set_btree_node_dirty(b);
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 8514f5472016..585a7ce68b12 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -204,7 +204,21 @@ static inline int is_cached_bucket(struct bucket_mark m)
!m.dirty_sectors && !!m.cached_sectors;
}
-void bch_fs_stats_apply(struct bch_fs *c,
+static inline enum s_alloc bucket_type(struct bucket_mark m)
+{
+ return is_meta_bucket(m) ? S_META : S_DIRTY;
+}
+
+static bool bucket_became_unavailable(struct bch_fs *c,
+ struct bucket_mark old,
+ struct bucket_mark new)
+{
+ return is_available_bucket(old) &&
+ !is_available_bucket(new) &&
+ c && c->gc_pos.phase == GC_PHASE_DONE;
+}
+
+void bch_fs_usage_apply(struct bch_fs *c,
struct bch_fs_usage *stats,
struct disk_reservation *disk_res,
struct gc_pos gc_pos)
@@ -241,62 +255,43 @@ void bch_fs_stats_apply(struct bch_fs *c,
memset(stats, 0, sizeof(*stats));
}
-static bool bucket_became_unavailable(struct bch_fs *c,
- struct bucket_mark old,
- struct bucket_mark new)
+static void bch_fs_usage_update(struct bch_fs_usage *fs_usage,
+ struct bucket_mark old, struct bucket_mark new)
{
- return is_available_bucket(old) &&
- !is_available_bucket(new) &&
- c && c->gc_pos.phase == GC_PHASE_DONE;
+ fs_usage->s[S_COMPRESSED][S_CACHED] +=
+ (int) new.cached_sectors - (int) old.cached_sectors;
+ fs_usage->s[S_COMPRESSED][bucket_type(old)] -=
+ old.dirty_sectors;
+ fs_usage->s[S_COMPRESSED][bucket_type(new)] +=
+ new.dirty_sectors;
}
-static void bch_usage_update(struct bch_dev *ca,
- struct bucket_mark old, struct bucket_mark new,
- struct bch_fs_usage *bch_alloc_stats)
+static void bch_dev_usage_update(struct bch_dev *ca,
+ struct bucket_mark old, struct bucket_mark new)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage *cache_stats;
+ struct bch_dev_usage *dev_usage;
bch_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
"different types of metadata in same bucket: %u, %u",
old.data_type, new.data_type);
- if (bch_alloc_stats) {
- bch_alloc_stats->s[S_COMPRESSED][S_CACHED] +=
- (int) new.cached_sectors - (int) old.cached_sectors;
-
- bch_alloc_stats->s[S_COMPRESSED]
- [is_meta_bucket(old) ? S_META : S_DIRTY] -=
- old.dirty_sectors;
-
- bch_alloc_stats->s[S_COMPRESSED]
- [is_meta_bucket(new) ? S_META : S_DIRTY] +=
- new.dirty_sectors;
- }
-
preempt_disable();
- cache_stats = this_cpu_ptr(ca->usage_percpu);
+ dev_usage = this_cpu_ptr(ca->usage_percpu);
- cache_stats->sectors_cached +=
+ dev_usage->sectors[S_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
- if (is_meta_bucket(old))
- cache_stats->sectors_meta -= old.dirty_sectors;
- else
- cache_stats->sectors_dirty -= old.dirty_sectors;
+ dev_usage->sectors[bucket_type(old)] -= old.dirty_sectors;
+ dev_usage->sectors[bucket_type(new)] += new.dirty_sectors;
- if (is_meta_bucket(new))
- cache_stats->sectors_meta += new.dirty_sectors;
- else
- cache_stats->sectors_dirty += new.dirty_sectors;
-
- cache_stats->buckets_alloc +=
+ dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
- cache_stats->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
- cache_stats->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
- cache_stats->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
+ dev_usage->buckets_meta += is_meta_bucket(new) - is_meta_bucket(old);
+ dev_usage->buckets_cached += is_cached_bucket(new) - is_cached_bucket(old);
+ dev_usage->buckets_dirty += is_dirty_bucket(new) - is_dirty_bucket(old);
preempt_enable();
if (!is_available_bucket(old) && is_available_bucket(new))
@@ -305,10 +300,9 @@ static void bch_usage_update(struct bch_dev *ca,
#define bucket_data_cmpxchg(ca, g, new, expr) \
({ \
- struct bch_fs_usage _stats = { 0 }; \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
- bch_usage_update(ca, _old, new, &_stats); \
+ bch_dev_usage_update(ca, _old, new); \
_old; \
})
@@ -317,7 +311,7 @@ void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
struct bch_fs_usage stats = { 0 };
struct bucket_mark old, new;
- old = bucket_cmpxchg(g, new, ({
+ old = bucket_data_cmpxchg(ca, g, new, ({
new.owned_by_allocator = 1;
new.had_metadata = 0;
new.data_type = 0;
@@ -327,23 +321,8 @@ void bch_invalidate_bucket(struct bch_dev *ca, struct bucket *g)
new.gen++;
}));
- bch_usage_update(ca, old, new, &stats);
-
- BUG_ON(old.dirty_sectors);
-
- /*
- * Ick:
- *
- * Only stats.sectors_cached should be nonzero: this is important
- * because in this path we modify bch_alloc_stats based on how the
- * bucket_mark was modified, and the sector counts in bucket_mark are
- * subject to (saturating) overflow - and if they did overflow, the
- * bch_fs_usage stats will now be off. We can tolerate this for
- * sectors_cached, but not anything else:
- */
- stats.s[S_COMPRESSED][S_CACHED] = 0;
- stats.s[S_UNCOMPRESSED][S_CACHED] = 0;
- BUG_ON(!bch_is_zero(&stats, sizeof(stats)));
+ /* XXX: we're not actually updating fs usage's cached sectors... */
+ bch_fs_usage_update(&stats, old, new);
if (!old.owned_by_allocator && old.cached_sectors)
trace_bcache_invalidate(ca, g - ca->buckets,
@@ -452,7 +431,6 @@ static void bch_mark_pointer(struct bch_fs *c,
unsigned saturated;
struct bch_dev *ca = c->devs[ptr->dev];
struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
- u64 v;
unsigned old_sectors, new_sectors;
int disk_sectors, compressed_sectors;
@@ -476,9 +454,7 @@ static void bch_mark_pointer(struct bch_fs *c,
goto out;
}
- v = READ_ONCE(g->_mark.counter);
- do {
- new.counter = old.counter = v;
+ old = bucket_data_cmpxchg(ca, g, new, ({
saturated = 0;
/*
@@ -486,7 +462,7 @@ static void bch_mark_pointer(struct bch_fs *c,
* the allocator invalidating a bucket after we've already
* checked the gen
*/
- if (gen_after(old.gen, ptr->gen)) {
+ if (gen_after(new.gen, ptr->gen)) {
EBUG_ON(type != S_CACHED &&
test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
return;
@@ -494,7 +470,7 @@ static void bch_mark_pointer(struct bch_fs *c,
EBUG_ON(type != S_CACHED &&
!may_make_unavailable &&
- is_available_bucket(old) &&
+ is_available_bucket(new) &&
test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
if (type != S_CACHED &&
@@ -523,11 +499,7 @@ static void bch_mark_pointer(struct bch_fs *c,
}
new.had_metadata |= is_meta_bucket(new);
- } while ((v = cmpxchg(&g->_mark.counter,
- old.counter,
- new.counter)) != old.counter);
-
- bch_usage_update(ca, old, new, NULL);
+ }));
BUG_ON(!may_make_unavailable &&
bucket_became_unavailable(c, old, new));
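
[Note: the net effect in buckets.c is that the open-coded cmpxchg retry loop in bch_mark_pointer moves into bucket_data_cmpxchg, which also calls bch_dev_usage_update(ca, old, new) on the caller's behalf, so every bucket_mark update keeps the per-device counters in sync automatically. A standalone sketch of that retry pattern using C11 atomics; the bucket_mark layout below is invented for illustration and differs from the real one:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy bucket_mark: the real bcachefs field layout is different. */
union bucket_mark {
	uint64_t counter;
	struct {
		uint32_t dirty_sectors;
		uint32_t cached_sectors;
	};
};

/*
 * Retry loop in the style of bucket_data_cmpxchg: snapshot the mark,
 * apply the update, and publish it with one compare-and-swap so
 * concurrent updaters never see a torn bucket_mark.
 */
static union bucket_mark mark_add_dirty(_Atomic uint64_t *mark,
					uint32_t sectors)
{
	union bucket_mark old, new;

	old.counter = atomic_load(mark);
	do {
		new.counter = old.counter;
		new.dirty_sectors += sectors;	/* the macro's "expr" body */
	} while (!atomic_compare_exchange_weak(mark, &old.counter,
					       new.counter));

	/* The real macro calls bch_dev_usage_update(ca, old, new) here. */
	return old;
}

int main(void)
{
	_Atomic uint64_t mark = 0;
	union bucket_mark old = mark_add_dirty(&mark, 32);

	printf("dirty: %u -> %u\n", old.dirty_sectors,
	       old.dirty_sectors + 32);
	return 0;
}
]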
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 9a00d38a682a..81355576f33a 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -183,7 +183,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca)
struct bch_fs_usage __bch_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch_fs_usage_read(struct bch_fs *);
-void bch_fs_stats_apply(struct bch_fs *, struct bch_fs_usage *,
+void bch_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, struct gc_pos);
static inline u64 __bch_fs_sectors_used(struct bch_fs *c)
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 1856db93d639..ca187099ee41 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -65,15 +65,10 @@ struct bucket {
};
};
-struct bch_dev_usage {
- u64 buckets_dirty;
- u64 buckets_cached;
- u64 buckets_meta;
- u64 buckets_alloc;
-
- u64 sectors_dirty;
- u64 sectors_cached;
- u64 sectors_meta;
+enum s_compressed {
+ S_COMPRESSED,
+ S_UNCOMPRESSED,
+ S_COMPRESSED_NR,
};
enum s_alloc {
@@ -83,10 +78,13 @@ enum s_alloc {
S_ALLOC_NR,
};
-enum s_compressed {
- S_COMPRESSED,
- S_UNCOMPRESSED,
- S_COMPRESSED_NR,
+struct bch_dev_usage {
+ u64 buckets_dirty;
+ u64 buckets_cached;
+ u64 buckets_meta;
+ u64 buckets_alloc;
+
+ u64 sectors[S_ALLOC_NR];
};
struct bch_fs_usage {
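
[Note: the reordering above exists because struct bch_dev_usage now needs enum s_alloc in scope to size its sectors[] array: three individually named u64 counters collapse into one array that code like dev_usage->sectors[bucket_type(old)] can index. A compilable sketch of the new shape; the S_META/S_DIRTY/S_CACHED enumerator order is assumed, since the hunk above only shows S_ALLOC_NR:

#include <stdint.h>
#include <stdio.h>

/* Assumed enumerator order; the hunk only shows S_ALLOC_NR. */
enum s_alloc { S_META, S_DIRTY, S_CACHED, S_ALLOC_NR };

struct bch_dev_usage {
	uint64_t buckets_dirty;
	uint64_t buckets_cached;
	uint64_t buckets_meta;
	uint64_t buckets_alloc;
	uint64_t sectors[S_ALLOC_NR];	/* was sectors_meta/_dirty/_cached */
};

int main(void)
{
	struct bch_dev_usage u = { 0 };
	enum s_alloc i;

	u.sectors[S_DIRTY]  += 128;
	u.sectors[S_CACHED] += 64;

	/* Callers can loop over the counters instead of naming each one: */
	for (i = 0; i < S_ALLOC_NR; i++)
		printf("sectors[%d] = %llu\n", (int) i,
		       (unsigned long long) u.sectors[i]);
	return 0;
}
]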
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 87a68d738567..905a77973872 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -931,7 +931,7 @@ struct extent_insert_state {
struct btree_insert *trans;
struct btree_insert_entry *insert;
struct bpos committed;
- struct bch_fs_usage stats;
+ struct bch_fs_usage stats;
/* for deleting: */
struct bkey_i whiteout;
@@ -1554,7 +1554,7 @@ next:
stop:
extent_insert_committed(s);
- bch_fs_stats_apply(c, &s->stats, s->trans->disk_res,
+ bch_fs_usage_apply(c, &s->stats, s->trans->disk_res,
gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, s->committed));
@@ -1716,7 +1716,7 @@ stop:
bkey_start_offset(&insert->k->k),
insert->k->k.size);
- bch_fs_stats_apply(c, &s.stats, trans->disk_res,
+ bch_fs_usage_apply(c, &s.stats, trans->disk_res,
gc_pos_btree_node(b));
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 91897671b52d..6f87f71072a2 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -1174,11 +1174,11 @@ SHOW(bch_dev)
sysfs_print(io_errors,
atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
- sysfs_hprint(dirty_data, stats.sectors_dirty << 9);
- sysfs_print(dirty_bytes, stats.sectors_dirty << 9);
+ sysfs_hprint(dirty_data, stats.sectors[S_DIRTY] << 9);
+ sysfs_print(dirty_bytes, stats.sectors[S_DIRTY] << 9);
sysfs_print(dirty_buckets, stats.buckets_dirty);
- sysfs_hprint(cached_data, stats.sectors_cached << 9);
- sysfs_print(cached_bytes, stats.sectors_cached << 9);
+ sysfs_hprint(cached_data, stats.sectors[S_CACHED] << 9);
+ sysfs_print(cached_bytes, stats.sectors[S_CACHED] << 9);
sysfs_print(cached_buckets, stats.buckets_cached);
sysfs_print(meta_buckets, stats.buckets_meta);
sysfs_print(alloc_buckets, stats.buckets_alloc);
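
[Note: these sysfs counters are sector counts, so << 9 scales them to bytes (512-byte sectors), with sysfs_hprint emitting a human-readable figure and sysfs_print the raw number. A rough userspace analogue of that pair; hprint() below is an invented stand-in, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for sysfs_hprint: human-readable byte count. */
static void hprint(uint64_t bytes)
{
	static const char units[] = "BKMGTP";
	double v = (double) bytes;
	int i = 0;

	while (v >= 1024 && units[i + 1]) {
		v /= 1024;
		i++;
	}
	printf("%.1f%c\n", v, units[i]);
}

int main(void)
{
	uint64_t sectors_dirty = 1 << 20;	/* made-up sample value */
	uint64_t bytes = sectors_dirty << 9;	/* sectors are 512 bytes */

	hprint(bytes);					/* dirty_data:  512.0M */
	printf("%llu\n", (unsigned long long) bytes);	/* dirty_bytes: raw   */
	return 0;
}
]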