summary refs log tree commit diff
diff options
context:
space:
mode:
author: Kent Overstreet <kent.overstreet@gmail.com> 2019-02-12 15:03:47 -0500
committer: Kent Overstreet <kent.overstreet@gmail.com> 2019-04-03 12:44:07 -0400
commit: ae11bb2d0dff150a338c71ae4b3e30d7be63ed4f (patch)
tree: 26e03b42cabdfe6dbf15436b590da4eed4b92d9a
parent: ae2082a190e202b95834938e8171a7c98c110da4 (diff)
bcachefs: refactor key marking code a bit
-rw-r--r--  fs/bcachefs/btree_gc.c   15
-rw-r--r--  fs/bcachefs/buckets.c   124
-rw-r--r--  fs/bcachefs/buckets.h     4
3 files changed, 63 insertions, 80 deletions
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 3e355b5c81fd..91153cb17c0a 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -229,12 +229,12 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
bch2_verify_btree_nr_keys(b);
+ gc_pos_set(c, gc_pos_btree_node(b));
+
ret = btree_gc_mark_node(c, b, &max_stale, initial);
if (ret)
break;
- gc_pos_set(c, gc_pos_btree_node(b));
-
if (!initial) {
if (max_stale > 64)
bch2_btree_node_rewrite(c, &iter,
@@ -620,10 +620,13 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
"persistent_reserved[%i]", i);
for (i = 0; i < c->replicas.nr; i++) {
- /*
- * XXX: print out replicas entry
- */
- copy_fs_field(data[i], "data[%i]", i);
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
+ char buf[80];
+
+ bch2_replicas_entry_to_text(&PBUF(buf), e);
+
+ copy_fs_field(data[i], "%s", buf);
}
}
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index cc0998546864..b43ff257c46d 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -393,9 +393,22 @@ static inline void update_cached_sectors(struct bch_fs *c,
update_replicas(c, fs_usage, &r.e, sectors);
}
-static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, struct bucket_mark *ret,
- bool gc)
+#define do_mark_fn(fn, c, pos, flags, ...) \
+({ \
+ int gc, ret = 0; \
+ \
+ percpu_rwsem_assert_held(&c->mark_lock); \
+ \
+ for (gc = 0; gc < 2 && !ret; gc++) \
+ if (!gc == !(flags & BCH_BUCKET_MARK_GC) || \
+ (gc && gc_visited(c, pos))) \
+ ret = fn(c, __VA_ARGS__, gc); \
+ ret; \
+})
+
+static int __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, struct bucket_mark *ret,
+ bool gc)
{
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
@@ -416,28 +429,25 @@ static void __bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
update_cached_sectors(c, fs_usage, ca->dev_idx,
-old.cached_sectors);
- if (ret)
+ if (!gc)
*ret = old;
+ return 0;
}
void bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, struct bucket_mark *old)
{
- percpu_rwsem_assert_held(&c->mark_lock);
-
- __bch2_invalidate_bucket(c, ca, b, old, false);
-
- if (gc_visited(c, gc_phase(GC_PHASE_START)))
- __bch2_invalidate_bucket(c, ca, b, NULL, true);
+ do_mark_fn(__bch2_invalidate_bucket, c, gc_phase(GC_PHASE_START), 0,
+ ca, b, old);
if (!old->owned_by_allocator && old->cached_sectors)
trace_invalidate(ca, bucket_to_sector(ca, b),
old->cached_sectors);
}
-static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, bool owned_by_allocator,
- bool gc)
+static int __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, bool owned_by_allocator,
+ bool gc)
{
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
@@ -449,20 +459,16 @@ static void __bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
BUG_ON(!gc &&
!owned_by_allocator && !old.owned_by_allocator);
+
+ return 0;
}
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
- percpu_rwsem_assert_held(&c->mark_lock);
-
- if (!(flags & BCH_BUCKET_MARK_GC))
- __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, false);
-
- if ((flags & BCH_BUCKET_MARK_GC) ||
- gc_visited(c, pos))
- __bch2_mark_alloc_bucket(c, ca, b, owned_by_allocator, true);
+ do_mark_fn(__bch2_mark_alloc_bucket, c, pos, flags,
+ ca, b, owned_by_allocator);
}
#define checked_add(a, b) \
@@ -472,9 +478,9 @@ do { \
BUG_ON((a) != _res); \
} while (0)
-static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- size_t b, enum bch_data_type type,
- unsigned sectors, bool gc)
+static int __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+ size_t b, enum bch_data_type type,
+ unsigned sectors, bool gc)
{
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
@@ -488,6 +494,8 @@ static void __bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
new.data_type = type;
checked_add(new.dirty_sectors, sectors);
}));
+
+ return 0;
}
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
@@ -499,15 +507,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
type != BCH_DATA_JOURNAL);
if (likely(c)) {
- percpu_rwsem_assert_held(&c->mark_lock);
-
- if (!(flags & BCH_BUCKET_MARK_GC))
- __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
- false);
- if ((flags & BCH_BUCKET_MARK_GC) ||
- gc_visited(c, pos))
- __bch2_mark_metadata_bucket(c, ca, b, type, sectors,
- true);
+ do_mark_fn(__bch2_mark_metadata_bucket, c, pos, flags,
+ ca, b, type, sectors);
} else {
struct bucket *g;
struct bucket_mark new;
@@ -824,30 +825,28 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
unsigned journal_seq, unsigned flags,
bool gc)
{
- int ret = 0;
+ if (!fs_usage || gc)
+ fs_usage = this_cpu_ptr(c->usage[gc]);
switch (k.k->type) {
case KEY_TYPE_btree_ptr:
- ret = bch2_mark_extent(c, k, inserting
- ? c->opts.btree_node_size
- : -c->opts.btree_node_size,
- BCH_DATA_BTREE,
- fs_usage, journal_seq, flags, gc);
- break;
+ return bch2_mark_extent(c, k, inserting
+ ? c->opts.btree_node_size
+ : -c->opts.btree_node_size,
+ BCH_DATA_BTREE,
+ fs_usage, journal_seq, flags, gc);
case KEY_TYPE_extent:
- ret = bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
- fs_usage, journal_seq, flags, gc);
- break;
+ return bch2_mark_extent(c, k, sectors, BCH_DATA_USER,
+ fs_usage, journal_seq, flags, gc);
case KEY_TYPE_stripe:
- ret = bch2_mark_stripe(c, k, inserting,
- fs_usage, journal_seq, flags, gc);
- break;
+ return bch2_mark_stripe(c, k, inserting,
+ fs_usage, journal_seq, flags, gc);
case KEY_TYPE_inode:
if (inserting)
fs_usage->s.nr_inodes++;
else
fs_usage->s.nr_inodes--;
- break;
+ return 0;
case KEY_TYPE_reservation: {
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
@@ -857,13 +856,11 @@ static int __bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
fs_usage->s.reserved += sectors;
fs_usage->persistent_reserved[replicas - 1] += sectors;
- break;
+ return 0;
}
default:
- break;
+ return 0;
}
-
- return ret;
}
int bch2_mark_key_locked(struct bch_fs *c,
@@ -873,26 +870,9 @@ int bch2_mark_key_locked(struct bch_fs *c,
struct bch_fs_usage *fs_usage,
u64 journal_seq, unsigned flags)
{
- int ret;
-
- if (!(flags & BCH_BUCKET_MARK_GC)) {
- ret = __bch2_mark_key(c, k, inserting, sectors,
- fs_usage ?: this_cpu_ptr(c->usage[0]),
- journal_seq, flags, false);
- if (ret)
- return ret;
- }
-
- if ((flags & BCH_BUCKET_MARK_GC) ||
- gc_visited(c, pos)) {
- ret = __bch2_mark_key(c, k, inserting, sectors,
- this_cpu_ptr(c->usage[1]),
- journal_seq, flags, true);
- if (ret)
- return ret;
- }
-
- return 0;
+ return do_mark_fn(__bch2_mark_key, c, pos, flags,
+ k, inserting, sectors, fs_usage,
+ journal_seq, flags);
}
int bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
@@ -983,7 +963,7 @@ void bch2_mark_update(struct btree_insert *trans,
bch2_btree_node_iter_advance(&node_iter, b);
}
- if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res, pos) &&
+ if (bch2_fs_usage_apply(c, fs_usage, trans->disk_res) &&
!warned_disk_usage &&
!xchg(&warned_disk_usage, 1)) {
char buf[200];
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index a362bec97d84..ffa05951b671 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -248,8 +248,8 @@ void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);
-#define BCH_BUCKET_MARK_NOATOMIC (1 << 0)
-#define BCH_BUCKET_MARK_GC (1 << 1)
+#define BCH_BUCKET_MARK_GC (1 << 0)
+#define BCH_BUCKET_MARK_NOATOMIC (1 << 1)
int bch2_mark_key_locked(struct bch_fs *, struct bkey_s_c,
bool, s64, struct gc_pos,