Diffstat (limited to 'libbcachefs/super.c')
-rw-r--r--  libbcachefs/super.c  62
1 file changed, 32 insertions, 30 deletions
diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 0cdf285e..94288fc9 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
@@ -169,10 +169,9 @@ int bch2_congested(void *data, int bdi_bits)
}
}
} else {
- unsigned target = READ_ONCE(c->opts.foreground_target);
- const struct bch_devs_mask *devs = target
- ? bch2_target_to_mask(c, target)
- : &c->rw_devs[BCH_DATA_USER];
+ const struct bch_devs_mask *devs =
+ bch2_target_to_mask(c, c->opts.foreground_target) ?:
+ &c->rw_devs[BCH_DATA_user];
for_each_member_device_rcu(ca, c, i, devs) {
bdi = ca->disk_sb.bdev->bd_bdi;
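The hunk above rewrites the device-mask lookup using GCC's conditional operator with the middle operand omitted ("x ?: y"), which evaluates "x" once and falls back to "y" only when "x" is zero or NULL; the simplification relies on the assumption that bch2_target_to_mask() returns NULL for an unset target. A minimal standalone sketch of the idiom (lookup() and fallback are illustrative names, not from the patch):

#include <stdio.h>

/* Illustrative stand-in for bch2_target_to_mask(): NULL when no target set. */
static const char *lookup(unsigned target)
{
	return target ? "mask for target" : NULL;
}

int main(void)
{
	const char *fallback = "all rw devices";

	/*
	 * GNU C extension: "a ?: b" means "a ? a : b", except that "a"
	 * is evaluated only once, so the lookup-then-default logic fits
	 * in a single expression.
	 */
	const char *devs = lookup(0) ?: fallback;

	printf("%s\n", devs);	/* prints "all rw devices" */
	return 0;
}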
@@ -213,10 +212,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
int ret;
bch2_rebalance_stop(c);
-
- for_each_member_device(ca, c, i)
- bch2_copygc_stop(ca);
-
+ bch2_copygc_stop(c);
bch2_gc_thread_stop(c);
/*
@@ -387,8 +383,8 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
bool ret = !test_and_set_bit(BCH_FS_EMERGENCY_RO, &c->flags);
- bch2_fs_read_only_async(c);
bch2_journal_halt(&c->journal);
+ bch2_fs_read_only_async(c);
wake_up(&bch_read_only_wait);
return ret;
@@ -396,8 +392,6 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c)
static int bch2_fs_read_write_late(struct bch_fs *c)
{
- struct bch_dev *ca;
- unsigned i;
int ret;
ret = bch2_gc_thread_start(c);
@@ -406,13 +400,10 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
return ret;
}
- for_each_rw_member(ca, c, i) {
- ret = bch2_copygc_start(c, ca);
- if (ret) {
- bch_err(c, "error starting copygc threads");
- percpu_ref_put(&ca->io_ref);
- return ret;
- }
+ ret = bch2_copygc_start(c);
+ if (ret) {
+ bch_err(c, "error starting copygc thread");
+ return ret;
}
ret = bch2_rebalance_start(c);
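The deleted loop shows why the old code needed percpu_ref_put() on the error path: for_each_rw_member() pins each device's io_ref as it iterates, so bailing out early must drop that pin by hand. The single filesystem-wide bch2_copygc_start() call removes the bookkeeping entirely. A hedged sketch of the general pattern (plain integers stand in for percpu refs; get_ref(), put_ref(), and start_one() are illustrative, not the real macros):

/* Sketch of an "iterator holds a reference" loop. */
struct dev { int refcount; };

static void get_ref(struct dev *d) { d->refcount++; }
static void put_ref(struct dev *d) { d->refcount--; }

static int start_one(struct dev *d) { (void)d; return 0; }

static int start_all(struct dev **devs, int n)
{
	for (int i = 0; i < n; i++) {
		struct dev *d = devs[i];
		int ret;

		get_ref(d);		/* the iterator pins the device */

		ret = start_one(d);
		if (ret) {
			put_ref(d);	/* early exit must drop the pin... */
			return ret;
		}

		put_ref(d);		/* ...as does the normal loop step */
	}
	return 0;
}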
@@ -450,6 +441,13 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
+ /*
+ * We need to write out a journal entry before we start doing btree
+ * updates, to ensure that on unclean shutdown new journal blacklist
+ * entries are created:
+ */
+ bch2_journal_meta(&c->journal);
+
clear_bit(BCH_FS_ALLOC_CLEAN, &c->flags);
for_each_rw_member(ca, c, i)
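The new comment in the hunk above encodes a write-ahead rule: a journal entry must be written before the first btree update of the read-write transition, so that an unclean shutdown still yields the journal blacklist entries recovery needs. A toy sketch of the same "persist a marker before mutating" ordering, under the assumption that write_marker() reaches stable storage before returning (names are illustrative, not bcachefs API):

#include <stdbool.h>
#include <assert.h>

struct journal { bool marker_on_disk; };

/* Assumed durable: returns only after the marker record is stable. */
static void write_marker(struct journal *j)
{
	j->marker_on_disk = true;
}

static void btree_update(struct journal *j)
{
	/* Invariant from the comment above: no update may precede the
	 * marker, or a crash here would leave recovery without it. */
	assert(j->marker_on_disk);
	/* ... mutate the btree ... */
}

int main(void)
{
	struct journal j = { false };

	write_marker(&j);	/* bch2_journal_meta() plays this role */
	btree_update(&j);	/* only now are updates allowed */
	return 0;
}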
@@ -535,6 +533,7 @@ static void bch2_fs_free(struct bch_fs *c)
kfree(c->replicas_gc.entries);
kfree(rcu_dereference_protected(c->disk_groups, 1));
kfree(c->journal_seq_blacklist_table);
+ free_heap(&c->copygc_heap);
if (c->journal_reclaim_wq)
destroy_workqueue(c->journal_reclaim_wq);
@@ -684,6 +683,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_init(&c->times[i]);
+ bch2_fs_copygc_init(c);
bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
bch2_fs_allocator_background_init(c);
bch2_fs_allocator_foreground_init(c);
@@ -708,9 +708,12 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
INIT_LIST_HEAD(&c->fsck_errors);
mutex_init(&c->fsck_error_lock);
- INIT_LIST_HEAD(&c->ec_new_stripe_list);
- mutex_init(&c->ec_new_stripe_lock);
- mutex_init(&c->ec_stripe_create_lock);
+ INIT_LIST_HEAD(&c->ec_stripe_head_list);
+ mutex_init(&c->ec_stripe_head_lock);
+
+ INIT_LIST_HEAD(&c->ec_stripe_new_list);
+ mutex_init(&c->ec_stripe_new_lock);
+
spin_lock_init(&c->ec_stripes_heap_lock);
seqcount_init(&c->gc_pos_lock);
@@ -1108,10 +1111,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
init_rwsem(&ca->bucket_lock);
- writepoint_init(&ca->copygc_write_point, BCH_DATA_USER);
-
- bch2_dev_copygc_init(ca);
-
INIT_WORK(&ca->io_error_work, bch2_io_error_work);
bch2_time_stats_init(&ca->io_latency[READ]);
@@ -1241,7 +1240,7 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
return ret;
if (test_bit(BCH_FS_ALLOC_READ_DONE, &c->flags) &&
- !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_SB])) {
+ !percpu_u64_get(&ca->usage[0]->buckets[BCH_DATA_sb])) {
mutex_lock(&c->sb_lock);
bch2_mark_dev_superblock(ca->fs, ca, 0);
mutex_unlock(&c->sb_lock);
@@ -1352,7 +1351,11 @@ static bool bch2_fs_may_start(struct bch_fs *c)
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
- bch2_copygc_stop(ca);
+ /*
+ * Device going read only means the copygc reserve gets smaller, so we
+ * don't want that happening while copygc is in progress:
+ */
+ bch2_copygc_stop(c);
/*
* The allocator thread itself allocates btree nodes, so stop it first:
@@ -1360,6 +1363,8 @@ static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_stop(ca);
bch2_dev_allocator_remove(c, ca);
bch2_dev_journal_stop(&c->journal, ca);
+
+ bch2_copygc_start(c);
}
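__bch2_dev_read_only() now brackets the whole transition: copygc is stopped filesystem-wide before the device leaves the allocator (so the copygc reserve cannot shrink under a run in progress) and restarted once the device is quiesced. A minimal sketch of that quiesce/mutate/resume bracket, assuming a worker with plain stop/start entry points:

#include <stdbool.h>

/* Illustrative bracket pattern: quiesce a worker across a state change. */
struct worker { bool running; };

static void worker_stop(struct worker *w)  { w->running = false; }
static void worker_start(struct worker *w) { w->running = true; }

static void transition(struct worker *w, void (*change_state)(void))
{
	worker_stop(w);		/* no work may observe the half-changed state */
	change_state();		/* e.g. remove the device from the allocator */
	worker_start(w);	/* resume against the new state */
}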
static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
@@ -1374,9 +1379,6 @@ static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
if (bch2_dev_allocator_start(ca))
return "error starting allocator thread";
- if (bch2_copygc_start(c, ca))
- return "error starting copygc thread";
-
return NULL;
}