diff options
Diffstat (limited to 'fs/bcachefs/super.c')
-rw-r--r-- | fs/bcachefs/super.c | 43 |
1 files changed, 23 insertions, 20 deletions
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index 777d60d3a4de..287535e9c4f7 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -149,6 +149,7 @@ int bch2_congested(void *data, int bdi_bits) unsigned i; int ret = 0; + rcu_read_lock(); if (bdi_bits & (1 << WB_sync_congested)) { /* Reads - check all devices: */ for_each_readable_member(ca, c, i) { @@ -160,12 +161,11 @@ int bch2_congested(void *data, int bdi_bits) } } } else { - /* Writes prefer fastest tier: */ - struct bch_tier *tier = READ_ONCE(c->fastest_tier); - struct bch_devs_mask *devs = - tier ? &tier->devs : &c->rw_devs[BCH_DATA_USER]; + unsigned target = READ_ONCE(c->opts.foreground_target); + const struct bch_devs_mask *devs = target + ? bch2_target_to_mask(c, target) + : &c->rw_devs[BCH_DATA_USER]; - rcu_read_lock(); for_each_member_device_rcu(ca, c, i, devs) { bdi = ca->disk_sb.bdev->bd_bdi; @@ -174,8 +174,8 @@ int bch2_congested(void *data, int bdi_bits) break; } } - rcu_read_unlock(); } + rcu_read_unlock(); return ret; } @@ -185,9 +185,9 @@ int bch2_congested(void *data, int bdi_bits) /* * For startup/shutdown of RW stuff, the dependencies are: * - * - foreground writes depend on copygc and tiering (to free up space) + * - foreground writes depend on copygc and rebalance (to free up space) * - * - copygc and tiering depend on mark and sweep gc (they actually probably + * - copygc and rebalance depend on mark and sweep gc (they actually probably * don't because they either reserve ahead of time or don't block if * allocations fail, but allocations can require mark and sweep gc to run * because of generation number wraparound) @@ -225,7 +225,7 @@ static void __bch2_fs_read_only(struct bch_fs *c) struct bch_dev *ca; unsigned i; - bch2_tiering_stop(c); + bch2_rebalance_stop(c); for_each_member_device(ca, c, i) bch2_copygc_stop(ca); @@ -385,8 +385,8 @@ const char *bch2_fs_read_write(struct bch_fs *c) goto err; } - err = "error starting tiering thread"; - if (bch2_tiering_start(c)) + err = "error starting rebalance thread"; + if (bch2_rebalance_start(c)) goto err; schedule_delayed_work(&c->pd_controllers_update, 5 * HZ); @@ -531,7 +531,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) #undef BCH_TIME_STAT bch2_fs_allocator_init(c); - bch2_fs_tiering_init(c); + bch2_fs_rebalance_init(c); bch2_fs_quota_init(c); INIT_LIST_HEAD(&c->list); @@ -555,8 +555,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) c->writeback_pages_max = (256 << 10) / PAGE_SIZE; c->copy_gc_enabled = 1; - c->tiering_enabled = 1; - c->tiering_percent = 10; + c->rebalance_enabled = 1; + c->rebalance_percent = 10; c->journal.write_time = &c->journal_write_time; c->journal.delay_time = &c->journal_delay_time; @@ -1215,6 +1215,8 @@ static int __bch2_dev_online(struct bch_fs *c, struct bch_sb_handle *sb) if (ca->mi.state == BCH_MEMBER_STATE_RW) bch2_dev_allocator_add(c, ca); + rebalance_wakeup(c); + percpu_ref_reinit(&ca->io_ref); return 0; } @@ -1339,9 +1341,6 @@ static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca) if (bch2_copygc_start(c, ca)) return "error starting copygc thread"; - if (bch2_tiering_start(c)) - return "error starting tiering thread"; - return NULL; } @@ -1349,6 +1348,7 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, enum bch_member_state new_state, int flags) { struct bch_sb_field_members *mi; + int ret = 0; if (ca->mi.state == new_state) return 0; @@ -1367,10 +1367,13 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, bch2_write_super(c); mutex_unlock(&c->sb_lock); - if (new_state == BCH_MEMBER_STATE_RW) - return __bch2_dev_read_write(c, ca) ? -ENOMEM : 0; + if (new_state == BCH_MEMBER_STATE_RW && + __bch2_dev_read_write(c, ca)) + ret = -ENOMEM; - return 0; + rebalance_wakeup(c); + + return ret; } int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca, |