summaryrefslogtreecommitdiff
path: root/libbcache/sysfs.c
diff options
context:
space:
mode:
Diffstat (limited to 'libbcache/sysfs.c')
-rw-r--r--libbcache/sysfs.c141
1 file changed, 61 insertions, 80 deletions
diff --git a/libbcache/sysfs.c b/libbcache/sysfs.c
index 5f41d6ea..c96ad336 100644
--- a/libbcache/sysfs.c
+++ b/libbcache/sysfs.c
@@ -37,7 +37,6 @@ write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(blockdev_volume_create);
-write_attribute(add_device);
read_attribute(uuid);
read_attribute(minor);
@@ -207,12 +206,10 @@ SHOW(bch_cached_dev)
return 0;
}
-STORE(__cached_dev)
+STORE(bch_cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
- unsigned v = size;
- struct cache_set *c;
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
@@ -229,6 +226,13 @@ STORE(__cached_dev)
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
+ if (attr == &sysfs_writeback_running)
+ bch_writeback_queue(dc);
+
+ if (attr == &sysfs_writeback_percent)
+ schedule_delayed_work(&dc->writeback_pd_update,
+ dc->writeback_pd_update_seconds * HZ);
+
if (attr == &sysfs_clear_stats)
bch_cache_accounting_clear(&dc->accounting);
@@ -296,17 +300,25 @@ STORE(__cached_dev)
}
if (attr == &sysfs_attach) {
- if (uuid_parse(buf, &dc->disk_sb.sb->user_uuid))
+ struct bch_fs *c;
+ uuid_le uuid;
+ int ret;
+
+ if (uuid_parse(buf, &uuid))
return -EINVAL;
- list_for_each_entry(c, &bch_fs_list, list) {
- v = bch_cached_dev_attach(dc, c);
- if (!v)
- return size;
+ c = bch_uuid_to_fs(uuid);
+ if (!c) {
+ pr_err("Can't attach %s: cache set not found", buf);
+ return -ENOENT;
}
- pr_err("Can't attach %s: cache set not found", buf);
- size = v;
+ dc->disk_sb.sb->set_uuid = uuid;
+
+ ret = bch_cached_dev_attach(dc, c);
+ closure_put(&c->cl);
+ if (ret)
+ return ret;
}
if (attr == &sysfs_detach && dc->disk.c)
@@ -318,25 +330,6 @@ STORE(__cached_dev)
return size;
}
-STORE(bch_cached_dev)
-{
- struct cached_dev *dc = container_of(kobj, struct cached_dev,
- disk.kobj);
-
- mutex_lock(&bch_register_lock);
- size = __cached_dev_store(kobj, attr, buf, size);
-
- if (attr == &sysfs_writeback_running)
- bch_writeback_queue(dc);
-
- if (attr == &sysfs_writeback_percent)
- schedule_delayed_work(&dc->writeback_pd_update,
- dc->writeback_pd_update_seconds * HZ);
-
- mutex_unlock(&bch_register_lock);
- return size;
-}
-
static struct attribute *bch_cached_dev_files[] = {
&sysfs_attach,
&sysfs_detach,
@@ -381,7 +374,7 @@ SHOW(bch_blockdev_volume)
return 0;
}
-STORE(__bch_blockdev_volume)
+STORE(bch_blockdev_volume)
{
struct bcache_device *d = container_of(kobj, struct bcache_device,
kobj);
@@ -439,7 +432,6 @@ STORE(__bch_blockdev_volume)
return size;
}
-STORE_LOCKED(bch_blockdev_volume)
static struct attribute *bch_blockdev_volume_files[] = {
&sysfs_unregister,
@@ -449,7 +441,7 @@ static struct attribute *bch_blockdev_volume_files[] = {
};
KTYPE(bch_blockdev_volume);
-static int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct bch_fs *c, char *buf)
{
struct bset_stats stats;
size_t nodes = 0;
@@ -492,7 +484,7 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
stats.failed_overflow);
}
-static unsigned bch_root_usage(struct cache_set *c)
+static unsigned bch_root_usage(struct bch_fs *c)
{
unsigned bytes = 0;
struct bkey_packed *k;
@@ -516,7 +508,7 @@ lock_root:
return (bytes * 100) / btree_bytes(c);
}
-static size_t bch_btree_cache_size(struct cache_set *c)
+static size_t bch_btree_cache_size(struct bch_fs *c)
{
size_t ret = 0;
struct btree *b;
@@ -529,20 +521,20 @@ static size_t bch_btree_cache_size(struct cache_set *c)
return ret;
}
-static unsigned bch_fs_available_percent(struct cache_set *c)
+static unsigned bch_fs_available_percent(struct bch_fs *c)
{
return div64_u64((u64) sectors_available(c) * 100,
c->capacity ?: 1);
}
#if 0
-static unsigned bch_btree_used(struct cache_set *c)
+static unsigned bch_btree_used(struct bch_fs *c)
{
return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c));
}
-static unsigned bch_average_key_size(struct cache_set *c)
+static unsigned bch_average_key_size(struct bch_fs *c)
{
return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
@@ -550,7 +542,7 @@ static unsigned bch_average_key_size(struct cache_set *c)
}
#endif
-static ssize_t show_fs_alloc_debug(struct cache_set *c, char *buf)
+static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
struct bch_fs_usage stats = bch_fs_usage_read(c);
@@ -577,7 +569,7 @@ static ssize_t show_fs_alloc_debug(struct cache_set *c, char *buf)
stats.online_reserved);
}
-static ssize_t bch_compression_stats(struct cache_set *c, char *buf)
+static ssize_t bch_compression_stats(struct bch_fs *c, char *buf)
{
struct btree_iter iter;
struct bkey_s_c k;
@@ -627,7 +619,7 @@ static ssize_t bch_compression_stats(struct cache_set *c, char *buf)
SHOW(bch_fs)
{
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
sysfs_print(minor, c->minor);
@@ -718,7 +710,7 @@ SHOW(bch_fs)
STORE(__bch_fs)
{
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
if (attr == &sysfs_unregister) {
bch_fs_detach(c);
@@ -761,12 +753,12 @@ STORE(__bch_fs)
c->foreground_write_ratelimit_enabled);
if (attr == &sysfs_copy_gc_enabled) {
- struct cache *ca;
+ struct bch_dev *ca;
unsigned i;
ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
?: (ssize_t) size;
- for_each_cache(ca, c, i)
+ for_each_member_device(ca, c, i)
if (ca->moving_gc_read)
wake_up_process(ca->moving_gc_read);
return ret;
@@ -833,21 +825,12 @@ STORE(__bch_fs)
STORE(bch_fs)
{
- struct cache_set *c = container_of(kobj, struct cache_set, kobj);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
mutex_lock(&c->state_lock);
size = __bch_fs_store(kobj, attr, buf, size);
mutex_unlock(&c->state_lock);
- if (attr == &sysfs_add_device) {
- char *path = kstrdup(buf, GFP_KERNEL);
- int r = bch_dev_add(c, strim(path));
-
- kfree(path);
- if (r)
- return r;
- }
-
return size;
}
@@ -858,7 +841,6 @@ static struct attribute *bch_fs_files[] = {
&sysfs_journal_reclaim_delay_ms,
&sysfs_journal_entry_size_max,
&sysfs_blockdev_volume_create,
- &sysfs_add_device,
&sysfs_block_size,
&sysfs_block_size_bytes,
@@ -894,13 +876,13 @@ KTYPE(bch_fs);
SHOW(bch_fs_internal)
{
- struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
return bch_fs_show(&c->kobj, attr, buf);
}
STORE(bch_fs_internal)
{
- struct cache_set *c = container_of(kobj, struct cache_set, internal);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
return bch_fs_store(&c->kobj, attr, buf, size);
}
@@ -945,14 +927,14 @@ KTYPE(bch_fs_internal);
SHOW(bch_fs_opts_dir)
{
- struct cache_set *c = container_of(kobj, struct cache_set, opts_dir);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
return bch_opt_show(&c->opts, attr->name, buf, PAGE_SIZE);
}
STORE(bch_fs_opts_dir)
{
- struct cache_set *c = container_of(kobj, struct cache_set, opts_dir);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
const struct bch_option *opt;
enum bch_opt_id id;
u64 v;
@@ -1004,7 +986,7 @@ KTYPE(bch_fs_opts_dir);
SHOW(bch_fs_time_stats)
{
- struct cache_set *c = container_of(kobj, struct cache_set, time_stats);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
#define BCH_TIME_STAT(name, frequency_units, duration_units) \
sysfs_print_time_stats(&c->name##_time, name, \
@@ -1017,7 +999,7 @@ SHOW(bch_fs_time_stats)
STORE(bch_fs_time_stats)
{
- struct cache_set *c = container_of(kobj, struct cache_set, time_stats);
+ struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
#define BCH_TIME_STAT(name, frequency_units, duration_units) \
sysfs_clear_time_stats(&c->name##_time, name);
@@ -1041,29 +1023,29 @@ static struct attribute *bch_fs_time_stats_files[] = {
};
KTYPE(bch_fs_time_stats);
-typedef unsigned (bucket_map_fn)(struct cache *, struct bucket *, void *);
+typedef unsigned (bucket_map_fn)(struct bch_dev *, struct bucket *, void *);
-static unsigned bucket_priority_fn(struct cache *ca, struct bucket *g,
+static unsigned bucket_priority_fn(struct bch_dev *ca, struct bucket *g,
void *private)
{
int rw = (private ? 1 : 0);
- return ca->set->prio_clock[rw].hand - g->prio[rw];
+ return ca->fs->prio_clock[rw].hand - g->prio[rw];
}
-static unsigned bucket_sectors_used_fn(struct cache *ca, struct bucket *g,
+static unsigned bucket_sectors_used_fn(struct bch_dev *ca, struct bucket *g,
void *private)
{
return bucket_sectors_used(g);
}
-static unsigned bucket_oldest_gen_fn(struct cache *ca, struct bucket *g,
+static unsigned bucket_oldest_gen_fn(struct bch_dev *ca, struct bucket *g,
void *private)
{
return bucket_gc_gen(ca, g);
}
-static ssize_t show_quantiles(struct cache *ca, char *buf,
+static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
bucket_map_fn *fn, void *private)
{
int cmp(const void *l, const void *r)
@@ -1101,7 +1083,7 @@ static ssize_t show_quantiles(struct cache *ca, char *buf,
}
-static ssize_t show_reserve_stats(struct cache *ca, char *buf)
+static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
{
enum alloc_reserve i;
ssize_t ret;
@@ -1124,9 +1106,9 @@ static ssize_t show_reserve_stats(struct cache *ca, char *buf)
return ret;
}
-static ssize_t show_dev_alloc_debug(struct cache *ca, char *buf)
+static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
- struct cache_set *c = ca->set;
+ struct bch_fs *c = ca->fs;
struct bch_dev_usage stats = bch_dev_usage_read(ca);
return scnprintf(buf, PAGE_SIZE,
@@ -1150,13 +1132,13 @@ static ssize_t show_dev_alloc_debug(struct cache *ca, char *buf)
stats.buckets_alloc, ca->mi.nbuckets - ca->mi.first_bucket,
stats.buckets_meta, ca->mi.nbuckets - ca->mi.first_bucket,
stats.buckets_dirty, ca->mi.nbuckets - ca->mi.first_bucket,
- __buckets_available_cache(ca, stats), ca->mi.nbuckets - ca->mi.first_bucket,
+ __dev_buckets_available(ca, stats), ca->mi.nbuckets - ca->mi.first_bucket,
c->freelist_wait.list.first ? "waiting" : "empty",
c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
c->open_buckets_wait.list.first ? "waiting" : "empty");
}
-static u64 sectors_written(struct cache *ca)
+static u64 sectors_written(struct bch_dev *ca)
{
u64 ret = 0;
int cpu;
@@ -1169,8 +1151,8 @@ static u64 sectors_written(struct cache *ca)
SHOW(bch_dev)
{
- struct cache *ca = container_of(kobj, struct cache, kobj);
- struct cache_set *c = ca->set;
+ struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
+ struct bch_fs *c = ca->fs;
struct bch_dev_usage stats = bch_dev_usage_read(ca);
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
@@ -1200,8 +1182,8 @@ SHOW(bch_dev)
sysfs_print(cached_buckets, stats.buckets_cached);
sysfs_print(meta_buckets, stats.buckets_meta);
sysfs_print(alloc_buckets, stats.buckets_alloc);
- sysfs_print(available_buckets, buckets_available_cache(ca));
- sysfs_print(free_buckets, buckets_free_cache(ca));
+ sysfs_print(available_buckets, dev_buckets_available(ca));
+ sysfs_print(free_buckets, dev_buckets_free(ca));
sysfs_print(has_data, ca->mi.has_data);
sysfs_print(has_metadata, ca->mi.has_metadata);
@@ -1235,10 +1217,10 @@ SHOW(bch_dev)
return 0;
}
-STORE(__bch_dev)
+STORE(bch_dev)
{
- struct cache *ca = container_of(kobj, struct cache, kobj);
- struct cache_set *c = ca->set;
+ struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
+ struct bch_fs *c = ca->fs;
struct bch_member *mi;
sysfs_pd_controller_store(copy_gc, &ca->moving_gc_pd);
@@ -1311,7 +1293,6 @@ STORE(__bch_dev)
return size;
}
-STORE_LOCKED(bch_dev)
static struct attribute *bch_dev_files[] = {
&sysfs_uuid,