author     Kent Overstreet <kent.overstreet@gmail.com>   2022-03-13 19:27:55 -0400
committer  Kent Overstreet <kent.overstreet@gmail.com>   2022-04-02 16:02:55 -0400
commit     e3f47c13740cecba83e47f01e989f3b25138e5ec (patch)
tree       611ef1986dcc778e0b8e05a381d292c63222f756
parent     c2070e4a85422938a291cfb43f7d5c4c85c1fdb3 (diff)
bcachefs: Improve bucket_alloc tracepoints
- bucket_alloc_fail now indicates whether the allocation was nonblocking
- The alloc reserve is now reported as a string, not an integer
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
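
The rename works by defining the reserves once in an x-macro list (BCH_ALLOC_RESERVES) and expanding it twice: once to generate the enum and once to generate the bch2_alloc_reserves[] name table that the tracepoints index into. Below is a minimal standalone sketch of that pattern; the macro list, enum, and string array mirror the patch, while the #include, main(), and printf() are demo scaffolding only and are not part of the kernel change.

/* Standalone sketch of the x-macro pattern used by this patch:
 * one list generates both the enum values and the matching name
 * strings, so tracepoints can print "movinggc" instead of a raw
 * integer. The main() below is demo-only, not kernel code.
 */
#include <stdio.h>

#define BCH_ALLOC_RESERVES()		\
	x(btree_movinggc)		\
	x(btree)			\
	x(movinggc)			\
	x(none)

enum alloc_reserve {
#define x(name)	RESERVE_##name,
	BCH_ALLOC_RESERVES()
#undef x
};

static const char * const bch2_alloc_reserves[] = {
#define x(t) #t,
	BCH_ALLOC_RESERVES()
#undef x
	NULL
};

int main(void)
{
	/* Prints "reserve movinggc", mirroring the new trace output */
	printf("reserve %s\n", bch2_alloc_reserves[RESERVE_movinggc]);
	return 0;
}

Generating both definitions from one list means a reserve cannot be added to the enum without also getting a printable name.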
-rw-r--r--   fs/bcachefs/alloc_foreground.c       | 22
-rw-r--r--   fs/bcachefs/alloc_foreground.h       |  2
-rw-r--r--   fs/bcachefs/alloc_types.h            | 14
-rw-r--r--   fs/bcachefs/btree_update_interior.c  |  4
-rw-r--r--   fs/bcachefs/buckets.h                |  8
-rw-r--r--   fs/bcachefs/ec.c                     |  8
-rw-r--r--   fs/bcachefs/io.h                     |  4
-rw-r--r--   fs/bcachefs/journal.c                |  2
-rw-r--r--   fs/bcachefs/move.c                   |  2
-rw-r--r--   fs/bcachefs/movinggc.c               | 21
-rw-r--r--   fs/bcachefs/sysfs.c                  |  2
-rw-r--r--   include/trace/events/bcachefs.h      | 42
12 files changed, 67 insertions(+), 64 deletions(-)
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 178d7c058597..5b1149365389 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -32,6 +32,13 @@
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+	BCH_ALLOC_RESERVES()
+#undef x
+	NULL
+};
+
 /*
  * Open buckets represent a bucket that's currently being allocated from. They
  * serve two purposes:
@@ -172,10 +179,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
 	switch (reserve) {
-	case RESERVE_BTREE:
-	case RESERVE_BTREE_MOVINGGC:
+	case RESERVE_btree:
+	case RESERVE_btree_movinggc:
 		return 0;
-	case RESERVE_MOVINGGC:
+	case RESERVE_movinggc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -213,7 +220,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 	spin_unlock(&c->freelist_lock);
 
-	trace_open_bucket_alloc_fail(ca, reserve);
+	trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
 	return ERR_PTR(-OPEN_BUCKETS_EMPTY);
 }
 
@@ -254,7 +261,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 	spin_unlock(&c->freelist_lock);
 
-	trace_bucket_alloc(ca, reserve);
+	trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
 	return ob;
 }
 
@@ -487,7 +494,8 @@ err:
 	ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
 
 	if (ob == ERR_PTR(-FREELIST_EMPTY)) {
-		trace_bucket_alloc_fail(ca, reserve, avail, need_journal_commit);
+		trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
+					need_journal_commit, cl == NULL);
 		atomic_long_inc(&c->bucket_alloc_fail);
 	}
 
@@ -521,7 +529,7 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
 			       struct dev_stripe_state *stripe)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_NONE);
+	u64 free_space = dev_buckets_available(ca, RESERVE_none);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index f51cec5e7cc1..8bc78877f0fc 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -12,6 +12,8 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_list;
 
+extern const char * const bch2_alloc_reserves[];
+
 struct dev_alloc_list {
 	unsigned	nr;
 	u8		devs[BCH_SB_MEMBERS_MAX];
diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h
index 22e1fbda9046..21b56451bc18 100644
--- a/fs/bcachefs/alloc_types.h
+++ b/fs/bcachefs/alloc_types.h
@@ -10,12 +10,16 @@
 
 struct ec_bucket_buf;
 
+#define BCH_ALLOC_RESERVES()		\
+	x(btree_movinggc)		\
+	x(btree)			\
+	x(movinggc)			\
+	x(none)
+
 enum alloc_reserve {
-	RESERVE_BTREE_MOVINGGC	= -2,
-	RESERVE_BTREE		= -1,
-	RESERVE_MOVINGGC	= 0,
-	RESERVE_NONE		= 1,
-	RESERVE_NR		= 2,
+#define x(name)	RESERVE_##name,
+	BCH_ALLOC_RESERVES()
+#undef x
 };
 
 #define OPEN_BUCKETS_COUNT	1024
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 5834190da6a9..4ba229bfb0ee 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -194,10 +194,10 @@ static struct btree *__bch2_btree_node_alloc(struct bch_fs *c,
 
 	if (flags & BTREE_INSERT_USE_RESERVE) {
 		nr_reserve	= 0;
-		alloc_reserve	= RESERVE_BTREE_MOVINGGC;
+		alloc_reserve	= RESERVE_btree_movinggc;
 	} else {
 		nr_reserve	= BTREE_NODE_RESERVE;
-		alloc_reserve	= RESERVE_BTREE;
+		alloc_reserve	= RESERVE_btree;
 	}
 
 	mutex_lock(&c->btree_reserve_cache_lock);
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 4a3d6bf1e3ef..25baca33e885 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -122,16 +122,16 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
 	s64 reserved = 0;
 
 	switch (reserve) {
-	case RESERVE_NONE:
+	case RESERVE_none:
 		reserved += ca->mi.nbuckets >> 6;
 		fallthrough;
-	case RESERVE_MOVINGGC:
+	case RESERVE_movinggc:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_BTREE:
+	case RESERVE_btree:
 		reserved += ca->nr_btree_reserve;
 		fallthrough;
-	case RESERVE_BTREE_MOVINGGC:
+	case RESERVE_btree_movinggc:
 		break;
 	default:
 		BUG();
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 7629c34b7cd0..616a551265e0 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -1304,8 +1304,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 					    &nr_have_parity,
 					    &have_cache,
 					    h->copygc
-					    ? RESERVE_MOVINGGC
-					    : RESERVE_NONE,
+					    ? RESERVE_movinggc
+					    : RESERVE_none,
 					    0,
 					    cl);
 
@@ -1333,8 +1333,8 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 					    &nr_have_data,
 					    &have_cache,
 					    h->copygc
-					    ? RESERVE_MOVINGGC
-					    : RESERVE_NONE,
+					    ? RESERVE_movinggc
+					    : RESERVE_none,
 					    0,
 					    cl);
 
diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h
index 1aa422dccef7..fb5114518666 100644
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -50,7 +50,7 @@ static inline u64 *op_journal_seq(struct bch_write_op *op)
 
 static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 {
-	return op->alloc_reserve == RESERVE_MOVINGGC
+	return op->alloc_reserve == RESERVE_movinggc
 		? op->c->copygc_wq
 		: op->c->btree_update_wq;
 }
@@ -79,7 +79,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->compression_type	= bch2_compression_opt_to_type[opts.compression];
 	op->nr_replicas		= 0;
 	op->nr_replicas_required = c->opts.data_replicas_required;
-	op->alloc_reserve	= RESERVE_NONE;
+	op->alloc_reserve	= RESERVE_none;
 	op->incompressible	= 0;
 	op->open_buckets.nr	= 0;
 	op->devs_have.nr	= 0;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index e33085fe978f..6ea6810337db 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -801,7 +801,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 				break;
 			}
 		} else {
-			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_NONE,
+			ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
						       false, cl);
 			if (IS_ERR(ob[nr_got])) {
 				ret = cl ? -EAGAIN : -ENOSPC;
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 16bca1446a2b..b4588a919dd4 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -351,7 +351,7 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 	}
 
 	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
-		m->op.alloc_reserve = RESERVE_MOVINGGC;
+		m->op.alloc_reserve = RESERVE_movinggc;
 		m->op.flags |= BCH_WRITE_ALLOC_NOWAIT;
 	} else {
 		/* XXX: this should probably be passed in */
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 466975a3151f..1c92d5365958 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -30,21 +30,6 @@
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-/*
- * We can't use the entire copygc reserve in one iteration of copygc: we may
- * need the buckets we're freeing up to go back into the copygc reserve to make
- * forward progress, but if the copygc reserve is full they'll be available for
- * any allocation - and it's possible that in a given iteration, we free up most
- * of the buckets we're going to free before we allocate most of the buckets
- * we're going to allocate.
- *
- * If we only use half of the reserve per iteration, then in steady state we'll
- * always have room in the reserve for the buckets we're going to need in the
- * next iteration:
- */
-#define COPYGC_BUCKETS_PER_ITER(ca)					\
-	((ca)->free[RESERVE_MOVINGGC].size / 2)
-
 static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
 {
 	const struct copygc_heap_entry *l = _l;
@@ -250,7 +235,7 @@ static int bch2_copygc(struct bch_fs *c)
 	}
 
 	for_each_rw_member(ca, c, dev_idx) {
-		s64 avail = min(dev_buckets_available(ca, RESERVE_MOVINGGC),
+		s64 avail = min(dev_buckets_available(ca, RESERVE_movinggc),
				ca->mi.nbuckets >> 6);
 
 		sectors_reserved += avail * ca->mi.bucket_size;
@@ -268,7 +253,7 @@ static int bch2_copygc(struct bch_fs *c)
 	}
 
 	/*
-	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
+	 * Our btree node allocations also come out of RESERVE_movinggc:
 	 */
 	sectors_reserved = (sectors_reserved * 3) / 4;
 	if (!sectors_reserved) {
@@ -354,7 +339,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_NONE) *
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
				       ca->mi.bucket_size) >> 1);
 		fragmented = usage.d[BCH_DATA_user].fragmented;
 
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index bed48afb4ac9..d018e8bc2677 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -734,7 +734,7 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 	       "open_buckets_user\t%u\n"
 	       "btree reserve cache\t%u\n",
 	       stats.buckets_ec,
-	       __dev_buckets_available(ca, stats, RESERVE_NONE),
+	       __dev_buckets_available(ca, stats, RESERVE_none),
 	       c->freelist_wait.list.first	? "waiting" : "empty",
 	       OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
 	       ca->nr_open_buckets,
diff --git a/include/trace/events/bcachefs.h b/include/trace/events/bcachefs.h
index 832e9f191409..0fd2fc11b86b 100644
--- a/include/trace/events/bcachefs.h
+++ b/include/trace/events/bcachefs.h
@@ -468,58 +468,62 @@ TRACE_EVENT(invalidate,
 );
 
 DECLARE_EVENT_CLASS(bucket_alloc,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve),
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve),
 
 	TP_STRUCT__entry(
 		__field(dev_t,			dev	)
-		__field(enum alloc_reserve,	reserve	)
+		__array(char,	reserve,	16	)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= ca->dev;
-		__entry->reserve	= reserve;
+		strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
 	),
 
-	TP_printk("%d,%d reserve %d",
+	TP_printk("%d,%d reserve %s",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->reserve)
 );
 
 DEFINE_EVENT(bucket_alloc, bucket_alloc,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve)
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve)
 );
 
 TRACE_EVENT(bucket_alloc_fail,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve,
-		 u64 avail, u64 need_journal_commit),
-	TP_ARGS(ca, reserve, avail, need_journal_commit),
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
+		 u64 avail, u64 need_journal_commit,
+		 bool nonblocking),
+	TP_ARGS(ca, alloc_reserve, avail, need_journal_commit, nonblocking),
 
 	TP_STRUCT__entry(
-		__field(dev_t,			dev	)
-		__field(enum alloc_reserve,	reserve	)
-		__field(u64,			avail	)
-		__field(u64,			need_journal_commit	)
+		__field(dev_t,			dev			)
+		__array(char,	reserve,	16			)
+		__field(u64,			avail			)
+		__field(u64,			need_journal_commit	)
+		__field(bool,			nonblocking		)
 	),
 
 	TP_fast_assign(
 		__entry->dev		= ca->dev;
-		__entry->reserve	= reserve;
+		strlcpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
 		__entry->avail		= avail;
 		__entry->need_journal_commit = need_journal_commit;
+		__entry->nonblocking	= nonblocking;
 	),
 
-	TP_printk("%d,%d reserve %d avail %llu need_journal_commit %llu",
+	TP_printk("%d,%d reserve %s avail %llu need_journal_commit %llu nonblocking %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->reserve,
		  __entry->avail,
-		  __entry->need_journal_commit)
+		  __entry->need_journal_commit,
+		  __entry->nonblocking)
 );
 
 DEFINE_EVENT(bucket_alloc, open_bucket_alloc_fail,
-	TP_PROTO(struct bch_dev *ca, enum alloc_reserve reserve),
-	TP_ARGS(ca, reserve)
+	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve),
+	TP_ARGS(ca, alloc_reserve)
 );
 
 /* Moving IO */