-rw-r--r--   fs/bcachefs/io_read.c            | 11
-rw-r--r--   fs/bcachefs/journal.c            | 13
-rw-r--r--   fs/bcachefs/journal_reclaim.c    |  6
-rw-r--r--   fs/bcachefs/recovery_passes.c    |  2
-rw-r--r--   fs/bcachefs/sb-counters_format.h |  1
-rw-r--r--   fs/bcachefs/trace.h              | 20
-rw-r--r--   include/linux/workqueue.h        | 12
-rw-r--r--   kernel/workqueue.c               | 14
8 files changed, 47 insertions, 32 deletions
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 210b6adc359f..fa56ff67803c 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -343,7 +343,16 @@ static struct bch_read_bio *promote_alloc(struct btree_trans *trans,
 	return promote;
 nopromote:
-	trace_io_read_nopromote(c, ret);
+	if (trace_io_read_nopromote_enabled()) {
+		CLASS(printbuf, buf)();
+		printbuf_indent_add_nextline(&buf, 2);
+		prt_printf(&buf, "%s\n", bch2_err_str(ret));
+		bch2_bkey_val_to_text(&buf, c, k);
+
+		trace_io_read_nopromote(c, buf.buf);
+	}
+	count_event(c, io_read_nopromote);
+
 	return NULL;
 }
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index f22b05e02c1e..93794d276a7f 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -1589,7 +1589,7 @@ void bch2_dev_journal_exit(struct bch_dev *ca)
 	struct journal_device *ja = &ca->journal;
 
 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
-		kfree(ja->bio[i]);
+		kvfree(ja->bio[i]);
 		ja->bio[i] = NULL;
 	}
 
@@ -1626,7 +1626,16 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
 
 	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
-		ja->bio[i] = kzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
+		/*
+		 * kvzalloc() is not what we want to be using here:
+		 * JOURNAL_ENTRY_SIZE_MAX is probably quite a bit bigger than it
+		 * needs to be.
+		 *
+		 * But changing that will require performance testing -
+		 * performance can be sensitive to anything that affects journal
+		 * pipelining.
+		 */
+		ja->bio[i] = kvzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
 				      nr_bvecs), GFP_KERNEL);
 		if (!ja->bio[i])
 			return bch_err_throw(c, ENOMEM_dev_journal_init);
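
The journal.c hunks above replace kzalloc()/kfree() with kvzalloc()/kvfree(): a JOURNAL_ENTRY_SIZE_MAX-sized bio is a large physically contiguous allocation that can fail under memory fragmentation, whereas kvzalloc() transparently falls back to vmalloc(). A minimal sketch of the pairing rule being applied here; alloc_big_buf()/free_big_buf() are hypothetical names, not part of this patch:

    #include <linux/slab.h>		/* kvzalloc(), kvfree() */

    /*
     * kvzalloc() tries kmalloc() first and falls back to vmalloc() for
     * sizes the page allocator can't satisfy contiguously, so the buffer
     * must always be released with kvfree() -- plain kfree() would be a
     * bug whenever the vmalloc fallback was taken.
     */
    static void *alloc_big_buf(size_t size)
    {
            return kvzalloc(size, GFP_KERNEL);
    }

    static void free_big_buf(void *buf)
    {
            kvfree(buf);	/* handles both kmalloc- and vmalloc-backed memory */
    }

That pairing is why bch2_dev_journal_exit() switches to kvfree() in the same patch.
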
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index cd6201741c59..0042d43b8e57 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -170,6 +170,12 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
 		return (struct journal_space) { 0, 0 };
 
 	/*
+	 * It's possible for bucket size to be misaligned w.r.t. the filesystem
+	 * block size:
+	 */
+	min_bucket_size = round_down(min_bucket_size, block_sectors(c));
+
+	/*
 	 * We sorted largest to smallest, and we want the smallest out of the
 	 * @nr_devs_want largest devices:
 	 */
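
The new round_down() discards the tail of each journal bucket that can't hold a whole filesystem block, so the space calculation no longer over-counts. Note that the kernel's round_down() (include/linux/math.h) requires a power-of-two divisor, which block_sectors() always is. A worked example with made-up numbers; usable_bucket_sectors() is a hypothetical helper, not in the patch:

    #include <linux/math.h>		/* round_down(): divisor must be a power of two */

    /*
     * e.g. 1021-sector buckets with 8-sector (4 KiB) blocks:
     * 1021 & ~7 == 1016, dropping the 5 trailing sectors that could
     * never hold a complete block.
     */
    static unsigned usable_bucket_sectors(unsigned bucket_sectors,
                                          unsigned block_sectors)
    {
            return round_down(bucket_sectors, block_sectors);
    }
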
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index c09ed2dd4639..6a039e011064 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -360,7 +360,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
 		!(r->passes_complete & BIT_ULL(pass));
 	bool ratelimit = flags & RUN_RECOVERY_PASS_ratelimit;
 
-	if (!(in_recovery && (flags & RUN_RECOVERY_PASS_nopersistent))) {
+	if (!(flags & RUN_RECOVERY_PASS_nopersistent)) {
 		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
 		__set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required);
 	}
diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h
index b868702a431a..a59b2a10659e 100644
--- a/fs/bcachefs/sb-counters_format.h
+++ b/fs/bcachefs/sb-counters_format.h
@@ -12,6 +12,7 @@ enum counters_flags {
 	x(io_read_inline,		80,	TYPE_SECTORS)	\
 	x(io_read_hole,			81,	TYPE_SECTORS)	\
 	x(io_read_promote,		30,	TYPE_COUNTER)	\
+	x(io_read_nopromote,		85,	TYPE_COUNTER)	\
 	x(io_read_bounce,		31,	TYPE_COUNTER)	\
 	x(io_read_split,		33,	TYPE_COUNTER)	\
 	x(io_read_reuse_race,		34,	TYPE_COUNTER)	\
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 9324ef32903d..3776a1403104 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -292,23 +292,9 @@ DEFINE_EVENT(bio, io_read_promote,
 	TP_ARGS(bio)
 );
 
-TRACE_EVENT(io_read_nopromote,
-	TP_PROTO(struct bch_fs *c, int ret),
-	TP_ARGS(c, ret),
-
-	TP_STRUCT__entry(
-		__field(dev_t,	dev	)
-		__array(char,	ret, 32	)
-	),
-
-	TP_fast_assign(
-		__entry->dev = c->dev;
-		strscpy(__entry->ret, bch2_err_str(ret), sizeof(__entry->ret));
-	),
-
-	TP_printk("%d,%d ret %s",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->ret)
+DEFINE_EVENT(fs_str, io_read_nopromote,
+	TP_PROTO(struct bch_fs *c, const char *str),
+	TP_ARGS(c, str)
 );
 
 DEFINE_EVENT(bio, io_read_bounce,
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6e30f275da77..e907c9bb840c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,6 +6,7 @@
 #ifndef _LINUX_WORKQUEUE_H
 #define _LINUX_WORKQUEUE_H
 
+#include <linux/alloc_tag.h>
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
@@ -505,7 +506,8 @@ void workqueue_softirq_dead(unsigned int cpu);
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 __printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
 
 #ifdef CONFIG_LOCKDEP
 /**
@@ -544,8 +546,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
 #define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)	\
-	alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),	\
-				    1, lockdep_map, ##args)
+	alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+				    1, lockdep_map, ##args))
 #endif
 
 /**
@@ -577,7 +579,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
 void free_workqueue_attrs(struct workqueue_attrs *attrs);
 int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 97f37b5bae66..bd195d4db685 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4629,7 +4629,7 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
 *
 * Return: The allocated new workqueue_attr on success. %NULL on failure.
 */
-struct workqueue_attrs *alloc_workqueue_attrs(void)
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void)
 {
 	struct workqueue_attrs *attrs;
 
@@ -5682,12 +5682,12 @@ static struct workqueue_struct *__alloc_workqueue(const char *fmt,
 	else
 		wq_size = sizeof(*wq);
 
-	wq = kzalloc(wq_size, GFP_KERNEL);
+	wq = kzalloc_noprof(wq_size, GFP_KERNEL);
 	if (!wq)
 		return NULL;
 
 	if (flags & WQ_UNBOUND) {
-		wq->unbound_attrs = alloc_workqueue_attrs();
+		wq->unbound_attrs = alloc_workqueue_attrs_noprof();
 		if (!wq->unbound_attrs)
 			goto err_free_wq;
 	}
@@ -5777,9 +5777,9 @@ err_destroy:
 }
 
 __printf(1, 4)
-struct workqueue_struct *alloc_workqueue(const char *fmt,
-					 unsigned int flags,
-					 int max_active, ...)
+struct workqueue_struct *alloc_workqueue_noprof(const char *fmt,
+						unsigned int flags,
+						int max_active, ...)
 {
 	struct workqueue_struct *wq;
 	va_list args;
@@ -5794,7 +5794,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
 	return wq;
 }
-EXPORT_SYMBOL_GPL(alloc_workqueue);
+EXPORT_SYMBOL_GPL(alloc_workqueue_noprof);
 
 #ifdef CONFIG_LOCKDEP
 __printf(1, 5)
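
The workqueue.h/workqueue.c changes follow the kernel's memory allocation profiling convention from <linux/alloc_tag.h>: the real implementation keeps a _noprof name, and the public name becomes a macro wrapping the call in alloc_hooks(), which instantiates an alloc_tag at each call site so the allocation is charged to the caller (here, whoever creates the workqueue) rather than to kernel/workqueue.c. Internal allocations switch to _noprof variants for the same reason. A condensed sketch of the convention; struct foo and alloc_foo() are hypothetical examples, not kernel API:

    #include <linux/alloc_tag.h>	/* alloc_hooks() */
    #include <linux/slab.h>		/* kzalloc_noprof() */

    struct foo {
            int x;
    };

    /*
     * The _noprof variant does the real work; it uses _noprof allocators
     * internally so nothing is attributed to this file:
     */
    static inline struct foo *alloc_foo_noprof(gfp_t gfp)
    {
            return kzalloc_noprof(sizeof(struct foo), gfp);
    }

    /*
     * The public name is a macro: alloc_hooks() creates an alloc_tag at
     * the caller's source location and accounts the allocation there,
     * visible in /proc/allocinfo when CONFIG_MEM_ALLOC_PROFILING=y.
     */
    #define alloc_foo(...)	alloc_hooks(alloc_foo_noprof(__VA_ARGS__))

This is also why __alloc_workqueue() moves to kzalloc_noprof(): with profiling enabled, the workqueue_struct should be accounted to the alloc_workqueue() caller, not to workqueue.c itself.
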