Diffstat (limited to 'lib')
 lib/Kconfig.debug         |   2
 lib/Kconfig.kfence        |  26
 lib/bootconfig.c          |   2
 lib/cpumask.c             |   2
 lib/devres.c              |  82
 lib/dynamic_debug.c       |  60
 lib/iov_iter.c            | 103
 lib/kobject.c             |   2
 lib/kunit/executor.c      | 152
 lib/kunit/executor_test.c | 110
 lib/kunit/kunit-test.c    |  14
 lib/kunit/test.c          |   6
 lib/raid6/Makefile        |   4
 lib/scatterlist.c         |  11
 lib/stackdepot.c          | 118
 lib/string_helpers.c      |  20
 lib/test_kasan.c          |  26
 lib/test_kasan_module.c   |   2
 lib/test_printf.c         |  61
 lib/test_vmalloc.c        |   6
 lib/vsprintf.c            |  14
21 files changed, 629 insertions, 194 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6fdbf9613aec..9ef7ce18b4f5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -877,7 +877,7 @@ config DEBUG_MEMORY_INIT
 
 config MEMORY_NOTIFIER_ERROR_INJECT
 	tristate "Memory hotplug notifier error injection module"
-	depends on MEMORY_HOTPLUG_SPARSE && NOTIFIER_ERROR_INJECTION
+	depends on MEMORY_HOTPLUG && NOTIFIER_ERROR_INJECTION
 	help
 	  This option provides the ability to inject artificial errors to
 	  memory hotplug notifier chain callbacks.  It is controlled through
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index e641add33947..912f252a41fc 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -25,17 +25,6 @@ menuconfig KFENCE
 
 if KFENCE
 
-config KFENCE_STATIC_KEYS
-	bool "Use static keys to set up allocations"
-	default y
-	depends on JUMP_LABEL # To ensure performance, require jump labels
-	help
-	  Use static keys (static branches) to set up KFENCE allocations. Using
-	  static keys is normally recommended, because it avoids a dynamic
-	  branch in the allocator's fast path. However, with very low sample
-	  intervals, or on systems that do not support jump labels, a dynamic
-	  branch may still be an acceptable performance trade-off.
-
 config KFENCE_SAMPLE_INTERVAL
 	int "Default sample interval in milliseconds"
 	default 100
@@ -56,6 +45,21 @@ config KFENCE_NUM_OBJECTS
 	  pages are required; with one containing the object and two adjacent
 	  ones used as guard pages.
 
+config KFENCE_STATIC_KEYS
+	bool "Use static keys to set up allocations" if EXPERT
+	depends on JUMP_LABEL
+	help
+	  Use static keys (static branches) to set up KFENCE allocations. This
+	  option is only recommended when using very large sample intervals, or
+	  performance has carefully been evaluated with this option.
+
+	  Using static keys comes with trade-offs that need to be carefully
+	  evaluated given target workloads and system architectures. Notably,
+	  enabling and disabling static keys invoke IPI broadcasts, the latency
+	  and impact of which is much harder to predict than a dynamic branch.
+
+	  Say N if you are unsure.
+
 config KFENCE_STRESS_TEST_FAULTS
 	int "Stress testing of fault handling and error reporting" if EXPERT
 	default 0
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 70e0d52ffd24..74f3201ab8e5 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -50,7 +50,7 @@ static inline void * __init xbc_alloc_mem(size_t size)
 
 static inline void __init xbc_free_mem(void *addr, size_t size)
 {
-	memblock_free_ptr(addr, size);
+	memblock_free(addr, size);
 }
 
 #else /* !__KERNEL__ */
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c3c76b833384..a971a82d2f43 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(free_cpumask_var);
  */
 void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 {
-	memblock_free_early(__pa(mask), cpumask_size());
+	memblock_free(mask, cpumask_size());
 }
 
 #endif
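The two hunks above fold the old memblock_free_ptr()/memblock_free_early() variants into the single virtual-address-based memblock_free(). A minimal sketch of the new calling convention (the buffer and its size are hypothetical, not from this series):

	#include <linux/memblock.h>
	#include <linux/sizes.h>

	static int __init example_early_init(void)
	{
		void *buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);

		if (!buf)
			return -ENOMEM;
		/* ... use buf during early boot ... */
		memblock_free(buf, SZ_4K);	/* was: memblock_free_early(__pa(buf), SZ_4K) */
		return 0;
	}
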
diff --git a/lib/devres.c b/lib/devres.c
index b0e1c6702c71..14664bbb4875 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -528,3 +528,85 @@ void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
 }
 EXPORT_SYMBOL(pcim_iounmap_regions);
 #endif /* CONFIG_PCI */
+
+static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
+{
+	arch_phys_wc_del(*((int *)res));
+}
+
+/**
+ * devm_arch_phys_wc_add - Managed arch_phys_wc_add()
+ * @dev: Managed device
+ * @base: Memory base address
+ * @size: Size of memory range
+ *
+ * Adds a WC MTRR using arch_phys_wc_add() and sets up a release callback.
+ * See arch_phys_wc_add() for more information.
+ */
+int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size)
+{
+	int *mtrr;
+	int ret;
+
+	mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL);
+	if (!mtrr)
+		return -ENOMEM;
+
+	ret = arch_phys_wc_add(base, size);
+	if (ret < 0) {
+		devres_free(mtrr);
+		return ret;
+	}
+
+	*mtrr = ret;
+	devres_add(dev, mtrr);
+
+	return ret;
+}
+EXPORT_SYMBOL(devm_arch_phys_wc_add);
+
+struct arch_io_reserve_memtype_wc_devres {
+	resource_size_t start;
+	resource_size_t size;
+};
+
+static void devm_arch_io_free_memtype_wc_release(struct device *dev, void *res)
+{
+	const struct arch_io_reserve_memtype_wc_devres *this = res;
+
+	arch_io_free_memtype_wc(this->start, this->size);
+}
+
+/**
+ * devm_arch_io_reserve_memtype_wc - Managed arch_io_reserve_memtype_wc()
+ * @dev: Managed device
+ * @start: Memory base address
+ * @size: Size of memory range
+ *
+ * Reserves a memory range with WC caching using arch_io_reserve_memtype_wc()
+ * and sets up a release callback. See arch_io_reserve_memtype_wc() for more
+ * information.
+ */
+int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
+				    resource_size_t size)
+{
+	struct arch_io_reserve_memtype_wc_devres *dr;
+	int ret;
+
+	dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	ret = arch_io_reserve_memtype_wc(start, size);
+	if (ret < 0) {
+		devres_free(dr);
+		return ret;
+	}
+
+	dr->start = start;
+	dr->size = size;
+	devres_add(dev, dr);
+
+	return ret;
+}
+EXPORT_SYMBOL(devm_arch_io_reserve_memtype_wc);
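For orientation, a hypothetical PCI driver probe using the two new managed helpers together (device, BAR choice and error handling are illustrative, not from the patch):

	static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		resource_size_t base = pci_resource_start(pdev, 0);
		resource_size_t size = pci_resource_len(pdev, 0);
		int ret;

		ret = devm_arch_io_reserve_memtype_wc(&pdev->dev, base, size);
		if (ret < 0)
			return ret;

		/* Reservation and MTRR are both dropped automatically on unbind. */
		ret = devm_arch_phys_wc_add(&pdev->dev, base, size);
		return ret < 0 ? ret : 0;
	}
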
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index cb5abb42c16a..dd7f56af9aed 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -71,6 +71,8 @@ static DEFINE_MUTEX(ddebug_lock);
 static LIST_HEAD(ddebug_tables);
 static int verbose;
 module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, " dynamic_debug/control processing "
+		 "( 0 = off (default), 1 = module add/rm, 2 = >control summary, 3 = parsing, 4 = per-site changes)");
 
 /* Return the path relative to source root */
 static inline const char *trim_prefix(const char *path)
@@ -118,6 +120,8 @@ do {								\
 
 #define vpr_info(fmt, ...)	vnpr_info(1, fmt, ##__VA_ARGS__)
 #define v2pr_info(fmt, ...)	vnpr_info(2, fmt, ##__VA_ARGS__)
+#define v3pr_info(fmt, ...)	vnpr_info(3, fmt, ##__VA_ARGS__)
+#define v4pr_info(fmt, ...)	vnpr_info(4, fmt, ##__VA_ARGS__)
 
 static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
 {
@@ -130,7 +134,7 @@ static void vpr_info_dq(const struct ddebug_query *query, const char *msg)
 		fmtlen--;
 	}
 
-	vpr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
+	v3pr_info("%s: func=\"%s\" file=\"%s\" module=\"%s\" format=\"%.*s\" lineno=%u-%u\n",
		 msg,
		 query->function ?: "",
		 query->filename ?: "",
@@ -213,7 +217,7 @@ static int ddebug_change(const struct ddebug_query *query,
 				static_branch_enable(&dp->key.dd_key_true);
 #endif
 			dp->flags = newflags;
-			v2pr_info("changed %s:%d [%s]%s =%s\n",
+			v4pr_info("changed %s:%d [%s]%s =%s\n",
				  trim_prefix(dp->filename), dp->lineno,
				  dt->mod_name, dp->function,
				  ddebug_describe_flags(dp->flags, &fbuf));
@@ -273,7 +277,7 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
 		buf = end;
 	}
 
-	if (verbose) {
+	if (verbose >= 3) {
 		int i;
 		pr_info("split into words:");
 		for (i = 0; i < nwords; i++)
@@ -333,7 +337,7 @@ static int parse_linerange(struct ddebug_query *query, const char *first)
 	} else {
 		query->last_lineno = query->first_lineno;
 	}
-	vpr_info("parsed line %d-%d\n", query->first_lineno,
+	v3pr_info("parsed line %d-%d\n", query->first_lineno,
		 query->last_lineno);
 	return 0;
 }
@@ -447,7 +451,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
 		pr_err("bad flag-op %c, at start of %s\n", *str, str);
 		return -EINVAL;
 	}
-	vpr_info("op='%c'\n", op);
+	v3pr_info("op='%c'\n", op);
 
 	for (; *str ; ++str) {
 		for (i = ARRAY_SIZE(opt_array) - 1; i >= 0; i--) {
@@ -461,7 +465,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
 			return -EINVAL;
 		}
 	}
-	vpr_info("flags=0x%x\n", modifiers->flags);
+	v3pr_info("flags=0x%x\n", modifiers->flags);
 
 	/* calculate final flags, mask based upon op */
 	switch (op) {
@@ -477,7 +481,7 @@ static int ddebug_parse_flags(const char *str, struct flag_settings *modifiers)
 		modifiers->flags = 0;
 		break;
 	}
-	vpr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
+	v3pr_info("*flagsp=0x%x *maskp=0x%x\n", modifiers->flags, modifiers->mask);
 
 	return 0;
 }
@@ -529,7 +533,7 @@ static int ddebug_exec_queries(char *query, const char *modname)
 		if (!query || !*query || *query == '#')
 			continue;
 
-		vpr_info("query %d: \"%s\"\n", i, query);
+		vpr_info("query %d: \"%s\" mod:%s\n", i, query, modname ?: "*");
 
 		rc = ddebug_exec_query(query, modname);
 		if (rc < 0) {
@@ -540,8 +544,9 @@ static int ddebug_exec_queries(char *query, const char *modname)
 		}
 		i++;
 	}
-	vpr_info("processed %d queries, with %d matches, %d errs\n",
-		 i, nfound, errs);
+	if (i)
+		v2pr_info("processed %d queries, with %d matches, %d errs\n",
+			  i, nfound, errs);
 
 	if (exitcode)
 		return exitcode;
@@ -746,20 +751,17 @@ EXPORT_SYMBOL(__dynamic_ibdev_dbg);
 
 #endif
 
-#define DDEBUG_STRING_SIZE 1024
-static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE];
-
-static __init int ddebug_setup_query(char *str)
+/*
+ * Install a noop handler to make dyndbg look like a normal kernel cli param.
+ * This avoids warnings about dyndbg being an unknown cli param when supplied
+ * by a user.
+ */
+static __init int dyndbg_setup(char *str)
 {
-	if (strlen(str) >= DDEBUG_STRING_SIZE) {
-		pr_warn("ddebug boot param string too large\n");
-		return 0;
-	}
-	strlcpy(ddebug_setup_string, str, DDEBUG_STRING_SIZE);
 	return 1;
 }
 
-__setup("ddebug_query=", ddebug_setup_query);
+__setup("dyndbg=", dyndbg_setup);
 
 /*
  * File_ops->write method for <debugfs>/dynamic_debug/control.  Gathers the
@@ -781,7 +783,7 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
 	tmpbuf = memdup_user_nul(ubuf, len);
 	if (IS_ERR(tmpbuf))
 		return PTR_ERR(tmpbuf);
-	vpr_info("read %d bytes from userspace\n", (int)len);
+	v2pr_info("read %zu bytes from userspace\n", len);
 
 	ret = ddebug_exec_queries(tmpbuf, NULL);
 	kfree(tmpbuf);
@@ -969,7 +971,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
 	list_add(&dt->link, &ddebug_tables);
 	mutex_unlock(&ddebug_lock);
 
-	v2pr_info("%3u debug prints in module %s\n", n, dt->mod_name);
+	vpr_info("%3u debug prints in module %s\n", n, dt->mod_name);
 
 	return 0;
 }
@@ -1028,8 +1030,6 @@ int ddebug_remove_module(const char *mod_name)
 	struct ddebug_table *dt, *nextdt;
 	int ret = -ENOENT;
 
-	v2pr_info("removing module \"%s\"\n", mod_name);
-
 	mutex_lock(&ddebug_lock);
 	list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) {
 		if (dt->mod_name == mod_name) {
@@ -1039,6 +1039,8 @@ int ddebug_remove_module(const char *mod_name)
 		}
 	}
 	mutex_unlock(&ddebug_lock);
+	if (!ret)
+		v2pr_info("removed module \"%s\"\n", mod_name);
 	return ret;
 }
 
@@ -1121,16 +1123,6 @@ static int __init dynamic_debug_init(void)
		 entries, modct, (int)((modct * sizeof(struct ddebug_table)) >> 10),
		 (int)((entries * sizeof(struct _ddebug)) >> 10));
 
-	/* apply ddebug_query boot param, dont unload tables on err */
-	if (ddebug_setup_string[0] != '\0') {
-		pr_warn("ddebug_query param name is deprecated, change it to dyndbg\n");
-		ret = ddebug_exec_queries(ddebug_setup_string, NULL);
-		if (ret < 0)
-			pr_warn("Invalid ddebug boot param %s\n",
-				ddebug_setup_string);
-		else
-			pr_info("%d changes by ddebug_query\n", ret);
-	}
 	/* now that ddebug tables are loaded, process all boot args
 	 * again to find and activate queries given in dyndbg params.
 	 * While this has already been done for known boot params, it
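The new v3pr_info()/v4pr_info() levels build on the vnpr_info() helper already present in this file, which gates output on the verbose module parameter roughly like this (paraphrased for context):

	#define vnpr_info(lvl, fmt, ...)			\
	do {							\
		if (verbose >= lvl)				\
			pr_info(fmt, ##__VA_ARGS__);		\
	} while (0)
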
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 755c10c5138c..66a740e6e153 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -191,7 +191,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		from = kaddr + offset;
 
@@ -275,7 +275,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -430,35 +430,81 @@ out:
 }
 
 /*
+ * fault_in_iov_iter_readable - fault in iov iterator for reading
+ * @i: iterator
+ * @size: maximum length
+ *
  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
- * bytes.  For each iovec, fault in each page that constitutes the iovec.
+ * @size.  For each iovec, fault in each page that constitutes the iovec.
+ *
+ * Returns the number of bytes not faulted in (like copy_to_user() and
+ * copy_from_user()).
  *
- * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
- * because it is an invalid address).
+ * Always returns 0 for non-userspace iterators.
 */
-int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes)
+size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
 {
 	if (iter_is_iovec(i)) {
+		size_t count = min(size, iov_iter_count(i));
 		const struct iovec *p;
 		size_t skip;
 
-		if (bytes > i->count)
-			bytes = i->count;
-		for (p = i->iov, skip = i->iov_offset; bytes; p++, skip = 0) {
-			size_t len = min(bytes, p->iov_len - skip);
-			int err;
+		size -= count;
+		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+			size_t len = min(count, p->iov_len - skip);
+			size_t ret;
 
 			if (unlikely(!len))
 				continue;
-			err = fault_in_pages_readable(p->iov_base + skip, len);
-			if (unlikely(err))
-				return err;
-			bytes -= len;
+			ret = fault_in_readable(p->iov_base + skip, len);
+			count -= len - ret;
+			if (ret)
+				break;
 		}
+		return count + size;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
+EXPORT_SYMBOL(fault_in_iov_iter_readable);
+
+/*
+ * fault_in_iov_iter_writeable - fault in iov iterator for writing
+ * @i: iterator
+ * @size: maximum length
+ *
+ * Faults in the iterator using get_user_pages(), i.e., without triggering
+ * hardware page faults.  This is primarily useful when we already know that
+ * some or all of the pages in @i aren't in memory.
+ *
+ * Returns the number of bytes not faulted in, like copy_to_user() and
+ * copy_from_user().
+ *
+ * Always returns 0 for non-user-space iterators.
+ */
+size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
+{
+	if (iter_is_iovec(i)) {
+		size_t count = min(size, iov_iter_count(i));
+		const struct iovec *p;
+		size_t skip;
+
+		size -= count;
+		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
+			size_t len = min(count, p->iov_len - skip);
+			size_t ret;
+
+			if (unlikely(!len))
+				continue;
+			ret = fault_in_safe_writeable(p->iov_base + skip, len);
+			count -= len - ret;
+			if (ret)
+				break;
+		}
+		return count + size;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(fault_in_iov_iter_writeable);
 
 void iov_iter_init(struct iov_iter *i, unsigned int direction,
		   const struct iovec *iov, unsigned long nr_segs,
@@ -467,6 +513,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
 	WARN_ON(direction & ~(READ | WRITE));
 	*i = (struct iov_iter) {
 		.iter_type = ITER_IOVEC,
+		.nofault = false,
 		.data_source = direction,
 		.iov = iov,
 		.nr_segs = nr_segs,
@@ -1481,14 +1528,18 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 		return 0;
 
 	if (likely(iter_is_iovec(i))) {
+		unsigned int gup_flags = 0;
 		unsigned long addr;
 
+		if (iov_iter_rw(i) != WRITE)
+			gup_flags |= FOLL_WRITE;
+		if (i->nofault)
+			gup_flags |= FOLL_NOFAULT;
+
 		addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
-		res = get_user_pages_fast(addr, n,
-				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
-				pages);
-		if (unlikely(res < 0))
+		res = get_user_pages_fast(addr, n, gup_flags, pages);
+		if (unlikely(res <= 0))
 			return res;
 		return (res == n ? len : res * PAGE_SIZE) - *start;
 	}
@@ -1603,17 +1654,23 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 		return 0;
 
 	if (likely(iter_is_iovec(i))) {
+		unsigned int gup_flags = 0;
 		unsigned long addr;
 
+		if (iov_iter_rw(i) != WRITE)
+			gup_flags |= FOLL_WRITE;
+		if (i->nofault)
+			gup_flags |= FOLL_NOFAULT;
+
 		addr = first_iovec_segment(i, &len, start, maxsize, ~0U);
 		n = DIV_ROUND_UP(len, PAGE_SIZE);
 		p = get_pages_array(n);
 		if (!p)
 			return -ENOMEM;
-		res = get_user_pages_fast(addr, n,
-				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
-		if (unlikely(res < 0)) {
+		res = get_user_pages_fast(addr, n, gup_flags, p);
+		if (unlikely(res <= 0)) {
 			kvfree(p);
+			*pages = NULL;
 			return res;
 		}
 		*pages = p;
diff --git a/lib/kobject.c b/lib/kobject.c
index ea53b30cf483..4a56f519139d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -777,7 +777,7 @@ static struct kobj_type dynamic_kobj_ktype = {
  * call to kobject_put() and not kfree(), as kobject_init() has
  * already been called on this structure.
  */
-struct kobject *kobject_create(void)
+static struct kobject *kobject_create(void)
 {
 	struct kobject *kobj;
 
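fault_in_iov_iter_readable(), introduced in the iov_iter.c hunks above, now reports the number of bytes it could not fault in rather than an error code, so callers loop the way the buffered-write paths do. A hedged sketch of the retry pattern (not taken verbatim from this series):

	size_t bytes = iov_iter_count(from);

	do {
		if (fault_in_iov_iter_readable(from, bytes) == bytes)
			return -EFAULT;		/* nothing could be faulted in */
		/* ... map the destination page and copy_page_from_iter() ... */
	} while (iov_iter_count(from));
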
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index acd1de436f59..22640c9ee819 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -15,23 +15,89 @@ extern struct kunit_suite * const * const __kunit_suites_end[];
 #if IS_BUILTIN(CONFIG_KUNIT)
 
 static char *filter_glob_param;
+static char *action_param;
+
 module_param_named(filter_glob, filter_glob_param, charp, 0);
 MODULE_PARM_DESC(filter_glob,
-		"Filter which KUnit test suites run at boot-time, e.g. list*");
+		"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
+module_param_named(action, action_param, charp, 0);
+MODULE_PARM_DESC(action,
+		 "Changes KUnit executor behavior, valid values are:\n"
+		 "<none>: run the tests like normal\n"
+		 "'list' to list test names instead of running them.\n");
+
+/* glob_match() needs NULL terminated strings, so we need a copy of filter_glob_param. */
+struct kunit_test_filter {
+	char *suite_glob;
+	char *test_glob;
+};
+
+/* Split "suite_glob.test_glob" into two.  Assumes filter_glob is not empty. */
+static void kunit_parse_filter_glob(struct kunit_test_filter *parsed,
+				    const char *filter_glob)
+{
+	const int len = strlen(filter_glob);
+	const char *period = strchr(filter_glob, '.');
+
+	if (!period) {
+		parsed->suite_glob = kzalloc(len + 1, GFP_KERNEL);
+		parsed->test_glob = NULL;
+		strcpy(parsed->suite_glob, filter_glob);
+		return;
+	}
+
+	parsed->suite_glob = kzalloc(period - filter_glob + 1, GFP_KERNEL);
+	parsed->test_glob = kzalloc(len - (period - filter_glob) + 1, GFP_KERNEL);
+
+	strncpy(parsed->suite_glob, filter_glob, period - filter_glob);
+	strncpy(parsed->test_glob, period + 1, len - (period - filter_glob));
+}
+
+/* Create a copy of suite with only tests that match test_glob. */
+static struct kunit_suite *
+kunit_filter_tests(struct kunit_suite *const suite, const char *test_glob)
+{
+	int n = 0;
+	struct kunit_case *filtered, *test_case;
+	struct kunit_suite *copy;
+
+	kunit_suite_for_each_test_case(suite, test_case) {
+		if (!test_glob || glob_match(test_glob, test_case->name))
+			++n;
+	}
+
+	if (n == 0)
+		return NULL;
+
+	/* Use memcpy to workaround copy->name being const. */
+	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
+	memcpy(copy, suite, sizeof(*copy));
+
+	filtered = kcalloc(n + 1, sizeof(*filtered), GFP_KERNEL);
+
+	n = 0;
+	kunit_suite_for_each_test_case(suite, test_case) {
+		if (!test_glob || glob_match(test_glob, test_case->name))
+			filtered[n++] = *test_case;
+	}
+
+	copy->test_cases = filtered;
+	return copy;
+}
 
 static char *kunit_shutdown;
 core_param(kunit_shutdown, kunit_shutdown, charp, 0644);
 
 static struct kunit_suite * const *
 kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
-			const char *filter_glob)
+		      struct kunit_test_filter *filter)
 {
 	int i, n = 0;
-	struct kunit_suite **filtered;
+	struct kunit_suite **filtered, *filtered_suite;
 
 	n = 0;
-	for (i = 0; subsuite[i] != NULL; ++i) {
-		if (glob_match(filter_glob, subsuite[i]->name))
+	for (i = 0; subsuite[i]; ++i) {
+		if (glob_match(filter->suite_glob, subsuite[i]->name))
 			++n;
 	}
 
@@ -44,8 +110,11 @@ kunit_filter_subsuite(struct kunit_suite * const * const subsuite,
 
 	n = 0;
 	for (i = 0; subsuite[i] != NULL; ++i) {
-		if (glob_match(filter_glob, subsuite[i]->name))
-			filtered[n++] = subsuite[i];
+		if (!glob_match(filter->suite_glob, subsuite[i]->name))
+			continue;
+		filtered_suite = kunit_filter_tests(subsuite[i], filter->test_glob);
+		if (filtered_suite)
+			filtered[n++] = filtered_suite;
 	}
 	filtered[n] = NULL;
 
@@ -57,12 +126,32 @@ struct suite_set {
 	struct kunit_suite * const * const *end;
 };
 
+static void kunit_free_subsuite(struct kunit_suite * const *subsuite)
+{
+	unsigned int i;
+
+	for (i = 0; subsuite[i]; i++)
+		kfree(subsuite[i]);
+
+	kfree(subsuite);
+}
+
+static void kunit_free_suite_set(struct suite_set suite_set)
+{
+	struct kunit_suite * const * const *suites;
+
+	for (suites = suite_set.start; suites < suite_set.end; suites++)
+		kunit_free_subsuite(*suites);
+	kfree(suite_set.start);
+}
+
 static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
					    const char *filter_glob)
 {
 	int i;
 	struct kunit_suite * const **copy, * const *filtered_subsuite;
 	struct suite_set filtered;
+	struct kunit_test_filter filter;
 
 	const size_t max = suite_set->end - suite_set->start;
 
@@ -73,12 +162,17 @@ static struct suite_set kunit_filter_suites(const struct suite_set *suite_set,
 		return filtered;
 	}
 
+	kunit_parse_filter_glob(&filter, filter_glob);
+
 	for (i = 0; i < max; ++i) {
-		filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], filter_glob);
+		filtered_subsuite = kunit_filter_subsuite(suite_set->start[i], &filter);
 		if (filtered_subsuite)
 			*copy++ = filtered_subsuite;
 	}
 	filtered.end = copy;
+
+	kfree(filter.suite_glob);
+	kfree(filter.test_glob);
 	return filtered;
 }
 
@@ -109,9 +203,35 @@ static void kunit_print_tap_header(struct suite_set *suite_set)
 	pr_info("1..%d\n", num_of_suites);
 }
 
-int kunit_run_all_tests(void)
+static void kunit_exec_run_tests(struct suite_set *suite_set)
+{
+	struct kunit_suite * const * const *suites;
+
+	kunit_print_tap_header(suite_set);
+
+	for (suites = suite_set->start; suites < suite_set->end; suites++)
+		__kunit_test_suites_init(*suites);
+}
+
+static void kunit_exec_list_tests(struct suite_set *suite_set)
 {
+	unsigned int i;
 	struct kunit_suite * const * const *suites;
+	struct kunit_case *test_case;
+
+	/* Hack: print a tap header so kunit.py can find the start of KUnit output. */
+	pr_info("TAP version 14\n");
+
+	for (suites = suite_set->start; suites < suite_set->end; suites++)
+		for (i = 0; (*suites)[i] != NULL; i++) {
+			kunit_suite_for_each_test_case((*suites)[i], test_case) {
+				pr_info("%s.%s\n", (*suites)[i]->name, test_case->name);
+			}
+		}
+}
+
+int kunit_run_all_tests(void)
+{
 	struct suite_set suite_set = {
 		.start = __kunit_suites_start,
 		.end = __kunit_suites_end,
@@ -120,15 +240,15 @@ int kunit_run_all_tests(void)
 	if (filter_glob_param)
 		suite_set = kunit_filter_suites(&suite_set, filter_glob_param);
 
-	kunit_print_tap_header(&suite_set);
-
-	for (suites = suite_set.start; suites < suite_set.end; suites++)
-		__kunit_test_suites_init(*suites);
+	if (!action_param)
+		kunit_exec_run_tests(&suite_set);
+	else if (strcmp(action_param, "list") == 0)
+		kunit_exec_list_tests(&suite_set);
+	else
+		pr_err("kunit executor: unknown action '%s'\n", action_param);
 
 	if (filter_glob_param) { /* a copy was made of each array */
-		for (suites = suite_set.start; suites < suite_set.end; suites++)
-			kfree(*suites);
-		kfree(suite_set.start);
+		kunit_free_suite_set(suite_set);
 	}
 
 	kunit_handle_shutdown();
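Putting the two globs together: a suite/test pair runs only if both halves match. An illustrative helper (not part of the patch) that condenses the decision made across kunit_filter_subsuite() and kunit_filter_tests(), using the kernel's glob_match() from lib/glob.c:

	static bool kunit_test_runs(const struct kunit_test_filter *f,
				    const char *suite_name, const char *test_name)
	{
		return glob_match(f->suite_glob, suite_name) &&
		       (!f->test_glob || glob_match(f->test_glob, test_name));
	}
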
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index e14a18af573d..4ed57fd94e42 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -9,38 +9,103 @@
 #include <kunit/test.h>
 
 static void kfree_at_end(struct kunit *test, const void *to_free);
+static void free_subsuite_at_end(struct kunit *test,
+				 struct kunit_suite *const *to_free);
 static struct kunit_suite *alloc_fake_suite(struct kunit *test,
-					    const char *suite_name);
+					    const char *suite_name,
+					    struct kunit_case *test_cases);
+
+static void dummy_test(struct kunit *test) {}
+
+static struct kunit_case dummy_test_cases[] = {
+	/* .run_case is not important, just needs to be non-NULL */
+	{ .name = "test1", .run_case = dummy_test },
+	{ .name = "test2", .run_case = dummy_test },
+	{},
+};
+
+static void parse_filter_test(struct kunit *test)
+{
+	struct kunit_test_filter filter = {NULL, NULL};
+
+	kunit_parse_filter_glob(&filter, "suite");
+	KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+	KUNIT_EXPECT_FALSE(test, filter.test_glob);
+	kfree(filter.suite_glob);
+	kfree(filter.test_glob);
+
+	kunit_parse_filter_glob(&filter, "suite.test");
+	KUNIT_EXPECT_STREQ(test, filter.suite_glob, "suite");
+	KUNIT_EXPECT_STREQ(test, filter.test_glob, "test");
+	kfree(filter.suite_glob);
+	kfree(filter.test_glob);
+}
 
 static void filter_subsuite_test(struct kunit *test)
 {
 	struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
 	struct kunit_suite * const *filtered;
+	struct kunit_test_filter filter = {
+		.suite_glob = "suite2",
+		.test_glob = NULL,
+	};
 
-	subsuite[0] = alloc_fake_suite(test, "suite1");
-	subsuite[1] = alloc_fake_suite(test, "suite2");
+	subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+	subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
 
 	/* Want: suite1, suite2, NULL -> suite2, NULL */
-	filtered = kunit_filter_subsuite(subsuite, "suite2*");
+	filtered = kunit_filter_subsuite(subsuite, &filter);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
-	kfree_at_end(test, filtered);
+	free_subsuite_at_end(test, filtered);
 
+	/* Validate we just have suite2 */
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
 	KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
+	KUNIT_EXPECT_FALSE(test, filtered[1]);
+}
+
+static void filter_subsuite_test_glob_test(struct kunit *test)
+{
+	struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
+	struct kunit_suite * const *filtered;
+	struct kunit_test_filter filter = {
+		.suite_glob = "suite2",
+		.test_glob = "test2",
+	};
+
+	subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+	subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
 
+	/* Want: suite1, suite2, NULL -> suite2 (just test1), NULL */
+	filtered = kunit_filter_subsuite(subsuite, &filter);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered);
+	free_subsuite_at_end(test, filtered);
+
+	/* Validate we just have suite2 */
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]);
+	KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->name, "suite2");
 	KUNIT_EXPECT_FALSE(test, filtered[1]);
+
+	/* Now validate we just have test2 */
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered[0]->test_cases);
+	KUNIT_EXPECT_STREQ(test, (const char *)filtered[0]->test_cases[0].name, "test2");
+	KUNIT_EXPECT_FALSE(test, filtered[0]->test_cases[1].name);
 }
 
 static void filter_subsuite_to_empty_test(struct kunit *test)
 {
 	struct kunit_suite *subsuite[3] = {NULL, NULL, NULL};
 	struct kunit_suite * const *filtered;
+	struct kunit_test_filter filter = {
+		.suite_glob = "not_found",
+		.test_glob = NULL,
+	};
 
-	subsuite[0] = alloc_fake_suite(test, "suite1");
-	subsuite[1] = alloc_fake_suite(test, "suite2");
+	subsuite[0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
+	subsuite[1] = alloc_fake_suite(test, "suite2", dummy_test_cases);
 
-	filtered = kunit_filter_subsuite(subsuite, "not_found");
-	kfree_at_end(test, filtered); /* just in case */
+	filtered = kunit_filter_subsuite(subsuite, &filter);
+	free_subsuite_at_end(test, filtered); /* just in case */
 
 	KUNIT_EXPECT_FALSE_MSG(test, filtered,
			       "should be NULL to indicate no match");
@@ -52,7 +117,7 @@ static void kfree_subsuites_at_end(struct kunit *test, struct suite_set *suite_s
 	kfree_at_end(test, suite_set->start);
 	for (suites = suite_set->start; suites < suite_set->end; suites++)
-		kfree_at_end(test, *suites);
+		free_subsuite_at_end(test, *suites);
 }
 
@@ -74,8 +139,8 @@ static void filter_suites_test(struct kunit *test)
 	struct suite_set filtered = {.start = NULL, .end = NULL};
 
 	/* Emulate two files, each having one suite */
-	subsuites[0][0] = alloc_fake_suite(test, "suite0");
-	subsuites[1][0] = alloc_fake_suite(test, "suite1");
+	subsuites[0][0] = alloc_fake_suite(test, "suite0", dummy_test_cases);
+	subsuites[1][0] = alloc_fake_suite(test, "suite1", dummy_test_cases);
 
 	/* Filter out suite1 */
 	filtered = kunit_filter_suites(&suite_set, "suite0");
@@ -84,11 +149,14 @@ static void filter_suites_test(struct kunit *test)
 
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0]);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filtered.start[0][0]);
 	KUNIT_EXPECT_STREQ(test, (const char *)filtered.start[0][0]->name, "suite0");
 }
 
 static struct kunit_case executor_test_cases[] = {
+	KUNIT_CASE(parse_filter_test),
 	KUNIT_CASE(filter_subsuite_test),
+	KUNIT_CASE(filter_subsuite_test_glob_test),
 	KUNIT_CASE(filter_subsuite_to_empty_test),
 	KUNIT_CASE(filter_suites_test),
 	{}
@@ -120,14 +188,30 @@ static void kfree_at_end(struct kunit *test, const void *to_free)
			       (void *)to_free);
 }
 
+static void free_subsuite_res_free(struct kunit_resource *res)
+{
+	kunit_free_subsuite(res->data);
+}
+
+static void free_subsuite_at_end(struct kunit *test,
+				 struct kunit_suite *const *to_free)
+{
+	if (IS_ERR_OR_NULL(to_free))
+		return;
+	kunit_alloc_resource(test, NULL, free_subsuite_res_free,
+			     GFP_KERNEL, (void *)to_free);
+}
+
 static struct kunit_suite *alloc_fake_suite(struct kunit *test,
-					    const char *suite_name)
+					    const char *suite_name,
+					    struct kunit_case *test_cases)
 {
 	struct kunit_suite *suite;
 
 	/* We normally never expect to allocate suites, hence the non-const cast. */
 	suite = kunit_kzalloc(test, sizeof(*suite), GFP_KERNEL);
 	strncpy((char *)suite->name, suite_name, sizeof(suite->name) - 1);
+	suite->test_cases = test_cases;
 
 	return suite;
 }
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index d69efcbed624..555601d17f79 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -415,12 +415,15 @@ static struct kunit_suite kunit_log_test_suite = {
 
 static void kunit_log_test(struct kunit *test)
 {
-	struct kunit_suite *suite = &kunit_log_test_suite;
+	struct kunit_suite suite;
+
+	suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
 
 	kunit_log(KERN_INFO, test, "put this in log.");
 	kunit_log(KERN_INFO, test, "this too.");
-	kunit_log(KERN_INFO, suite, "add to suite log.");
-	kunit_log(KERN_INFO, suite, "along with this.");
+	kunit_log(KERN_INFO, &suite, "add to suite log.");
+	kunit_log(KERN_INFO, &suite, "along with this.");
 
 #ifdef CONFIG_KUNIT_DEBUGFS
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
@@ -428,12 +431,11 @@ static void kunit_log_test(struct kunit *test)
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
				     strstr(test->log, "this too."));
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
-				     strstr(suite->log, "add to suite log."));
+				     strstr(suite.log, "add to suite log."));
 	KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
-				     strstr(suite->log, "along with this."));
+				     strstr(suite.log, "along with this."));
 #else
 	KUNIT_EXPECT_PTR_EQ(test, test->log, (char *)NULL);
-	KUNIT_EXPECT_PTR_EQ(test, suite->log, (char *)NULL);
 #endif
 }
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index f246b847024e..3bd741e50a2d 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -190,10 +190,10 @@ enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
 }
 EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
 
+static size_t kunit_suite_counter = 1;
+
 static void kunit_print_subtest_end(struct kunit_suite *suite)
 {
-	static size_t kunit_suite_counter = 1;
-
 	kunit_print_ok_not_ok((void *)suite, false,
			      kunit_suite_has_succeeded(suite),
			      kunit_suite_counter++,
@@ -583,6 +583,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites)
 
 	for (i = 0; suites[i] != NULL; i++)
 		kunit_exit_suite(suites[i]);
+
+	kunit_suite_counter = 1;
 }
 EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index c770570bfe4f..45e17619422b 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -14,6 +14,8 @@ hostprogs += mktables
 
 ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
 
 ifdef CONFIG_CC_IS_CLANG
 # clang ppc port does not yet support -maltivec when -msoft-float is
@@ -34,6 +36,8 @@ endif
 # ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
 NEON_FLAGS := -ffreestanding
+# Enable <arm_neon.h>
+NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include)
 ifeq ($(ARCH),arm)
 NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
 endif
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index abb3432ed744..d5e82e4a57ad 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -828,8 +828,7 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
  * stops @miter.
  *
  * Context:
- *   Don't care if @miter is stopped, or not proceeded yet.
- *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
+ *   Don't care.
  *
  * Returns:
  *   true if @miter contains the valid mapping.  false if end of sg
@@ -865,8 +864,7 @@ EXPORT_SYMBOL(sg_miter_skip);
  *   @miter->addr and @miter->length point to the current mapping.
  *
  * Context:
- *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
- *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
+ *   May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
@@ -906,8 +904,7 @@ EXPORT_SYMBOL(sg_miter_next);
 *   need to be released during iteration.
 *
 * Context:
- *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
- *   otherwise.
+ *   Don't care otherwise.
 */
 void sg_miter_stop(struct sg_mapping_iter *miter)
 {
@@ -922,7 +919,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
			flush_dcache_page(miter->page);
 
 		if (miter->__flags & SG_MITER_ATOMIC) {
-			WARN_ON_ONCE(preemptible());
+			WARN_ON_ONCE(!pagefault_disabled());
			kunmap_atomic(miter->addr);
 		} else
			kunmap(miter->page);
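With the relaxed context rules above, an SG_MITER_ATOMIC iteration only has to keep page faults disabled, which kmap_atomic() already does itself. A sketch of a typical copy-out loop under these assumptions (sgl/nents/buf are placeholders):

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr is a kmap_atomic() mapping: no sleeping here */
		memcpy(buf, miter.addr, miter.length);
		buf += miter.length;
	}
	sg_miter_stop(&miter);
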
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 0a2e417f83cb..b437ae79aca1 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -20,7 +20,6 @@
  */
 
 #include <linux/gfp.h>
-#include <linux/interrupt.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -102,8 +101,8 @@ static bool init_stack_slab(void **prealloc)
 }
 
 /* Allocation of a new stack in raw storage */
-static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
-		u32 hash, void **prealloc, gfp_t alloc_flags)
+static struct stack_record *
+depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
 	struct stack_record *stack;
 	size_t required_size = struct_size(stack, entries, size);
@@ -215,6 +214,49 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
 }
 
 /**
+ * stack_depot_snprint - print stack entries from a depot into a buffer
+ *
+ * @handle:	Stack depot handle which was returned from
+ *		stack_depot_save().
+ * @buf:	Pointer to the print buffer
+ *
+ * @size:	Size of the print buffer
+ *
+ * @spaces:	Number of leading spaces to print
+ *
+ * Return:	Number of bytes printed.
+ */
+int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
+			int spaces)
+{
+	unsigned long *entries;
+	unsigned int nr_entries;
+
+	nr_entries = stack_depot_fetch(handle, &entries);
+	return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
+						spaces) : 0;
+}
+EXPORT_SYMBOL_GPL(stack_depot_snprint);
+
+/**
+ * stack_depot_print - print stack entries from a depot
+ *
+ * @stack:	Stack depot handle which was returned from
+ *		stack_depot_save().
+ *
+ */
+void stack_depot_print(depot_stack_handle_t stack)
+{
+	unsigned long *entries;
+	unsigned int nr_entries;
+
+	nr_entries = stack_depot_fetch(stack, &entries);
+	if (nr_entries > 0)
+		stack_trace_print(entries, nr_entries, 0);
+}
+EXPORT_SYMBOL_GPL(stack_depot_print);
+
+/**
  * stack_depot_fetch - Fetch stack entries from a depot
  *
  * @handle:		Stack depot handle which was returned from
@@ -232,6 +274,9 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 	struct stack_record *stack;
 
 	*entries = NULL;
+	if (!handle)
+		return 0;
+
 	if (parts.slabindex > depot_index) {
 		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
@@ -248,17 +293,28 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 EXPORT_SYMBOL_GPL(stack_depot_fetch);
 
 /**
- * stack_depot_save - Save a stack trace from an array
+ * __stack_depot_save - Save a stack trace from an array
  *
  * @entries:		Pointer to storage array
  * @nr_entries:		Size of the storage array
  * @alloc_flags:	Allocation gfp flags
+ * @can_alloc:		Allocate stack slabs (increased chance of failure if false)
+ *
+ * Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
+ * %true, is allowed to replenish the stack slab pool in case no space is left
+ * (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
+ * any allocations and will fail if no space is left to store the stack trace.
+ *
+ * Context: Any context, but setting @can_alloc to %false is required if
+ *          alloc_pages() cannot be used from the current context. Currently
+ *          this is the case from contexts where neither %GFP_ATOMIC nor
+ *          %GFP_NOWAIT can be used (NMI, raw_spin_lock).
 *
- * Return: The handle of the stack struct stored in depot
+ * Return: The handle of the stack struct stored in depot, 0 on failure.
 */
-depot_stack_handle_t stack_depot_save(unsigned long *entries,
-				      unsigned int nr_entries,
-				      gfp_t alloc_flags)
+depot_stack_handle_t __stack_depot_save(unsigned long *entries,
+					unsigned int nr_entries,
+					gfp_t alloc_flags, bool can_alloc)
 {
 	struct stack_record *found = NULL, **bucket;
 	depot_stack_handle_t retval = 0;
@@ -291,7 +347,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
-	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+	if (unlikely(can_alloc && !smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
@@ -309,9 +365,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
 	found = find_stack(*bucket, entries, nr_entries, hash);
 	if (!found) {
-		struct stack_record *new =
-			depot_alloc_stack(entries, nr_entries,
-					  hash, &prealloc, alloc_flags);
+		struct stack_record *new = depot_alloc_stack(entries, nr_entries, hash, &prealloc);
+
 		if (new) {
			new->next = *bucket;
			/*
@@ -340,27 +395,24 @@ exit:
 fast_exit:
 	return retval;
 }
-EXPORT_SYMBOL_GPL(stack_depot_save);
-
-static inline int in_irqentry_text(unsigned long ptr)
-{
-	return (ptr >= (unsigned long)&__irqentry_text_start &&
-		ptr < (unsigned long)&__irqentry_text_end) ||
-	       (ptr >= (unsigned long)&__softirqentry_text_start &&
-		ptr < (unsigned long)&__softirqentry_text_end);
-}
+EXPORT_SYMBOL_GPL(__stack_depot_save);
 
-unsigned int filter_irq_stacks(unsigned long *entries,
-			       unsigned int nr_entries)
+/**
+ * stack_depot_save - Save a stack trace from an array
+ *
+ * @entries:		Pointer to storage array
+ * @nr_entries:		Size of the storage array
+ * @alloc_flags:	Allocation gfp flags
+ *
+ * Context: Contexts where allocations via alloc_pages() are allowed.
+ *          See __stack_depot_save() for more details.
+ *
+ * Return: The handle of the stack struct stored in depot, 0 on failure.
+ */
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+				      unsigned int nr_entries,
+				      gfp_t alloc_flags)
 {
-	unsigned int i;
-
-	for (i = 0; i < nr_entries; i++) {
-		if (in_irqentry_text(entries[i])) {
-			/* Include the irqentry function into the stack. */
-			return i + 1;
-		}
-	}
-	return nr_entries;
+	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
 }
-EXPORT_SYMBOL_GPL(filter_irq_stacks);
+EXPORT_SYMBOL_GPL(stack_depot_save);
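A sketch of how a caller might combine the new entry points from a context where allocation is unsafe (array size and GFP flags illustrative):

	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	depot_stack_handle_t handle;

	/* can_alloc=false: never calls alloc_pages(), so this may return 0 */
	handle = __stack_depot_save(entries, nr, GFP_NOWAIT, false);
	if (handle)
		stack_depot_print(handle);
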
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index faa9d8e4e2c5..d5d008f5b1d9 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -883,6 +883,26 @@ char *strreplace(char *s, char old, char new)
 }
 EXPORT_SYMBOL(strreplace);
 
+/**
+ * memcpy_and_pad - Copy one buffer to another with padding
+ * @dest: Where to copy to
+ * @dest_len: The destination buffer size
+ * @src: Where to copy from
+ * @count: The number of bytes to copy
+ * @pad: Character to use for padding if space is left in destination.
+ */
+void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
+		    int pad)
+{
+	if (dest_len > count) {
+		memcpy(dest, src, count);
+		memset(dest + count, pad, dest_len - count);
+	} else {
+		memcpy(dest, src, dest_len);
+	}
+}
+EXPORT_SYMBOL(memcpy_and_pad);
+
 #ifdef CONFIG_FORTIFY_SOURCE
 void fortify_panic(const char *name)
 {
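Usage is straightforward, e.g. filling a fixed-width, space-padded identifier field (values illustrative):

	char model[8];

	memcpy_and_pad(model, sizeof(model), "ab", 2, ' ');
	/* model now holds "ab      " -- note: no terminating NUL is added */
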
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ebed755ebf34..67ed689a0b1b 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -440,6 +440,7 @@ static void kmalloc_oob_memset_2(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
 	kfree(ptr);
 }
@@ -452,6 +453,7 @@ static void kmalloc_oob_memset_4(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
 	kfree(ptr);
 }
@@ -464,6 +466,7 @@ static void kmalloc_oob_memset_8(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
 	kfree(ptr);
 }
@@ -476,6 +479,7 @@ static void kmalloc_oob_memset_16(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
 	kfree(ptr);
 }
@@ -488,16 +492,17 @@ static void kmalloc_oob_in_memset(struct kunit *test)
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
+	OPTIMIZER_HIDE_VAR(size);
 	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
 	kfree(ptr);
 }
 
-static void kmalloc_memmove_invalid_size(struct kunit *test)
+static void kmalloc_memmove_negative_size(struct kunit *test)
 {
 	char *ptr;
 	size_t size = 64;
-	volatile size_t invalid_size = -2;
+	size_t invalid_size = -2;
 
	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
@@ -510,6 +515,22 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
 	memset((char *)ptr, 0, 64);
+	OPTIMIZER_HIDE_VAR(invalid_size);
+	KUNIT_EXPECT_KASAN_FAIL(test,
+		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
+	kfree(ptr);
+}
+
+static void kmalloc_memmove_invalid_size(struct kunit *test)
+{
+	char *ptr;
+	size_t size = 64;
+	volatile size_t invalid_size = size;
+
+	ptr = kmalloc(size, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+
+	memset((char *)ptr, 0, 64);
 	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
 	kfree(ptr);
@@ -1129,6 +1150,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_oob_memset_4),
 	KUNIT_CASE(kmalloc_oob_memset_8),
 	KUNIT_CASE(kmalloc_oob_memset_16),
+	KUNIT_CASE(kmalloc_memmove_negative_size),
 	KUNIT_CASE(kmalloc_memmove_invalid_size),
 	KUNIT_CASE(kmalloc_uaf),
 	KUNIT_CASE(kmalloc_uaf_memset),
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index 7ebf433edef3..b112cbc835e9 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -35,6 +35,8 @@ static noinline void __init copy_user_test(void)
 		return;
 	}
 
+	OPTIMIZER_HIDE_VAR(size);
+
 	pr_info("out-of-bounds in copy_from_user()\n");
 	unused = copy_from_user(kmem, usermem, size + 1);
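OPTIMIZER_HIDE_VAR() (from <linux/compiler.h>) makes the size opaque to the compiler, so the intentionally out-of-bounds access cannot be constant-folded or elided before KASAN sees it at run time. The pattern in isolation (a sketch, not a test from the patch):

	size_t size = 16;
	char *ptr = kmalloc(size, GFP_KERNEL);

	OPTIMIZER_HIDE_VAR(size);	/* compiler can no longer prove the bounds */
	memset(ptr + size - 1, 0, 2);	/* 1-byte OOB that must reach runtime */
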
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 55082432f37e..07309c45f327 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -586,70 +586,59 @@ struct page_flags_test {
 	int width;
 	int shift;
 	int mask;
-	unsigned long value;
 	const char *fmt;
 	const char *name;
 };
 
-static struct page_flags_test pft[] = {
+static const struct page_flags_test pft[] = {
 	{SECTIONS_WIDTH, SECTIONS_PGSHIFT, SECTIONS_MASK,
-	 0, "%d", "section"},
+	 "%d", "section"},
 	{NODES_WIDTH, NODES_PGSHIFT, NODES_MASK,
-	 0, "%d", "node"},
+	 "%d", "node"},
 	{ZONES_WIDTH, ZONES_PGSHIFT, ZONES_MASK,
-	 0, "%d", "zone"},
+	 "%d", "zone"},
 	{LAST_CPUPID_WIDTH, LAST_CPUPID_PGSHIFT, LAST_CPUPID_MASK,
-	 0, "%#x", "lastcpupid"},
+	 "%#x", "lastcpupid"},
 	{KASAN_TAG_WIDTH, KASAN_TAG_PGSHIFT, KASAN_TAG_MASK,
-	 0, "%#x", "kasantag"},
 };
+	 "%#x", "kasantag"},
 
 static void __init
 page_flags_test(int section, int node, int zone, int last_cpupid,
-		int kasan_tag, int flags, const char *name, char *cmp_buf)
+		int kasan_tag, unsigned long flags, const char *name,
+		char *cmp_buf)
 {
 	unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
-	unsigned long page_flags = 0;
-	unsigned long size = 0;
+	unsigned long size;
 	bool append = false;
 	int i;
 
-	flags &= PAGEFLAGS_MASK;
-	if (flags) {
-		page_flags |= flags;
-		snprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
-		size = strlen(cmp_buf);
-#if SECTIONS_WIDTH || NODES_WIDTH || ZONES_WIDTH || \
-	LAST_CPUPID_WIDTH || KASAN_TAG_WIDTH
-		/* Other information also included in page flags */
-		snprintf(cmp_buf + size, BUF_SIZE - size, "|");
-		size = strlen(cmp_buf);
-#endif
-	}
+	for (i = 0; i < ARRAY_SIZE(values); i++)
+		flags |= (values[i] & pft[i].mask) << pft[i].shift;
 
-	/* Set the test value */
-	for (i = 0; i < ARRAY_SIZE(pft); i++)
-		pft[i].value = values[i];
+	size = scnprintf(cmp_buf, BUF_SIZE, "%#lx(", flags);
+	if (flags & PAGEFLAGS_MASK) {
+		size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s", name);
+		append = true;
+	}
 
 	for (i = 0; i < ARRAY_SIZE(pft); i++) {
 		if (!pft[i].width)
			continue;
 
-		if (append) {
-			snprintf(cmp_buf + size, BUF_SIZE - size, "|");
-			size = strlen(cmp_buf);
-		}
+		if (append)
+			size += scnprintf(cmp_buf + size, BUF_SIZE - size, "|");
 
-		page_flags |= (pft[i].value & pft[i].mask) << pft[i].shift;
-		snprintf(cmp_buf + size, BUF_SIZE - size, "%s=", pft[i].name);
-		size = strlen(cmp_buf);
-		snprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
-			 pft[i].value & pft[i].mask);
-		size = strlen(cmp_buf);
+		size += scnprintf(cmp_buf + size, BUF_SIZE - size, "%s=",
+				  pft[i].name);
+		size += scnprintf(cmp_buf + size, BUF_SIZE - size, pft[i].fmt,
+				  values[i] & pft[i].mask);
 		append = true;
 	}
 
-	test(cmp_buf, "%pGp", &page_flags);
+	snprintf(cmp_buf + size, BUF_SIZE - size, ")");
+
+	test(cmp_buf, "%pGp", &flags);
 }
 
 static void __init
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index e14993bc84d2..cf41fd6df42a 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -393,7 +393,7 @@ static struct test_driver {
 static void shuffle_array(int *arr, int n)
 {
 	unsigned int rnd;
-	int i, j, x;
+	int i, j;
 
 	for (i = n - 1; i > 0; i--) {
 		get_random_bytes(&rnd, sizeof(rnd));
@@ -402,9 +402,7 @@ static void shuffle_array(int *arr, int n)
 		j = rnd % i;
 
		/* Swap indexes. */
-		x = arr[i];
-		arr[i] = arr[j];
-		arr[j] = x;
+		swap(arr[i], arr[j]);
 	}
 }
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d7ad44f2c8f5..58d5e567f836 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -53,8 +53,7 @@
 #include <linux/string_helpers.h>
 #include "kstrtox.h"
 
-static unsigned long long simple_strntoull(const char *startp, size_t max_chars,
-					   char **endp, unsigned int base)
+static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
 {
 	const char *cp;
 	unsigned long long result = 0ULL;
@@ -408,8 +407,9 @@ int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
 #define SMALL	32		/* use lowercase in hex (must be 32 == 0x20) */
 #define SPECIAL	64		/* prefix hex with "0x", octal with "0" */
 
+static_assert(SIGN == 1);
 static_assert(ZEROPAD == ('0' - ' '));
-static_assert(SMALL == ' ');
+static_assert(SMALL == ('a' ^ 'A'));
 
 enum format_type {
 	FORMAT_TYPE_NONE, /* Just a string part */
@@ -2023,6 +2023,11 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
 	bool append = false;
 	int i;
 
+	buf = number(buf, end, flags, default_flag_spec);
+	if (buf < end)
+		*buf = '(';
+	buf++;
+
 	/* Page flags from the main area. */
 	if (main_flags) {
 		buf = format_flags(buf, end, main_flags, pageflag_names);
@@ -2051,6 +2056,9 @@ char *format_page_flags(char *buf, char *end, unsigned long flags)
 		append = true;
 	}
 
+	if (buf < end)
+		*buf = ')';
+	buf++;
 	return buf;
 }
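With the vsprintf.c change, %pGp output now leads with the raw flags value and wraps the decoded fields in parentheses, which is what the rewritten test_printf.c comparison buffer expects. Illustrative output only (the exact value and fields depend on the page and config):

	/* pr_alert("%pGp\n", &page->flags) now prints something like:
	 * 0x17ffffc0010200(slab|head|node=0|zone=2|lastcpupid=0x1fffff)
	 */
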