Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                     |   7
-rw-r--r--  lib/Kconfig.debug               |  36
-rw-r--r--  lib/Kconfig.kasan               |   2
-rw-r--r--  lib/Kconfig.kcsan               |   6
-rw-r--r--  lib/debugobjects.c              |  14
-rw-r--r--  lib/fault-inject.c              |  15
-rw-r--r--  lib/find_bit_benchmark.c        |   4
-rw-r--r--  lib/fonts/fonts.c               |   4
-rw-r--r--  lib/is_signed_type_kunit.c      |   4
-rw-r--r--  lib/kobject.c                   |   2
-rw-r--r--  lib/kunit/assert.c              |  62
-rw-r--r--  lib/kunit/debugfs.c             |   2
-rw-r--r--  lib/kunit/executor.c            |   6
-rw-r--r--  lib/kunit/kunit-example-test.c  |   7
-rw-r--r--  lib/kunit/string-stream.c       |   5
-rw-r--r--  lib/kunit/test.c                |  15
-rw-r--r--  lib/llist.c                     |   4
-rw-r--r--  lib/math/div64.c                |  15
-rw-r--r--  lib/notifier-error-inject.c     |   2
-rw-r--r--  lib/oid_registry.c              |   1
-rw-r--r--  lib/percpu-refcount.c           |   3
-rw-r--r--  lib/radix-tree.c                |   2
-rw-r--r--  lib/raid6/s390vx.uc             |   3
-rw-r--r--  lib/reed_solomon/test_rslib.c   |   6
-rw-r--r--  lib/sbitmap.c                   |   4
-rw-r--r--  lib/slub_kunit.c                |  58
-rw-r--r--  lib/test-string_helpers.c       |   2
-rw-r--r--  lib/test_fprobe.c               |   5
-rw-r--r--  lib/test_hexdump.c              |  10
-rw-r--r--  lib/test_kprobes.c              |   5
-rw-r--r--  lib/test_list_sort.c            |   2
-rw-r--r--  lib/test_printf.c               |  40
-rw-r--r--  lib/test_rhashtable.c           |   6
-rw-r--r--  lib/test_vmalloc.c              |   8
-rw-r--r--  lib/vdso/Makefile               |   2
-rw-r--r--  lib/vsprintf.c                  |  16
36 files changed, 251 insertions, 134 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9bbf8a4b2108..e69c2d819c44 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -529,8 +529,8 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "NR_CPUS is set to an actual number of CPUs"
- depends on SMP
+ bool "Set number of CPUs at compile time"
+ depends on SMP && EXPERT && !COMPILE_TEST
help
Say Yes if you have NR_CPUS set to an actual number of possible
CPUs in your system, not to a default value. This forces the core
@@ -672,6 +672,9 @@ config ARCH_HAS_PMEM_API
config MEMREGION
bool
+config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+ bool
+
config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c3c0b077ade3..9dc5066654fd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -399,6 +399,7 @@ config FRAME_WARN
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
default 1024 if !64BIT
default 2048 if 64BIT
help
@@ -1716,6 +1717,16 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config DEBUG_CGROUP_REF
+ bool "Disable inlining of cgroup css reference count functions"
+ depends on DEBUG_KERNEL
+ depends on CGROUPS
+ depends on KPROBES
+ default n
+ help
+ Force cgroup css reference count functions to not be inlined so
+ that they can be kprobed for debugging.
+
source "kernel/trace/Kconfig"
config PROVIDE_OHCI1394_DMA_INIT
@@ -1874,8 +1885,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
config FUNCTION_ERROR_INJECTION
- def_bool y
+ bool "Fault-injections of functions"
depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful for testing error paths of code.
+
+ If unsure, say N
config FAULT_INJECTION
bool "Fault-injection framework"
@@ -2107,6 +2124,7 @@ config KPROBES_SANITY_TEST
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
+ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
@@ -2805,6 +2823,22 @@ config RUST_OVERFLOW_CHECKS
If unsure, say Y.
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+ Controls how `build_error!` and `build_assert!` are handled during build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+
+ This should not happen, so by default the build is aborted. However,
+ as an escape hatch, you can choose Y here to ignore them during build
+ and let the check be carried out at runtime (with `panic!` being called
+ if the check fails).
+
+ If unsure, say N.
+
endmenu # "Rust"
source "Documentation/Kconfig"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index ca09b1cf8ee9..836f70393e22 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -37,7 +37,7 @@ menuconfig KASAN
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 47a693c45864..375575a5a0e3 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -125,7 +125,7 @@ config KCSAN_SKIP_WATCH
default 4000
help
The number of per-CPU memory operations to skip, before another
- watchpoint is set up, i.e. one in KCSAN_WATCH_SKIP per-CPU
+ watchpoint is set up, i.e. one in KCSAN_SKIP_WATCH per-CPU
memory operations are used to set up a watchpoint. A smaller value
results in more aggressive race detection, whereas a larger value
improves system performance at the cost of missing some races.
@@ -135,8 +135,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
default y
help
If instruction skip count should be randomized, where the maximum is
- KCSAN_WATCH_SKIP. If false, the chosen value is always
- KCSAN_WATCH_SKIP.
+ KCSAN_SKIP_WATCH. If false, the chosen value is always
+ KCSAN_SKIP_WATCH.
config KCSAN_INTERRUPT_WATCHER
bool "Interruptible watchers" if !KCSAN_STRICT
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 337d797a7141..df86e649d8be 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu)
struct debug_percpu_free *percpu_pool;
struct hlist_node *tmp;
struct debug_obj *obj;
+ unsigned long flags;
/* Remote access is safe as the CPU is dead already */
percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
@@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu)
hlist_del(&obj->node);
kmem_cache_free(obj_cache, obj);
}
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ obj_pool_used -= percpu_pool->obj_free;
+ debug_objects_freed += percpu_pool->obj_free;
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+
percpu_pool->obj_free = 0;
return 0;
@@ -500,9 +507,9 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
descr->debug_hint(obj->object) : NULL;
limit++;
WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
- "object type: %s hint: %pS\n",
+ "object: %p object type: %s hint: %pS\n",
msg, obj_states[obj->state], obj->astate,
- descr->name, hint);
+ obj->object, descr->name, hint);
}
debug_objects_warnings++;
}
@@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void)
hlist_add_head(&obj->node, &objects);
}
+ debug_objects_allocated += i;
+
/*
* debug_objects_mem_init() is now called early, when only one CPU is up
* and interrupts have been disabled, so it is safe to replace the
@@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
+ return;
} else
debug_objects_selftest();
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 96e092de5b72..1421818c9ef7 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
- if (attr->no_warn)
- return;
-
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
@@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
* http://www.nongnu.org/failmalloc/
*/
-bool should_fail(struct fault_attr *attr, ssize_t size)
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
@@ -139,20 +136,26 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
}
- if (attr->probability <= prandom_u32_max(100))
+ if (attr->probability <= get_random_u32_below(100))
return false;
if (!fail_stacktrace(attr))
return false;
fail:
- fail_dump(attr);
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return should_fail_ex(attr, size, 0);
+}
EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
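
A minimal sketch of how a caller might use the new should_fail_ex()/FAULT_NOWARN pair to suppress the warning dump for an expected failure (the attr and function names are hypothetical, not part of this diff):

#include <linux/fault-inject.h>
#include <linux/errno.h>

static DECLARE_FAULT_ATTR(example_fail_attr);

static int example_try_alloc(size_t size)
{
        /* FAULT_NOWARN replaces the old attr->no_warn field: the caller,
         * rather than the attr, now decides whether fail_dump() is skipped. */
        if (should_fail_ex(&example_fail_attr, size, FAULT_NOWARN))
                return -ENOMEM;
        return 0;
}
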
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 7c3c011abd29..d3fb09e6eff1 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -174,8 +174,8 @@ static int __init find_bit_test(void)
bitmap_zero(bitmap2, BITMAP_LEN);
while (nbits--) {
- __set_bit(prandom_u32_max(BITMAP_LEN), bitmap);
- __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
}
test_find_next_bit(bitmap, BITMAP_LEN);
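
The prandom_u32_max() conversions here and in the files below follow the same mapping onto the new inclusive/exclusive helpers; roughly (a sketch, not part of this diff):

#include <linux/random.h>

/*
 *   prandom_u32_max(n)        ->  get_random_u32_below(n)          range [0, n)
 *   prandom_u32_max(n) + 1    ->  get_random_u32_inclusive(1, n)   range [1, n]
 *   retry while (x <= bound)  ->  get_random_u32_above(bound)      range (bound, U32_MAX]
 */
static u32 example_pick_bit(unsigned int nbits)
{
        return get_random_u32_below(nbits);     /* uniform over [0, nbits) */
}
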
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 5f4b07b56cd9..973866438608 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
+ if ((font_w & (1U << (f->width - 1))) &&
+ (font_h & (1U << (f->height - 1))))
c += 1000;
if (c > cc) {
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
index 207207522925..0a7f6ae62839 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/is_signed_type_kunit.c
@@ -21,11 +21,7 @@ static void is_signed_type_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
-#ifdef __CHAR_UNSIGNED__
KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
-#else
- KUNIT_EXPECT_EQ(test, is_signed_type(char), true);
-#endif
KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
diff --git a/lib/kobject.c b/lib/kobject.c
index a0b2dbfcfa23..af1f5f2954d4 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -694,7 +694,7 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- unsigned long delay = HZ + HZ * prandom_u32_max(4);
+ unsigned long delay = HZ + HZ * get_random_u32_below(4);
pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index d00d6d181ee8..f5b50babe38d 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -127,13 +127,15 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert->text->right_text);
if (!is_literal(stream->test, binary_assert->text->left_text,
binary_assert->left_value, stream->gfp))
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
binary_assert->text->left_text,
+ binary_assert->left_value,
binary_assert->left_value);
if (!is_literal(stream->test, binary_assert->text->right_text,
binary_assert->right_value, stream->gfp))
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
binary_assert->text->right_text,
+ binary_assert->right_value,
binary_assert->right_value);
kunit_assert_print_msg(message, stream);
}
@@ -204,3 +206,59 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
+
+/* Adds a hexdump of a buffer to a string_stream, comparing it with
+ * a second buffer. Bytes that differ are marked with <>.
+ */
+static void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
+{
+ size_t i;
+ const u8 *buf1 = buf;
+ const u8 *buf2 = compared_buf;
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT);
+
+ for (i = 0; i < len; ++i) {
+ if (!(i % 16) && i)
+ string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT);
+
+ if (buf1[i] != buf2[i])
+ string_stream_add(stream, "<%02x>", buf1[i]);
+ else
+ string_stream_add(stream, " %02x ", buf1[i]);
+ }
+}
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_mem_assert *mem_assert;
+
+ mem_assert = container_of(assert, struct kunit_mem_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
+
+ string_stream_add(stream, "\n");
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
+
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 1048ef1b8d6e..de0ee2e03ed6 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -63,7 +63,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
kunit_suite_for_each_test_case(suite, test_case)
debugfs_print_result(seq, suite, test_case);
- seq_printf(seq, "%s %d - %s\n",
+ seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 9bbc422c284b..74982b83707c 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -166,7 +166,7 @@ static void kunit_exec_run_tests(struct suite_set *suite_set)
{
size_t num_suites = suite_set->end - suite_set->start;
- pr_info("TAP version 14\n");
+ pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);
__kunit_test_suites_init(suite_set->start, num_suites);
@@ -177,8 +177,8 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
struct kunit_suite * const *suites;
struct kunit_case *test_case;
- /* Hack: print a tap header so kunit.py can find the start of KUnit output. */
- pr_info("TAP version 14\n");
+ /* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
+ pr_info("KTAP version 1\n");
for (suites = suite_set->start; suites < suite_set->end; suites++)
kunit_suite_for_each_test_case((*suites), test_case) {
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index f8fe582c9e36..66cc4e2365ec 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -86,6 +86,9 @@ static void example_mark_skipped_test(struct kunit *test)
*/
static void example_all_expect_macros_test(struct kunit *test)
{
+ const u32 array1[] = { 0x0F, 0xFF };
+ const u32 array2[] = { 0x1F, 0xFF };
+
/* Boolean assertions */
KUNIT_EXPECT_TRUE(test, true);
KUNIT_EXPECT_FALSE(test, false);
@@ -109,6 +112,10 @@ static void example_all_expect_macros_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, "hi", "hi");
KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
+ /* Memory block assertions */
+ KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1));
+ KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1));
+
/*
* There are also ASSERT variants of all of the above that abort test
* execution if they fail. Useful for memory allocations, etc.
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index a608746020a9..f5f51166d8c2 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -131,11 +131,6 @@ bool string_stream_is_empty(struct string_stream *stream)
return list_empty(&stream->fragments);
}
-struct string_stream_alloc_context {
- struct kunit *test;
- gfp_t gfp;
-};
-
struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
{
struct string_stream *stream;
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2a6992fe7c3e..c9ebf975e56b 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -20,6 +20,8 @@
#include "string-stream.h"
#include "try-catch-impl.h"
+DEFINE_STATIC_KEY_FALSE(kunit_running);
+
#if IS_BUILTIN(CONFIG_KUNIT)
/*
* Fail the current test and print an error message to the log.
@@ -149,6 +151,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_suite_start(struct kunit_suite *suite)
{
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
@@ -175,13 +178,13 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s%s%s\n",
+ pr_info("%s %zd %s%s%s\n",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
else
kunit_log(KERN_INFO, test,
- KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ KUNIT_SUBTEST_INDENT "%s %zd %s%s%s",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
@@ -543,6 +546,8 @@ int kunit_run_tests(struct kunit_suite *suite)
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "KTAP version 1\n");
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"# Subtest: %s", test_case->name);
while (test.param_value) {
@@ -555,7 +560,7 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_log(KERN_INFO, &test,
KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
- "%s %d - %s",
+ "%s %d %s",
kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
@@ -612,10 +617,14 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
return 0;
}
+ static_branch_inc(&kunit_running);
+
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
kunit_run_tests(suites[i]);
}
+
+ static_branch_dec(&kunit_running);
return 0;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
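
The new kunit_running static key gives non-test code a cheap way to ask whether KUnit is currently executing tests; a minimal sketch of such a check (the helper name is hypothetical, not part of this diff):

#include <linux/jump_label.h>

DECLARE_STATIC_KEY_FALSE(kunit_running);

static inline bool example_in_kunit_test(void)
{
        /* Compiles down to a patched branch: a no-op while no tests run. */
        return static_branch_unlikely(&kunit_running);
}
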
diff --git a/lib/llist.c b/lib/llist.c
index 7d78b736e8af..6e668fa5a2c6 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -26,10 +26,10 @@
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_head *head)
{
- struct llist_node *first;
+ struct llist_node *first = READ_ONCE(head->first);
do {
- new_last->next = first = READ_ONCE(head->first);
+ new_last->next = first;
} while (!try_cmpxchg(&head->first, &first, new_first));
return !first;
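
The llist change relies on try_cmpxchg() writing the freshly observed value back into 'first' on failure, so the head no longer needs to be re-read inside the loop; the same pattern in a generic form (a sketch, not part of this diff):

#include <linux/atomic.h>

static void example_add(atomic_t *counter, int delta)
{
        int old = atomic_read(counter);

        do {
                /* On failure, atomic_try_cmpxchg() updates 'old' with the
                 * value it actually found, so no explicit re-read is needed. */
        } while (!atomic_try_cmpxchg(counter, &old, old + delta));
}
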
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 46866394fc84..55a81782e271 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -63,12 +63,6 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
#endif
-/**
- * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- * @remainder: 64bit remainder
- */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -89,7 +83,7 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
EXPORT_SYMBOL(div_s64_rem);
#endif
-/**
+/*
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -129,7 +123,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
EXPORT_SYMBOL(div64_u64_rem);
#endif
-/**
+/*
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -163,11 +157,6 @@ u64 div64_u64(u64 dividend, u64 divisor)
EXPORT_SYMBOL(div64_u64);
#endif
-/**
- * div64_s64 - signed 64bit divide with 64bit divisor
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 21016b32d313..2b24ea6c9497 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index e592d48b1974..fe6705cfd780 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -146,7 +146,6 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
bufsize -= count;
while (v < end) {
- num = 0;
n = *v++;
if (!(n & 0x80)) {
num = n;
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index e5c5315da274..668f6aa6a75d 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -230,7 +230,8 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu_hurry(&ref->data->rcu,
+ percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3c78e1e8b2ad..049ba132f7ef 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int offset;
+ int offset = 0;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 9e597e1f91a4..b25dfc9c7759 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -13,8 +13,7 @@
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
-
-asm(".include \"asm/vx-insn.h\"\n");
+#include <asm/vx-insn.h>
#define NSIZE 16
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 848e7eb5da92..75cb1adac884 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -183,7 +183,7 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
do {
/* Must not choose the same location twice */
- errloc = prandom_u32_max(len);
+ errloc = get_random_u32_below(len);
} while (errlocs[errloc] != 0);
errlocs[errloc] = 1;
@@ -194,12 +194,12 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
for (i = 0; i < eras; i++) {
do {
/* Must not choose the same location twice */
- errloc = prandom_u32_max(len);
+ errloc = get_random_u32_below(len);
} while (errlocs[errloc] != 0);
derrlocs[i] = errloc;
- if (ewsc && prandom_u32_max(2)) {
+ if (ewsc && get_random_u32_below(2)) {
/* Erasure with the symbol intact */
errlocs[errloc] = 2;
} else {
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 7280ae8ca88c..58de526ff051 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
int i;
for_each_possible_cpu(i)
- *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
+ *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
}
return 0;
}
@@ -33,7 +33,7 @@ static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
hint = this_cpu_read(*sb->alloc_hint);
if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32_max(depth) : 0;
+ hint = depth ? get_random_u32_below(depth) : 0;
this_cpu_write(*sb->alloc_hint, hint);
}
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 7a0564d7cb7a..d4a3730b08fa 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -9,10 +10,25 @@
static struct kunit_resource resource;
static int slab_errors;
+/*
+ * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, so objects are not
+ * served from the KFENCE pool, where an access could be caught by both
+ * our test and the KFENCE sanity checks.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+ unsigned int size, slab_flags_t flags)
+{
+ struct kmem_cache *s = kmem_cache_create(name, size, 0,
+ (flags | SLAB_NO_USER_FLAGS), NULL);
+ s->flags |= SLAB_SKIP_KFENCE;
+ return s;
+}
+
static void test_clobber_zone(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
- SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+ SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -29,8 +45,8 @@ static void test_clobber_zone(struct kunit *test)
#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
@@ -74,8 +90,8 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -89,8 +105,8 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -105,8 +121,8 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
- SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+ SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -120,6 +136,27 @@ static void test_clobber_redzone_free(struct kunit *test)
kmem_cache_destroy(s);
}
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+ u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+
+ kasan_disable_current();
+
+ /* Suppress the -Warray-bounds warning */
+ OPTIMIZER_HIDE_VAR(p);
+ p[18] = 0xab;
+ p[19] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -139,6 +176,7 @@ static struct kunit_case test_cases[] = {
#endif
KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
{}
};
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 86fadd3ba08c..41d3447bc3b4 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -587,7 +587,7 @@ static int __init test_string_helpers_init(void)
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
- prandom_u32_max(UNESCAPE_ANY + 1), true);
+ get_random_u32_below(UNESCAPE_ANY + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
index e0381b3ec410..1fb56cf5e5ce 100644
--- a/lib/test_fprobe.c
+++ b/lib/test_fprobe.c
@@ -144,10 +144,7 @@ static unsigned long get_ftrace_location(void *func)
static int fprobe_test_init(struct kunit *test)
{
- do {
- rand1 = get_random_u32();
- } while (rand1 <= div_factor);
-
+ rand1 = get_random_u32_above(div_factor);
target = fprobe_selftest_target;
target2 = fprobe_selftest_target2;
target_ip = get_ftrace_location(target);
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 0927f44cd478..b916801f23a8 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
static void __init test_hexdump_set(int rowsize, bool ascii)
{
size_t d = min_t(size_t, sizeof(data_b), rowsize);
- size_t len = prandom_u32_max(d) + 1;
+ size_t len = get_random_u32_inclusive(1, d);
test_hexdump(len, rowsize, 4, ascii);
test_hexdump(len, rowsize, 2, ascii);
@@ -208,11 +208,11 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len,
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
unsigned int i = 0;
- int rs = (prandom_u32_max(2) + 1) * 16;
+ int rs = get_random_u32_inclusive(1, 2) * 16;
do {
int gs = 1 << i;
- size_t len = prandom_u32_max(rs) + gs;
+ size_t len = get_random_u32_below(rs) + gs;
test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
} while (i++ < 3);
@@ -223,11 +223,11 @@ static int __init test_hexdump_init(void)
unsigned int i;
int rowsize;
- rowsize = (prandom_u32_max(2) + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, false);
- rowsize = (prandom_u32_max(2) + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, true);
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
index eeb1d728d974..1c95e5719802 100644
--- a/lib/test_kprobes.c
+++ b/lib/test_kprobes.c
@@ -339,10 +339,7 @@ static int kprobes_test_init(struct kunit *test)
stacktrace_target = kprobe_stacktrace_target;
internal_target = kprobe_stacktrace_internal_target;
stacktrace_driver = kprobe_stacktrace_driver;
-
- do {
- rand1 = get_random_u32();
- } while (rand1 <= div_factor);
+ rand1 = get_random_u32_above(div_factor);
return 0;
}
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 19ff229b9c3a..cc5f335f29b5 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -71,7 +71,7 @@ static void list_sort_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
- el->value = prandom_u32_max(TEST_LIST_LEN / 3);
+ el->value = get_random_u32_below(TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 4bd15a593fbd..d34dc636b81c 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -126,7 +126,7 @@ __test(const char *expect, int elen, const char *fmt, ...)
* be able to print it as expected.
*/
failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
- rand = 1 + prandom_u32_max(elen+1);
+ rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
failed_tests += do_test(rand, expect, elen, fmt, ap);
failed_tests += do_test(0, expect, elen, fmt, ap);
@@ -179,18 +179,6 @@ test_number(void)
* behaviour.
*/
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
-#ifndef __CHAR_UNSIGNED__
- {
- /*
- * Passing a 'char' to a %02x specifier doesn't do
- * what was presumably the intention when char is
- * signed and the value is negative. One must either &
- * with 0xff or cast to u8.
- */
- char val = -16;
- test("0xfffffff0|0xf0|0xf0", "%#02x|%#02x|%#02x", val, val & 0xff, (u8)val);
- }
-#endif
}
static void __init
@@ -704,31 +692,29 @@ flags(void)
static void __init fwnode_pointer(void)
{
- const struct software_node softnodes[] = {
- { .name = "first", },
- { .name = "second", .parent = &softnodes[0], },
- { .name = "third", .parent = &softnodes[1], },
- { NULL /* Guardian */ }
- };
- const char * const full_name = "first/second/third";
+ const struct software_node first = { .name = "first" };
+ const struct software_node second = { .name = "second", .parent = &first };
+ const struct software_node third = { .name = "third", .parent = &second };
+ const struct software_node *group[] = { &first, &second, &third, NULL };
const char * const full_name_second = "first/second";
+ const char * const full_name_third = "first/second/third";
const char * const second_name = "second";
const char * const third_name = "third";
int rval;
- rval = software_node_register_nodes(softnodes);
+ rval = software_node_register_node_group(group);
if (rval) {
pr_warn("cannot register softnodes; rval %d\n", rval);
return;
}
- test(full_name_second, "%pfw", software_node_fwnode(&softnodes[1]));
- test(full_name, "%pfw", software_node_fwnode(&softnodes[2]));
- test(full_name, "%pfwf", software_node_fwnode(&softnodes[2]));
- test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
- test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
+ test(full_name_second, "%pfw", software_node_fwnode(&second));
+ test(full_name_third, "%pfw", software_node_fwnode(&third));
+ test(full_name_third, "%pfwf", software_node_fwnode(&third));
+ test(second_name, "%pfwP", software_node_fwnode(&second));
+ test(third_name, "%pfwP", software_node_fwnode(&third));
- software_node_unregister_nodes(softnodes);
+ software_node_unregister_node_group(group);
}
static void __init fourcc_pointer(void)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f2ba5787055a..6a8e445c8b55 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -368,8 +368,8 @@ static int __init test_rhltable(unsigned int entries)
pr_info("test %d random rhlist add/delete operations\n", entries);
for (j = 0; j < entries; j++) {
- u32 i = prandom_u32_max(entries);
- u32 prand = prandom_u32_max(4);
+ u32 i = get_random_u32_below(entries);
+ u32 prand = get_random_u32_below(4);
cond_resched();
@@ -396,7 +396,7 @@ static int __init test_rhltable(unsigned int entries)
}
if (prand & 2) {
- i = prandom_u32_max(entries);
+ i = get_random_u32_below(entries);
if (test_bit(i, obj_in_table)) {
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
WARN(err, "cannot remove element at slot %d", i);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index cf7780572f5b..f90d2c27675b 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -151,7 +151,7 @@ static int random_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- n = prandom_u32_max(100) + 1;
+ n = get_random_u32_inclusive(1, 100);
p = vmalloc(n * PAGE_SIZE);
if (!p)
@@ -291,12 +291,12 @@ pcpu_alloc_test(void)
return -1;
for (i = 0; i < 35000; i++) {
- size = prandom_u32_max(PAGE_SIZE / 4) + 1;
+ size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
/*
* Maximum PAGE_SIZE
*/
- align = 1 << (prandom_u32_max(11) + 1);
+ align = 1 << get_random_u32_inclusive(1, 11);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
@@ -391,7 +391,7 @@ static void shuffle_array(int *arr, int n)
for (i = n - 1; i > 0; i--) {
/* Cut the range. */
- j = prandom_u32_max(i);
+ j = get_random_u32_below(i);
/* Swap indexes. */
swap(arr[i], arr[j]);
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index c415a685d61b..e814061d6aa0 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set)
endif
quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
+ cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \
then (echo >&2 "$@: dynamic relocations are not supported"; \
rm -f $@; /bin/false); fi
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24f37bab8bc1..be71a03c936a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -41,6 +41,7 @@
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
+#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -752,26 +753,21 @@ early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;
-static void fill_ptr_key_workfn(struct work_struct *work);
-static DECLARE_DELAYED_WORK(fill_ptr_key_work, fill_ptr_key_workfn);
-static void fill_ptr_key_workfn(struct work_struct *work)
+static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
- if (!rng_is_initialized()) {
- queue_delayed_work(system_unbound_wq, &fill_ptr_key_work, HZ * 2);
- return;
- }
-
get_random_bytes(&ptr_key, sizeof(ptr_key));
/* Pairs with smp_rmb() before reading ptr_key. */
smp_wmb();
WRITE_ONCE(filled_random_ptr_key, true);
+ return NOTIFY_DONE;
}
static int __init vsprintf_init_hashval(void)
{
- fill_ptr_key_workfn(NULL);
+ static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
+ execute_with_initialized_rng(&fill_ptr_key_nb);
return 0;
}
subsys_initcall(vsprintf_init_hashval)
@@ -866,7 +862,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (in_hardirq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);