author    | Andrey Konovalov <andreyknvl@google.com>                  | 2023-11-20 18:47:03 +0100
committer | Andrew Morton <akpm@linux-foundation.org>                 | 2023-12-10 16:51:44 -0800
commit    | 83130ab2d8a49e86c70d628d1446a84c8e6ad1a4 (patch)           |
tree      | e7ad11a02ee24c13c62160dd2fbb803a772b2a9c /lib/stackdepot.c |
parent    | 5f9ce55e020742e3c86a06941fbe9f37f9c022dd (diff)            |
lib/stackdepot: add depot_fetch_stack helper
Add a depot_fetch_stack helper function that fetches the pointer to a
stack record.
With this change, all static depot_* functions now operate on stack pools
and the exported stack_depot_* functions operate on the hash table.
Link: https://lkml.kernel.org/r/170d8c202f29dc8e3d5491ee074d1e9e029a46db.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
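
As a usage-level illustration of that split (a hypothetical sketch, not part of this patch): callers outside lib/stackdepot.c keep using only the exported stack_depot_* API, while the new static depot_fetch_stack() helper stays internal to the file. The print_saved_stack() wrapper name below is made up for illustration; stack_depot_fetch() and stack_trace_print() are the existing kernel interfaces.

    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    /* Hypothetical external caller: resolves a handle via the exported API only. */
    static void print_saved_stack(depot_stack_handle_t handle)
    {
            unsigned long *entries;
            unsigned int nr_entries;

            /* stack_depot_fetch() now delegates to the internal depot_fetch_stack(). */
            nr_entries = stack_depot_fetch(handle, &entries);
            if (nr_entries)
                    stack_trace_print(entries, nr_entries, 0);
    }
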
Diffstat (limited to 'lib/stackdepot.c')
-rw-r--r-- | lib/stackdepot.c | 45 |
1 file changed, 28 insertions, 17 deletions
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 46a422d31c1f..e41713983cac 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -310,6 +310,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	stack->handle.extra = 0;
 	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
 	pool_offset += required_size;
+
 	/*
 	 * Let KMSAN know the stored stack record is initialized. This shall
 	 * prevent false positive reports if instrumented code accesses it.
@@ -319,6 +320,32 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	return stack;
 }
 
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+	union handle_parts parts = { .handle = handle };
+	/*
+	 * READ_ONCE pairs with potential concurrent write in
+	 * depot_alloc_stack().
+	 */
+	int pool_index_cached = READ_ONCE(pool_index);
+	void *pool;
+	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+	struct stack_record *stack;
+
+	if (parts.pool_index > pool_index_cached) {
+		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+		     parts.pool_index, pool_index_cached, handle);
+		return NULL;
+	}
+
+	pool = stack_pools[parts.pool_index];
+	if (!pool)
+		return NULL;
+
+	stack = pool + offset;
+	return stack;
+}
+
 /* Calculates the hash for a stack. */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
 {
@@ -462,14 +489,6 @@ EXPORT_SYMBOL_GPL(stack_depot_save);
 unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 			       unsigned long **entries)
 {
-	union handle_parts parts = { .handle = handle };
-	/*
-	 * READ_ONCE pairs with potential concurrent write in
-	 * depot_alloc_stack.
-	 */
-	int pool_index_cached = READ_ONCE(pool_index);
-	void *pool;
-	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;
 
 	*entries = NULL;
@@ -482,15 +501,7 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 	if (!handle || stack_depot_disabled)
 		return 0;
 
-	if (parts.pool_index > pool_index_cached) {
-		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-		     parts.pool_index, pool_index_cached, handle);
-		return 0;
-	}
-	pool = stack_pools[parts.pool_index];
-	if (!pool)
-		return 0;
-	stack = pool + offset;
+	stack = depot_fetch_stack(handle);
 
 	*entries = stack->entries;
 	return stack->size;