// SPDX-License-Identifier: GPL-2.0-only
/*
* Stack depot - a stack trace storage that avoids duplication.
*
* Stack depot is intended to be used by subsystems that need to store and
* later retrieve many potentially duplicated stack traces without wasting
* memory.
*
* For example, KASAN needs to save allocation and free stack traces for each
* object. Storing two stack traces per object requires a lot of memory (e.g.
* SLUB_DEBUG needs 256 bytes per object for that). Since allocation and free
 * stack traces often repeat, using stack depot saves about 100x the space.
*
 * Internally, stack depot maintains a hash table of unique stack traces. The
* stack traces themselves are stored contiguously one after another in a set
* of separate page allocations.
*
* Stack traces are never removed from stack depot.
*
* Author: Alexander Potapenko <glider@google.com>
* Copyright (C) 2016 Google, Inc.
*
* Based on the code by Dmitry Chernenkov.
*/
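/*
 * A typical call sequence, shown as an illustrative sketch rather than code
 * taken from any particular caller (the buffer size of 64 is an arbitrary
 * choice):
 *
 *	unsigned long entries[64];
 *	unsigned long *saved_entries;
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *
 *	Later, the deduplicated trace can be looked up or printed:
 *
 *	nr_entries = stack_depot_fetch(handle, &saved_entries);
 *	stack_depot_print(handle);
 */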
#define pr_fmt(fmt) "stackdepot: " fmt
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>
#include <linux/kasan-enabled.h>
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define DEPOT_POOLS_CAP 8192
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
/* Compact structure that stores a reference to a stack. */
union handle_parts {
depot_stack_handle_t handle;
struct {
u32 pool_index : DEPOT_POOL_INDEX_BITS;
u32 offset : DEPOT_OFFSET_BITS;
u32 valid : DEPOT_VALID_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
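/*
 * Worked example of the handle layout, assuming 4 KB pages (PAGE_SHIFT == 12)
 * and STACK_DEPOT_EXTRA_BITS == 5; other configurations shift these values:
 *
 *	DEPOT_POOL_SIZE       = 1 << (12 + 2)   = 16 KB per pool
 *	DEPOT_OFFSET_BITS     = 2 + 12 - 4      = 10 bits
 *	    (offsets are stored in 16-byte units, so 10 bits cover a 16 KB pool)
 *	DEPOT_POOL_INDEX_BITS = 32 - 1 - 10 - 5 = 16 bits
 *	    (capped at DEPOT_POOLS_CAP = 8192 pools, i.e. at most 128 MB of
 *	     stack storage)
 */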
struct stack_record {
struct stack_record *next; /* Link in the hash table */
u32 hash; /* Hash in the hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
unsigned long entries[]; /* Variable-sized array of frames */
};
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
/* Use one hash table bucket per 16 KB of memory. */
#define STACK_HASH_TABLE_SCALE 14
/* Limit the number of buckets between 4K and 1M. */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20
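/*
 * A rough sizing illustration (the exact bucket count is chosen by
 * alloc_large_system_hash() or by stack_depot_init() below): with 1 GB of
 * memory, one bucket per 16 KB gives about 2^30 / 2^14 = 65536 buckets,
 * which already lies within the 4K..1M clamp above.
 */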
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
/* Hash table of pointers to stored stack traces. */
static struct stack_record **stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
/* Array of memory regions that store stack traces. */
static void *stack_pools[DEPOT_MAX_POOLS];
/* Currently used pool in stack_pools. */
static int pool_index;
/* Offset to the unused space in the currently used pool. */
static size_t pool_offset;
/* Lock that protects the variables above. */
static DEFINE_RAW_SPINLOCK(pool_lock);
/*
* Stack depot tries to keep an extra pool allocated even before it runs out
* of space in the currently used pool.
 * This flag marks whether this extra pool needs to be allocated and
 * initialized. It has the value 0 when either the next pool is already
 * initialized or the limit on the number of pools is reached.
*/
static int next_pool_required = 1;
static int __init disable_stack_depot(char *str)
{
int ret;
ret = kstrtobool(str, &stack_depot_disabled);
if (!ret && stack_depot_disabled) {
pr_info("disabled\n");
stack_table = NULL;
}
return 0;
}
early_param("stack_depot_disable", disable_stack_depot);
void __init stack_depot_request_early_init(void)
{
/* Too late to request early init now. */
WARN_ON(__stack_depot_early_init_passed);
__stack_depot_early_init_requested = true;
}
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
unsigned long entries = 0;
/* This function must be called only once, from mm_init(). */
if (WARN_ON(__stack_depot_early_init_passed))
return 0;
__stack_depot_early_init_passed = true;
/*
* If KASAN is enabled, use the maximum order: KASAN is frequently used
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
*/
if (kasan_enabled() && !stack_bucket_number_order)
stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
if (!__stack_depot_early_init_requested || stack_depot_disabled)
return 0;
/*
* If stack_bucket_number_order is not set, leave entries as 0 to rely
* on the automatic calculations performed by alloc_large_system_hash.
*/
if (stack_bucket_number_order)
entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
sizeof(struct stack_record *),
entries,
STACK_HASH_TABLE_SCALE,
HASH_EARLY | HASH_ZERO,
NULL,
&stack_hash_mask,
1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
return -ENOMEM;
}
return 0;
}
/* Allocates a hash table via kvcalloc. Can be used after boot. */
int stack_depot_init(void)
{
static DEFINE_MUTEX(stack_depot_init_mutex);
unsigned long entries;
int ret = 0;
mutex_lock(&stack_depot_init_mutex);
if (stack_depot_disabled || stack_table)
goto out_unlock;
/*
* Similarly to stack_depot_early_init, use stack_bucket_number_order
* if assigned, and rely on automatic scaling otherwise.
*/
if (stack_bucket_number_order) {
entries = 1UL << stack_bucket_number_order;
} else {
int scale = STACK_HASH_TABLE_SCALE;
entries = nr_free_buffer_pages();
entries = roundup_pow_of_two(entries);
if (scale > PAGE_SHIFT)
entries >>= (scale - PAGE_SHIFT);
else
entries <<= (PAGE_SHIFT - scale);
}
if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
ret = -ENOMEM;
goto out_unlock;
}
stack_hash_mask = entries - 1;
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(stack_depot_init);
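/*
 * A sketch of the two initialization paths; the my_tool_* names are made up
 * for illustration, only the stack depot calls are real:
 *
 *	Built-in code that knows at early boot it will need stack depot:
 *
 *	void __init my_tool_early_setup(void)
 *	{
 *		stack_depot_request_early_init();
 *	}
 *
 *	Code that decides later (e.g. a module) calls stack_depot_init() once
 *	before its first stack_depot_save():
 *
 *	static int __init my_tool_init(void)
 *	{
 *		return stack_depot_init();
 *	}
 */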
/* Uses preallocated memory to initialize a new stack depot pool. */
static void depot_init_pool(void **prealloc)
{
/*
* If the next pool is already initialized or the maximum number of
* pools is reached, do not use the preallocated memory.
* smp_load_acquire() here pairs with smp_store_release() below and
* in depot_alloc_stack().
*/
if (!smp_load_acquire(&next_pool_required))
return;
/* Check if the current pool is not yet allocated. */
if (stack_pools[pool_index] == NULL) {
/* Use the preallocated memory for the current pool. */
stack_pools[pool_index] = *prealloc;
*prealloc = NULL;
} else {
/*
* Otherwise, use the preallocated memory for the next pool
* as long as we do not exceed the maximum number of pools.
*/
if (pool_index + 1 < DEPOT_MAX_POOLS) {
stack_pools[pool_index + 1] = *prealloc;
*prealloc = NULL;
}
/*
* At this point, either the next pool is initialized or the
* maximum number of pools is reached. In either case, take
* note that initializing another pool is not required.
* This smp_store_release pairs with smp_load_acquire() above
* and in stack_depot_save().
*/
smp_store_release(&next_pool_required, 0);
}
}
/* Allocates a new stack in a stack depot pool. */
static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
size_t required_size = struct_size(stack, entries, size);
required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
/* Check if there is not enough space in the current pool. */
if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
/* Bail out if we reached the pool limit. */
if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
WARN_ONCE(1, "Stack depot reached limit capacity");
return NULL;
}
/*
* Move on to the next pool.
* WRITE_ONCE pairs with potential concurrent read in
* stack_depot_fetch().
*/
WRITE_ONCE(pool_index, pool_index + 1);
pool_offset = 0;
/*
* If the maximum number of pools is not reached, take note
 * that the next pool needs to be initialized.
* smp_store_release() here pairs with smp_load_acquire() in
* stack_depot_save() and depot_init_pool().
*/
if (pool_index + 1 < DEPOT_MAX_POOLS)
smp_store_release(&next_pool_required, 1);
}
/* Assign the preallocated memory to a pool if required. */
if (*prealloc)
depot_init_pool(prealloc);
/* Check if we have a pool to save the stack trace. */
if (stack_pools[pool_index] == NULL)
return NULL;
/* Save the stack trace. */
stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
stack->size = size;
stack->handle.pool_index = pool_index;
stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
stack->handle.valid = 1;
stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
pool_offset += required_size;
return stack;
}
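/*
 * To illustrate the size computation above (assuming 64-bit pointers and no
 * unusual structure padding): a trace of 4 frames needs a 24-byte
 * struct stack_record header plus 4 * 8 bytes of entries = 56 bytes, which
 * ALIGN() rounds up to 64 bytes, i.e. a multiple of 1 << DEPOT_STACK_ALIGN.
 */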
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
return jhash2((u32 *)entries,
array_size(size, sizeof(*entries)) / sizeof(u32),
STACK_HASH_SEED);
}
/*
* Non-instrumented version of memcmp().
 * Checks only for equality, not for lexicographical ordering.
*/
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
unsigned int n)
{
for ( ; n-- ; u1++, u2++) {
if (*u1 != *u2)
return 1;
}
return 0;
}
/* Finds a stack in a bucket of the hash table. */
static inline struct stack_record *find_stack(struct stack_record *bucket,
unsigned long *entries, int size,
u32 hash)
{
struct stack_record *found;
for (found = bucket; found; found = found->next) {
if (found->hash == hash &&
found->size == size &&
!stackdepot_memcmp(entries, found->entries, size))
return found;
}
return NULL;
}
/**
* __stack_depot_save - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
* @can_alloc: Allocate stack pools (increased chance of failure if false)
*
* Saves a stack trace from @entries array of size @nr_entries. If @can_alloc is
* %true, stack depot can replenish the stack pools in case no space is left
* (allocates using GFP flags of @alloc_flags). If @can_alloc is %false, avoids
* any allocations and fails if no space is left to store the stack trace.
*
* If the provided stack trace comes from the interrupt context, only the part
* up to the interrupt entry is saved.
*
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case for contexts where neither %GFP_ATOMIC nor
* %GFP_NOWAIT can be used (NMI, raw_spin_lock).
*
* Return: Handle of the stack struct stored in depot, 0 on failure
*/
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
union handle_parts retval = { .handle = 0 };
struct page *page = NULL;
void *prealloc = NULL;
unsigned long flags;
u32 hash;
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stack depot growth.
*
* Since use of filter_irq_stacks() is a requirement to ensure stack
 * depot can efficiently deduplicate interrupt stacks, always call
 * filter_irq_stacks() here to simplify all callers' use of stack depot.
*/
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disabled)
goto fast_exit;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
/*
* Fast path: look the stack trace up without locking.
* The smp_load_acquire() here pairs with smp_store_release() to
* |bucket| below.
*/
found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
if (found)
goto exit;
/*
* Check if another stack pool needs to be initialized. If so, allocate
* the memory now - we won't be able to do that under the lock.
*
* The smp_load_acquire() here pairs with smp_store_release() to
 * |next_pool_required| in depot_alloc_stack() and depot_init_pool().
*/
if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
* contexts and I/O.
*/
alloc_flags &= ~GFP_ZONEMASK;
alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
raw_spin_lock_irqsave(&pool_lock, flags);
found = find_stack(*bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
new->next = *bucket;
/*
* This smp_store_release() pairs with
* smp_load_acquire() from |bucket| above.
*/
smp_store_release(bucket, new);
found = new;
}
} else if (prealloc) {
/*
* Stack depot already contains this stack trace, but let's
* keep the preallocated memory for the future.
*/
depot_init_pool(&prealloc);
}
raw_spin_unlock_irqrestore(&pool_lock, flags);
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
retval.handle = found->handle.handle;
fast_exit:
return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
/**
* stack_depot_save - Save a stack trace to stack depot
*
* @entries: Pointer to the stack trace
* @nr_entries: Number of frames in the stack
* @alloc_flags: Allocation GFP flags
*
* Context: Contexts where allocations via alloc_pages() are allowed.
* See __stack_depot_save() for more details.
*
* Return: Handle of the stack trace stored in depot, 0 on failure
*/
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
return __stack_depot_save(entries, nr_entries, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
/**
* stack_depot_fetch - Fetch a stack trace from stack depot
*
* @handle: Stack depot handle returned from stack_depot_save()
* @entries: Pointer to store the address of the stack trace
*
* Return: Number of frames for the fetched stack
*/
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
union handle_parts parts = { .handle = handle };
/*
* READ_ONCE pairs with potential concurrent write in
* depot_alloc_stack.
*/
int pool_index_cached = READ_ONCE(pool_index);
void *pool;
size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
*entries = NULL;
if (!handle)
return 0;
if (parts.pool_index > pool_index_cached) {
WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
parts.pool_index, pool_index_cached, handle);
return 0;
}
pool = stack_pools[parts.pool_index];
if (!pool)
return 0;
stack = pool + offset;
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
/**
* stack_depot_print - Print a stack trace from stack depot
*
* @stack: Stack depot handle returned from stack_depot_save()
*/
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(stack, &entries);
if (nr_entries > 0)
stack_trace_print(entries, nr_entries, 0);
}
EXPORT_SYMBOL_GPL(stack_depot_print);
/**
* stack_depot_snprint - Print a stack trace from stack depot into a buffer
*
* @handle: Stack depot handle returned from stack_depot_save()
* @buf: Pointer to the print buffer
* @size: Size of the print buffer
* @spaces: Number of leading spaces to print
*
* Return: Number of bytes printed
*/
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces)
{
unsigned long *entries;
unsigned int nr_entries;
nr_entries = stack_depot_fetch(handle, &entries);
return nr_entries ? stack_trace_snprint(buf, size, entries, nr_entries,
spaces) : 0;
}
EXPORT_SYMBOL_GPL(stack_depot_snprint);
/**
* stack_depot_set_extra_bits - Set extra bits in a stack depot handle
*
* @handle: Stack depot handle returned from stack_depot_save()
 * @extra_bits: Value to set the extra bits to
*
* Return: Stack depot handle with extra bits set
*
* Stack depot handles have a few unused bits, which can be used for storing
* user-specific information. These bits are transparent to the stack depot.
*/
depot_stack_handle_t __must_check stack_depot_set_extra_bits(
depot_stack_handle_t handle, unsigned int extra_bits)
{
union handle_parts parts = { .handle = handle };
/* Don't set extra bits on empty handles. */
if (!handle)
return 0;
parts.extra = extra_bits;
return parts.handle;
}
EXPORT_SYMBOL(stack_depot_set_extra_bits);
/**
* stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
*
* @handle: Stack depot handle with extra bits saved
*
* Return: Extra bits retrieved from the stack depot handle
*/
unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
{
union handle_parts parts = { .handle = handle };
return parts.extra;
}
EXPORT_SYMBOL(stack_depot_get_extra_bits);
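/*
 * A minimal round-trip sketch for the extra bits; the tag value 0x3 is an
 * arbitrary stand-in for caller-specific data:
 *
 *	handle = stack_depot_set_extra_bits(handle, 0x3);
 *	...
 *	tag = stack_depot_get_extra_bits(handle);
 *
 *	tag now holds 0x3 again, while the pool index and offset encoded in the
 *	handle are unchanged.
 */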