| author | Dennis Zhou <dennisz@fb.com> | 2017-06-19 19:28:30 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2017-06-20 15:31:28 -0400 |
| commit | 8fa3ed8014ac792a4c76d585b5c0f68bd202ff6b (patch) | |
| tree | 97316c47b8c92e8d39b381d38f6ee889b1d195da /mm/percpu.c | |
| parent | 5ccd30e40e731051f6d1eb02f7ac073c1ef9deba (diff) | |
percpu: migrate percpu data structures to internal header
Migrate the pcpu_chunk definition and a few percpu static variables from
mm/percpu.c to an internal header file. These will be used with debugfs
to expose statistics about percpu memory, improving visibility into
allocations and fragmentation.
Signed-off-by: Dennis Zhou <dennisz@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
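The new internal header itself lies outside the diffstat below, which is limited to mm/percpu.c, so its contents are not shown on this page. From what this patch removes from percpu.c and un-statics, the header would have to declare roughly the following; treat this as a sketch inferred from the diff, not the actual mm/percpu-internal.h.

```c
/*
 * Sketch of mm/percpu-internal.h inferred from this patch; the real
 * header is not part of the diff shown below.
 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H

#include <linux/types.h>
#include <linux/spinlock.h>

/* definition moved out of mm/percpu.c so debugfs code can walk chunks */
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */

	int			map_used;	/* # of map entries used before the sentry */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */

	void			*data;		/* chunk data */
	int			first_free;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			nr_populated;	/* # of populated pages */
	unsigned long		populated[];	/* populated bitmap */
};

/* variables whose 'static' is dropped in mm/percpu.c by this patch */
extern spinlock_t		pcpu_lock;

extern struct list_head		*pcpu_slot;
extern int			pcpu_nr_slots;

extern struct pcpu_chunk	*pcpu_first_chunk;
extern struct pcpu_chunk	*pcpu_reserved_chunk;

#endif /* _MM_PERCPU_INTERNAL_H */
```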
Diffstat (limited to 'mm/percpu.c')
-rw-r--r-- | mm/percpu.c | 30 |
1 file changed, 7 insertions, 23 deletions
```diff
diff --git a/mm/percpu.c b/mm/percpu.c
index be95d3192acb..75ac982c19df 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -76,6 +76,8 @@
 #include <asm/tlbflush.h>
 #include <asm/io.h>
 
+#include "percpu-internal.h"
+
 #define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
 #define PCPU_ATOMIC_MAP_MARGIN_LOW	32
@@ -103,29 +105,11 @@
 #define __pcpu_ptr_to_addr(ptr)	(void __force *)(ptr)
 #endif	/* CONFIG_SMP */
 
-struct pcpu_chunk {
-	struct list_head	list;		/* linked to pcpu_slot lists */
-	int			free_size;	/* free bytes in the chunk */
-	int			contig_hint;	/* max contiguous size hint */
-	void			*base_addr;	/* base address of this chunk */
-
-	int			map_used;	/* # of map entries used before the sentry */
-	int			map_alloc;	/* # of map entries allocated */
-	int			*map;		/* allocation map */
-	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
-
-	void			*data;		/* chunk data */
-	int			first_free;	/* no free below this */
-	bool			immutable;	/* no [de]population allowed */
-	int			nr_populated;	/* # of populated pages */
-	unsigned long		populated[];	/* populated bitmap */
-};
-
 static int pcpu_unit_pages __ro_after_init;
 static int pcpu_unit_size __ro_after_init;
 static int pcpu_nr_units __ro_after_init;
 static int pcpu_atom_size __ro_after_init;
-static int pcpu_nr_slots __ro_after_init;
+int pcpu_nr_slots __ro_after_init;
 static size_t pcpu_chunk_struct_size __ro_after_init;
 
 /* cpus with the lowest and highest unit addresses */
@@ -149,7 +133,7 @@ static const size_t *pcpu_group_sizes __ro_after_init;
  * chunks, this one can be allocated and mapped in several different
  * ways and thus often doesn't live in the vmalloc area.
  */
-static struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
+struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 
 /*
  * Optional reserved chunk.  This chunk reserves part of the first
@@ -158,13 +142,13 @@ static struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
  * area doesn't exist, the following variables contain NULL and 0
  * respectively.
  */
-static struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
+struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 static int pcpu_reserved_chunk_limit __ro_after_init;
 
-static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
+DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 
-static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
+struct list_head *pcpu_slot __ro_after_init;	/* chunk list slots */
 
 /* chunks which need their map areas extended, protected by pcpu_lock */
 static LIST_HEAD(pcpu_map_extend_chunks);
```
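To show why these symbols lose their static qualifier, here is an illustrative sketch of the kind of debugfs consumer the commit message has in mind. It is not part of this patch; the file name, function names, and the statistics it prints ("percpu_stats", percpu_stats_show, the chunk/free-byte counters) are hypothetical. It relies only on the standard seq_file + debugfs pattern plus the symbols exported above.

```c
/* Hypothetical debugfs consumer; not part of this patch. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include "percpu-internal.h"

static int percpu_stats_show(struct seq_file *m, void *v)
{
	struct pcpu_chunk *chunk;
	int slot, nr_chunks = 0, free_bytes = 0;

	/* pcpu_slot and the chunks linked on it are protected by pcpu_lock */
	spin_lock_irq(&pcpu_lock);
	for (slot = 0; slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			nr_chunks++;
			free_bytes += chunk->free_size;
		}
	}
	spin_unlock_irq(&pcpu_lock);

	seq_printf(m, "nr_chunks : %d\n", nr_chunks);
	seq_printf(m, "free_bytes: %d\n", free_bytes);
	return 0;
}

static int percpu_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, percpu_stats_show, NULL);
}

static const struct file_operations percpu_stats_fops = {
	.open		= percpu_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init percpu_stats_init(void)
{
	/* expose the counters at <debugfs>/percpu_stats */
	debugfs_create_file("percpu_stats", 0444, NULL, NULL,
			    &percpu_stats_fops);
	return 0;
}
late_initcall(percpu_stats_init);
```

Keeping the structure definition and the extern declarations in a single internal header lets a statistics file like this live next to mm/percpu.c without widening the kernel-wide percpu API.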