author		Linus Torvalds <torvalds@linux-foundation.org>	2012-07-30 11:32:24 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-07-30 11:32:24 -0700
commit		720d85075b7ed3617de8ca8d9097390e303e9f60 (patch)
tree		3ce3911aa3f948b94949440954503c9f1b10ee64 /include
parent		637e49ae4f5b4a82b418dae8435e16132b298b7e (diff)
parent		73a1180e140d45cb9ef5fbab103d3bbfc4c84606 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB changes from Pekka Enberg:
 "Most of the changes included are from Christoph Lameter's "common
  slab" patch series that unifies common parts of SLUB, SLAB, and SLOB
  allocators.  The unification is needed for Glauber Costa's "kmem
  memcg" work that will hopefully appear for v3.7.

  The rest of the changes are fixes and speedups by various people."

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux: (32 commits)
  mm: Fix build warning in kmem_cache_create()
  slob: Fix early boot kernel crash
  mm, slub: ensure irqs are enabled for kmemcheck
  mm, sl[aou]b: Move kmem_cache_create mutex handling to common code
  mm, sl[aou]b: Use a common mutex definition
  mm, sl[aou]b: Common definition for boot state of the slab allocators
  mm, sl[aou]b: Extract common code for kmem_cache_create()
  slub: remove invalid reference to list iterator variable
  mm: Fix signal SIGFPE in slabinfo.c.
  slab: move FULL state transition to an initcall
  slab: Fix a typo in commit 8c138b "slab: Get rid of obj_size macro"
  mm, slab: Build fix for recent kmem_cache changes
  slab: rename gfpflags to allocflags
  slub: refactoring unfreeze_partials()
  slub: use __cmpxchg_double_slab() at interrupt disabled place
  slab/mempolicy: always use local policy from interrupt context
  slab: Get rid of obj_size macro
  mm, sl[aou]b: Extract common fields from struct kmem_cache
  slab: Remove some accessors
  slab: Use page struct fields instead of casting
  ...
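The centerpiece is the split performed by "mm, sl[aou]b: Extract common code for kmem_cache_create()": one entry point does the argument checking that SLAB, SLUB, and SLOB previously each duplicated, then hands off to an allocator-specific helper. The userspace sketch below models only that control flow; the struct layout, the alignment rule, and both function bodies are illustrative stand-ins, not the code in mm/slab_common.c.

/*
 * Userspace model of the common/allocator split: shared validation in
 * kmem_cache_create(), backend-specific work in __kmem_cache_create().
 * All bodies here are illustrative stand-ins for the kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct kmem_cache {
	const char *name;
	unsigned int object_size;	/* payload size as requested */
	unsigned int size;		/* padded/aligned slot size */
};

/* Stand-in for the per-allocator (SLAB/SLUB/SLOB) backend. */
static struct kmem_cache *__kmem_cache_create(const char *name,
					      unsigned int size)
{
	struct kmem_cache *s = malloc(sizeof(*s));

	if (!s)
		return NULL;
	s->name = name;
	s->object_size = size;
	s->size = (size + 7u) & ~7u;	/* example rule: align to 8 bytes */
	return s;
}

/* Common front end: sanity checks done once, for every allocator. */
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size)
{
	if (!name || size == 0)
		return NULL;		/* reject obviously bad input */
	return __kmem_cache_create(name, size);
}

int main(void)
{
	struct kmem_cache *s = kmem_cache_create("demo_cache", 60);

	if (s)
		printf("%s: object_size=%u size=%u\n",
		       s->name, s->object_size, s->size);
	free(s);
	return 0;
}

The same shape is what lets later commits in the list above, such as the mutex-handling one, keep locking and cache-list management in a single common place.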
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mempolicy.h	2
-rw-r--r--	include/linux/mm_types.h	11
-rw-r--r--	include/linux/slab.h		24
-rw-r--r--	include/linux/slab_def.h	12
-rw-r--r--	include/linux/slub_def.h	3
5 files changed, 42 insertions(+), 10 deletions(-)
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4aa42732e47f..95b738c7abff 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -215,7 +215,7 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
const nodemask_t *mask);
-extern unsigned slab_node(struct mempolicy *policy);
+extern unsigned slab_node(void);
extern enum zone_type policy_zone;
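This signature change pairs with "slab/mempolicy: always use local policy from interrupt context" in the series above: slab_node() now finds the calling task's policy itself, which lets it fall back to the local node when invoked from interrupt context. The sketch below is a userspace model of that control flow only; in_interrupt(), numa_node_id(), and the policy lookup are stand-ins, not the kernel implementations.

/*
 * Userspace model of the new slab_node() contract: no argument, and the
 * local node wins in interrupt context.
 */
#include <stdbool.h>
#include <stdio.h>

struct mempolicy {
	unsigned preferred_node;
};

static struct mempolicy task_policy = { .preferred_node = 1 };
static bool in_irq_context;		/* stand-in for in_interrupt() */

static unsigned numa_node_id(void)	/* stand-in: the local node */
{
	return 0;
}

static unsigned slab_node(void)
{
	/* Interrupt context: skip the task policy, use the local node. */
	if (in_irq_context)
		return numa_node_id();
	return task_policy.preferred_node;
}

int main(void)
{
	printf("process context   -> node %u\n", slab_node());
	in_irq_context = true;
	printf("interrupt context -> node %u\n", slab_node());
	return 0;
}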
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 704a626d94a0..074eb98fe15d 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -53,7 +53,7 @@ struct page {
struct {
union {
pgoff_t index; /* Our offset within mapping. */
- void *freelist; /* slub first free object */
+ void *freelist; /* slub/slob first free object */
};
union {
@@ -91,11 +91,12 @@ struct page {
*/
atomic_t _mapcount;
- struct {
+ struct { /* SLUB */
unsigned inuse:16;
unsigned objects:15;
unsigned frozen:1;
};
+ int units; /* SLOB */
};
atomic_t _count; /* Usage count, see below. */
};
@@ -117,6 +118,12 @@ struct page {
short int pobjects;
#endif
};
+
+	struct list_head list;	/* slobs list of pages */
+	struct {		/* slab fields */
+		struct kmem_cache *slab_cache;
+		struct slab *slab_page;
+	};
};
/* Remainder is not double word aligned */
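These mm_types.h hunks overlay the SLOB and slab fields onto unions that SLUB already occupied, so struct page stays the same size; which view is live depends on which allocator owns the page. Below is a userspace model of just the counters word, keeping the bit widths from the hunk above (everything around them is illustrative):

/*
 * Userspace model of the struct page counters word: the same storage is
 * read as SLUB's packed inuse/objects/frozen fields or as SLOB's plain
 * "units" count. Bit widths follow the diff above.
 */
#include <stdio.h>

struct page_counters {
	union {
		struct {		/* SLUB */
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
		int units;		/* SLOB */
	};
};

int main(void)
{
	struct page_counters pc;

	pc.inuse = 3;
	pc.objects = 8;
	pc.frozen = 1;
	printf("SLUB view: inuse=%u objects=%u frozen=%u\n",
	       pc.inuse, pc.objects, pc.frozen);

	pc.units = 42;			/* SLOB reuses the same storage */
	printf("SLOB view: units=%d\n", pc.units);
	return 0;
}

The two views are never live at once: a page belongs to exactly one allocator at a time, which is what makes reusing the storage safe.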
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67d5d94b783a..0dd2dfa7beca 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -93,6 +93,30 @@
(unsigned long)ZERO_SIZE_PTR)
/*
+ * Common fields provided in kmem_cache by all slab allocators
+ * This struct is either used directly by the allocator (SLOB)
+ * or the allocator must include definitions for all fields
+ * provided in kmem_cache_common in their definition of kmem_cache.
+ *
+ * Once we can do anonymous structs (C11 standard) we could put an
+ * anonymous struct definition in these allocators so that the
+ * separate declarations in the kmem_cache structures of SLAB and
+ * SLUB are no longer needed.
+ */
+#ifdef CONFIG_SLOB
+struct kmem_cache {
+ unsigned int object_size;/* The original size of the object */
+ unsigned int size; /* The aligned/padded/added on size */
+ unsigned int align; /* Alignment as calculated */
+ unsigned long flags; /* Active flags on the slab */
+ const char *name; /* Slab name for sysfs */
+ int refcount; /* Use counter */
+ void (*ctor)(void *); /* Called on object slot creation */
+ struct list_head list; /* List of all slab caches on the system */
+};
+#endif
+
+/*
* struct kmem_cache related prototypes
*/
void __init kmem_cache_init(void);
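On the comment's C11 note: one plausible shape for the follow-up it anticipates is an anonymous struct shared through a macro, so the common fields are declared once yet s->size still works without a nested member name. A hypothetical sketch, not the kernel's layout:

/*
 * Hypothetical sketch of the C11 anonymous-struct idea from the comment
 * above: common fields declared once, embedded without a member name.
 */
#include <stdio.h>

#define KMEM_CACHE_COMMON_FIELDS	\
	unsigned int object_size;	\
	unsigned int size;		\
	const char *name

struct kmem_cache {			/* e.g. an SLUB-style definition */
	struct { KMEM_CACHE_COMMON_FIELDS; };	/* C11 anonymous struct */
	unsigned int cpu_partial;	/* allocator-private field */
};

int main(void)
{
	struct kmem_cache s = {
		.object_size = 60,	/* common fields: no nested name */
		.size = 64,
		.name = "demo_cache",
		.cpu_partial = 8,
	};

	printf("%s: object_size=%u size=%u cpu_partial=%u\n",
	       s.name, s.object_size, s.size, s.cpu_partial);
	return 0;
}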
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fbd1117fdfde..0c634fa376c9 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -27,7 +27,7 @@ struct kmem_cache {
unsigned int limit;
unsigned int shared;
- unsigned int buffer_size;
+ unsigned int size;
u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
@@ -39,7 +39,7 @@ struct kmem_cache {
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
- gfp_t gfpflags;
+ gfp_t allocflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
@@ -52,7 +52,10 @@ struct kmem_cache {
/* 4) cache creation/removal */
const char *name;
- struct list_head next;
+ struct list_head list;
+	int refcount;
+	int object_size;
+	int align;
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
@@ -73,12 +76,11 @@ struct kmem_cache {
/*
* If debugging is enabled, then the allocator can add additional
- * fields and/or padding to every object. buffer_size contains the total
+ * fields and/or padding to every object. size contains the total
* object size including these internal fields, the following two
* variables contain the offset to the user object and its size.
*/
int obj_offset;
- int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
/* 6) per-cpu/per-node data, touched during every alloc/free */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c2f8c8bc56ed..df448adb7283 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -48,7 +48,6 @@ struct kmem_cache_cpu {
unsigned long tid; /* Globally unique transaction id */
struct page *page; /* The slab from which we are allocating */
struct page *partial; /* Partially allocated frozen slabs */
- int node; /* The node of the page (or -1 for debug) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
@@ -83,7 +82,7 @@ struct kmem_cache {
unsigned long flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
- int objsize; /* The size of an object without meta data */
+ int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
int cpu_partial; /* Number of per cpu partial objects to keep around */
struct kmem_cache_order_objects oo;
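With objsize renamed to object_size here, all three allocators now describe an object the same way: object_size is the caller's payload, size is the full slot including metadata, and SLUB's offset locates the free-list pointer inside a free object. A small model of that arithmetic, assuming a made-up layout rule in which the free pointer sits right after the payload:

/*
 * Model of the now-common object geometry. The layout rule below is
 * illustrative only; it is not SLUB's exact free-pointer placement
 * under every debug option.
 */
#include <stdio.h>

struct cache_geometry {
	int object_size;	/* payload size as requested */
	int offset;		/* free pointer offset inside a slot */
	int size;		/* full slot size including metadata */
};

static struct cache_geometry make_geometry(int object_size)
{
	struct cache_geometry g;

	g.object_size = object_size;
	g.offset = object_size;		/* free ptr right after payload */
	g.size = object_size + (int)sizeof(void *);
	return g;
}

int main(void)
{
	struct cache_geometry g = make_geometry(64);

	printf("object_size=%d offset=%d size=%d metadata=%d\n",
	       g.object_size, g.offset, g.size, g.size - g.object_size);
	return 0;
}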