author    | Christoph Lameter <clameter@sgi.com>                 | 2007-07-17 04:03:29 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-17 10:23:01 -0700
commit    | 81cda6626178cd55297831296ba8ecedbfd8b52d (patch)
tree      | fa35a6a04db63080bbeb42f33f4b4a891b7fc96c /include/linux
parent    | ce15fea8274acca06daa1674322d37a7d3f0036b (diff)
Slab allocators: Cleanup zeroing allocations
It now becomes easy to support zeroing allocations with generic inline
functions in slab.h. Provide inline definitions that allow the continued use of
kzalloc, kmem_cache_zalloc, etc., but remove the other definitions of zeroing
functions from the slab allocators and util.c.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r-- | include/linux/slab.h     | 77
-rw-r--r-- | include/linux/slab_def.h | 30
-rw-r--r-- | include/linux/slub_def.h | 13
3 files changed, 46 insertions, 74 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0289ec89300a..0e1d0daef6a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -55,7 +55,6 @@ struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
 			void (*)(void *, struct kmem_cache *, unsigned long));
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
-void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
@@ -91,11 +90,37 @@ int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void *__kzalloc(size_t, gfp_t);
 void * __must_check krealloc(const void *, size_t, gfp_t);
 void kfree(const void *);
 size_t ksize(const void *);
 
+/*
+ * Allocator specific definitions. These are mainly used to establish optimized
+ * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
+ * selecting the appropriate general cache at compile time.
+ *
+ * Allocators must define at least:
+ *
+ *	kmem_cache_alloc()
+ *	__kmalloc()
+ *	kmalloc()
+ *
+ * Those wishing to support NUMA must also define:
+ *
+ *	kmem_cache_alloc_node()
+ *	kmalloc_node()
+ *
+ * See each allocator definition file for additional comments and
+ * implementation notes.
+ */
+#ifdef CONFIG_SLUB
+#include <linux/slub_def.h>
+#elif defined(CONFIG_SLOB)
+#include <linux/slob_def.h>
+#else
+#include <linux/slab_def.h>
+#endif
+
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
  * @n: number of elements.
@@ -151,37 +176,9 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
 	if (n != 0 && size > ULONG_MAX / n)
 		return NULL;
-	return __kzalloc(n * size, flags);
+	return __kmalloc(n * size, flags | __GFP_ZERO);
 }
 
-/*
- * Allocator specific definitions. These are mainly used to establish optimized
- * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
- * selecting the appropriate general cache at compile time.
- *
- * Allocators must define at least:
- *
- *	kmem_cache_alloc()
- *	__kmalloc()
- *	kmalloc()
- *	kzalloc()
- *
- * Those wishing to support NUMA must also define:
- *
- *	kmem_cache_alloc_node()
- *	kmalloc_node()
- *
- * See each allocator definition file for additional comments and
- * implementation notes.
- */
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#elif defined(CONFIG_SLOB)
-#include <linux/slob_def.h>
-#else
-#include <linux/slab_def.h>
-#endif
-
 #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
 /**
  * kmalloc_node - allocate memory from a specific node
@@ -255,5 +252,23 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
 
 #endif /* DEBUG_SLAB */
 
+/*
+ * Shortcuts
+ */
+static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
+{
+	return kmem_cache_alloc(k, flags | __GFP_ZERO);
+}
+
+/**
+ * kzalloc - allocate memory. The memory is set to zero.
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kmalloc).
+ */
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+	return kmalloc(size, flags | __GFP_ZERO);
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 16e814ffab8d..32bdc2ffd715 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -58,36 +58,6 @@ found:
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size)) {
-		int i = 0;
-
-		if (!size)
-			return ZERO_SIZE_PTR;
-
-#define CACHE(x) \
-		if (size <= x) \
-			goto found; \
-		else \
-			i++;
-#include "kmalloc_sizes.h"
-#undef CACHE
-		{
-			extern void __you_cannot_kzalloc_that_much(void);
-			__you_cannot_kzalloc_that_much();
-		}
-found:
-#ifdef CONFIG_ZONE_DMA
-		if (flags & GFP_DMA)
-			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
-						flags);
-#endif
-		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
-	}
-	return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index bae11111458f..07f7e4cbcee3 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -179,19 +179,6 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 	return __kmalloc(size, flags);
 }
 
-static inline void *kzalloc(size_t size, gfp_t flags)
-{
-	if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
-
-		if (!s)
-			return ZERO_SIZE_PTR;
-
-		return kmem_cache_zalloc(s, flags);
-	} else
-		return __kzalloc(size, flags);
-}
-
 #ifdef CONFIG_NUMA
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
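For illustration only, not part of the commit: a minimal caller-side sketch (struct foo and its two helper functions are hypothetical) of why existing users need no changes. After this patch, kzalloc() and kmem_cache_zalloc() are thin inlines that pass __GFP_ZERO to the regular allocation paths, so callers keep requesting zeroed memory exactly as before.

#include <linux/slab.h>

struct foo {
	int refcount;
	void *data;
};

/* Zeroed heap allocation: with this commit, kzalloc() simply expands to
 * kmalloc(size, flags | __GFP_ZERO). */
static struct foo *foo_create(gfp_t flags)
{
	return kzalloc(sizeof(struct foo), flags);
}

/* Zeroed allocation from a dedicated cache: kmem_cache_zalloc() now expands
 * to kmem_cache_alloc(cache, flags | __GFP_ZERO). */
static struct foo *foo_create_from_cache(struct kmem_cache *cache, gfp_t flags)
{
	return kmem_cache_zalloc(cache, flags);
}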