Diffstat (limited to 'mm/slab.h')
-rw-r--r--   mm/slab.h   13
1 file changed, 10 insertions, 3 deletions
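The hunks below drop the cache-local rcu_freeptr_offset field, switch the allocator hook from __kmem_cache_create() to do_kmem_cache_create() taking a struct kmem_cache_args, and add an is_kmalloc_normal() helper that reports whether a cache is a plain kmalloc cache (no DMA, accounted, or reclaimable variant). As a rough stand-alone sketch of the new helper's flag test, using made-up flag bits and a stripped-down cache struct rather than the kernel's slab_flags_t and struct kmem_cache, the logic is:

/* Stand-alone model of the is_kmalloc_normal() check added below.
 * Flag values and the cache struct are illustrative stand-ins, not
 * the kernel's definitions. Build with: cc -o demo demo.c */
#include <stdbool.h>
#include <stdio.h>

#define SLAB_KMALLOC		(1u << 0)
#define SLAB_CACHE_DMA		(1u << 1)
#define SLAB_ACCOUNT		(1u << 2)
#define SLAB_RECLAIM_ACCOUNT	(1u << 3)

struct cache {
	unsigned int flags;
};

static bool is_kmalloc_cache(const struct cache *s)
{
	return s->flags & SLAB_KMALLOC;
}

static bool is_kmalloc_normal(const struct cache *s)
{
	/* "Normal" means a kmalloc cache with none of the DMA,
	 * accounted, or reclaimable variants set. */
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA | SLAB_ACCOUNT | SLAB_RECLAIM_ACCOUNT));
}

int main(void)
{
	struct cache plain = { .flags = SLAB_KMALLOC };
	struct cache dma = { .flags = SLAB_KMALLOC | SLAB_CACHE_DMA };

	/* prints "plain: 1 dma: 0" */
	printf("plain: %d dma: %d\n", is_kmalloc_normal(&plain),
	       is_kmalloc_normal(&dma));
	return 0;
}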
diff --git a/mm/slab.h b/mm/slab.h
index a6051385186e..f22fb760b286 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,8 +261,6 @@ struct kmem_cache {
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
-	/* Specific free pointer requested (if not UINT_MAX) */
-	unsigned int rcu_freeptr_offset;
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
@@ -424,7 +422,9 @@ kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
-int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);
+int do_kmem_cache_create(struct kmem_cache *s, const char *name,
+			 unsigned int size, struct kmem_cache_args *args,
+			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
@@ -445,6 +445,13 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
return (s->flags & SLAB_KMALLOC);
}

+static inline bool is_kmalloc_normal(struct kmem_cache *s)
+{
+	if (!is_kmalloc_cache(s))
+		return false;
+	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
+}
+
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
SLAB_CACHE_DMA32 | SLAB_PANIC | \