author    Christian Brauner <brauner@kernel.org>    2024-08-28 12:56:24 +0200
committer Christian Brauner <brauner@kernel.org>    2024-08-29 15:20:32 +0200
commit    d345bd2e9834e2da505977e154a1c179c793b7b2 (patch)
tree      5bd9bfdb853f72c587a0986039d990c3f2a96114 /mm/slab_common.c
parent    e446f18e98e89fb7de2b320620ce983929bb2486 (diff)
mm: add kmem_cache_create_rcu()
When a kmem cache is created with SLAB_TYPESAFE_BY_RCU the free pointer
must be located outside of the object because we don't know what part
of the memory can safely be overwritten as it may be needed to prevent
object recycling.

That has the consequence that SLAB_TYPESAFE_BY_RCU may end up adding a
new cacheline. This is the case for e.g., struct file. After having it
shrunk down by 40 bytes and having it fit in three cachelines we still
have SLAB_TYPESAFE_BY_RCU adding a fourth cacheline because it needs to
accommodate the free pointer.

Add a new kmem_cache_create_rcu() function that allows the caller to
specify an offset where the free pointer is supposed to be placed.

Link: https://lore.kernel.org/r/20240828-work-kmem_cache-rcu-v3-2-5460bc1f09f6@kernel.org
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
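As a usage illustration (not part of this patch): a minimal sketch of how a caller might adopt kmem_cache_create_rcu(). The struct, field names, and cache name below are hypothetical; the pattern mirrors the struct file case the message describes, overlaying the free pointer on a union member that carries no information once the object sits on the freelist.

    #include <linux/slab.h>
    #include <linux/refcount.h>
    #include <linux/list.h>

    /* Hypothetical RCU-managed object. */
    struct foo {
    	refcount_t ref;			/* used to detect recycled objects */
    	u32 key;
    	union {
    		struct list_head list;	/* only meaningful while the object is live */
    		freeptr_t freeptr;	/* storage the allocator reuses once freed */
    	};
    };

    static struct kmem_cache *foo_cachep;

    static int __init foo_cache_init(void)
    {
    	/* kmem_cache_create_rcu() ORs in SLAB_TYPESAFE_BY_RCU itself. */
    	foo_cachep = kmem_cache_create_rcu("foo", sizeof(struct foo),
    					   offsetof(struct foo, freeptr), 0);
    	return foo_cachep ? 0 : -ENOMEM;
    }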
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--  mm/slab_common.c | 136
1 file changed, 97 insertions(+), 39 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c8dd7e08c5f6..887f6b9855dd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -202,9 +202,10 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int align,
- slab_flags_t flags, unsigned int useroffset,
- unsigned int usersize, void (*ctor)(void *))
+ unsigned int object_size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
{
struct kmem_cache *s;
int err;
@@ -212,6 +213,13 @@ static struct kmem_cache *create_cache(const char *name,
if (WARN_ON(useroffset + usersize > object_size))
useroffset = usersize = 0;
+ /* If a custom freelist pointer is requested make sure it's sane. */
+ err = -EINVAL;
+ if (freeptr_offset != UINT_MAX &&
+ (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+ goto out;
+
err = -ENOMEM;
s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
if (!s)
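Spelled out, the check above accepts a custom @freeptr_offset only when the cache is SLAB_TYPESAFE_BY_RCU, the offset lies inside the object, and the offset is aligned to sizeof(freeptr_t). A hedged illustration against a hypothetical 16-byte object on a 64-bit build, where sizeof(freeptr_t) == 8:

    struct bar {	/* hypothetical, object_size == 16 */
    	u32 a;		/* offset 0 */
    	u32 b;		/* offset 4 */
    	u64 c;		/* offset 8 */
    };

    /* freeptr_offset == 0        -> accepted (in range, 8-byte aligned)      */
    /* freeptr_offset == 8        -> accepted (in range, 8-byte aligned)      */
    /* freeptr_offset == 4        -> -EINVAL (not aligned to freeptr_t)       */
    /* freeptr_offset == 16       -> -EINVAL (not below object_size)          */
    /* freeptr_offset == UINT_MAX -> no custom offset requested, checks skip  */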
@@ -219,13 +227,13 @@ static struct kmem_cache *create_cache(const char *name,
s->name = name;
s->size = s->object_size = object_size;
+ s->rcu_freeptr_offset = freeptr_offset;
s->align = align;
s->ctor = ctor;
#ifdef CONFIG_HARDENED_USERCOPY
s->useroffset = useroffset;
s->usersize = usersize;
#endif
-
err = __kmem_cache_create(s, flags);
if (err)
goto out_free_cache;
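create_cache() only records the offset in s->rcu_freeptr_offset; honoring it is the allocator's job when it lays out the object. A sketch, with hypothetical helper names, of how layout code such as SLUB's calculate_sizes() might consume the field (the actual slub.c side of this series is outside this file's diffstat):

    /* Sketch only: these helpers are illustrative, not from this patch. */
    static bool cache_has_custom_freeptr(struct kmem_cache *s)
    {
    	return s->rcu_freeptr_offset != UINT_MAX;
    }

    static void place_free_pointer(struct kmem_cache *s, unsigned int *size)
    {
    	if (cache_has_custom_freeptr(s)) {
    		/* Reuse the caller-designated storage inside the object. */
    		s->offset = s->rcu_freeptr_offset;
    	} else {
    		/* Default: append the free pointer, growing the object. */
    		s->offset = *size;
    		*size += sizeof(void *);
    	}
    }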
@@ -240,38 +248,10 @@ out:
return ERR_PTR(err);
}
-/**
- * kmem_cache_create_usercopy - Create a cache with a region suitable
- * for copying to userspace
- * @name: A string which is used in /proc/slabinfo to identify this cache.
- * @size: The size of objects to be created in this cache.
- * @align: The required alignment for the objects.
- * @flags: SLAB flags
- * @useroffset: Usercopy region offset
- * @usersize: Usercopy region size
- * @ctor: A constructor for the objects.
- *
- * Cannot be called within an interrupt, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
- *
- * The flags are
- *
- * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
- * to catch references to uninitialised memory.
- *
- * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
- * for buffer overruns.
- *
- * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
- * cacheline. This can be beneficial if you're counting cycles as closely
- * as davem.
- *
- * Return: a pointer to the cache on success, NULL on failure.
- */
-struct kmem_cache *
-kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int align,
- slab_flags_t flags,
+static struct kmem_cache *
+do_kmem_cache_create_usercopy(const char *name,
+ unsigned int size, unsigned int freeptr_offset,
+ unsigned int align, slab_flags_t flags,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
@@ -331,7 +311,7 @@ kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size,
+ s = create_cache(cache_name, size, freeptr_offset,
calculate_alignment(flags, align, size),
flags, useroffset, usersize, ctor);
if (IS_ERR(s)) {
@@ -355,6 +335,45 @@ out_unlock:
}
return s;
}
+
+/**
+ * kmem_cache_create_usercopy - Create a cache with a region suitable
+ * for copying to userspace
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @ctor: A constructor for the objects.
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *
+kmem_cache_create_usercopy(const char *name, unsigned int size,
+ unsigned int align, slab_flags_t flags,
+ unsigned int useroffset, unsigned int usersize,
+ void (*ctor)(void *))
+{
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ useroffset, usersize, ctor);
+}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
/**
@@ -386,11 +405,50 @@ struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
- return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
- ctor);
+ return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
+ 0, 0, ctor);
}
EXPORT_SYMBOL(kmem_cache_create);
+/**
+ * kmem_cache_create_rcu - Create a SLAB_TYPESAFE_BY_RCU cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @freeptr_offset: The offset into the memory to the free pointer
+ * @flags: SLAB flags
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ *
+ * See kmem_cache_create() for an explanation of possible @flags.
+ *
+ * By default SLAB_TYPESAFE_BY_RCU caches place the free pointer outside
+ * of the object. This might cause the object to grow in size. Callers
+ * that have a reason to avoid this can specify a custom free pointer
+ * offset in their struct where the free pointer will be placed.
+ *
+ * Note that placing the free pointer inside the object requires the
+ * caller to ensure that no fields are invalidated that are required to
+ * guard against object recycling (See SLAB_TYPESAFE_BY_RCU for
+ * details).
+ *
+ * Using zero as a value for @freeptr_offset is valid. To request no
+ * offset, UINT_MAX must be specified.
+ *
+ * Note that @ctor isn't supported with custom free pointers as a @ctor
+ * requires an external free pointer.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
+ unsigned int freeptr_offset,
+ slab_flags_t flags)
+{
+ return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
+ flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
+ NULL);
+}
+EXPORT_SYMBOL(kmem_cache_create_rcu);
+
static struct kmem_cache *kmem_buckets_cache __ro_after_init;
/**
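The kernel-doc for kmem_cache_create_rcu() above warns that an in-object free pointer must not overlap fields that guard against recycling. As a hedged reminder of the lookup discipline SLAB_TYPESAFE_BY_RCU imposes (hash_lookup() and foo_put() are hypothetical helpers, struct foo as sketched earlier): readers must take a reference and then revalidate identity, because a slot can be freed and reallocated within an RCU grace period.

    static struct foo *foo_lookup(u32 key)
    {
    	struct foo *obj;

    	rcu_read_lock();
    	obj = hash_lookup(key);		/* hypothetical lookup */
    	/* Memory stays type-stable under RCU, but the object may be recycled. */
    	if (obj && !refcount_inc_not_zero(&obj->ref))
    		obj = NULL;		/* object was concurrently freed */
    	if (obj && READ_ONCE(obj->key) != key) {
    		foo_put(obj);		/* recycled for another key: drop ref */
    		obj = NULL;
    	}
    	rcu_read_unlock();
    	return obj;
    }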