path: root/mm/slub.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2023-06-29 16:34:12 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-06-29 16:34:12 -0700
commit	632f54b4d60bfe0701f43d0bc387928de6e3dcfb (patch)
tree	fac09ccb563bdd3e71133c3f571db6d747fe94fa /mm/slub.c
parent	bf1fa6f15553df04f2bdd06190ccd5f388ab0777 (diff)
parent	7bc162d5cc4de5c33c5570dba2719a01506a9fd0 (diff)
Merge tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:

 - SLAB deprecation: Following the discussion at LSF/MM 2023 [1] and no
   objections, the SLAB allocator is deprecated by renaming the config
   option (to make its users notice) to CONFIG_SLAB_DEPRECATED with
   updated help text. SLUB should be used instead. Existing defconfigs
   with CONFIG_SLAB are also updated.

 - SLAB_NO_MERGE kmem_cache flag (Jesper Dangaard Brouer): There are
   (very limited) cases where kmem_cache merging is undesirable, and the
   existing ways to prevent it are hacky. Introduce a new flag to do
   that cleanly and convert the existing hacky users. Btrfs plans to use
   this for debug kernel builds (that use case is always fine),
   networking for performance reasons (that should be very rare).

 - Replace the usage of weak PRNGs (David Keisar Schmidt): In addition
   to using stronger RNGs for the security related features, the code is
   a bit cleaner.

 - Misc code cleanups (SeongJae Park, Xiongwei Song, Zhen Lei, and
   zhaoxinchao)

Link: https://lwn.net/Articles/932201/ [1]

* tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm/slab_common: use SLAB_NO_MERGE instead of negative refcount
  mm/slab: break up RCU readers on SLAB_TYPESAFE_BY_RCU example code
  mm/slab: add a missing semicolon on SLAB_TYPESAFE_BY_RCU example code
  mm/slab_common: reduce an if statement in create_cache()
  mm/slab: introduce kmem_cache flag SLAB_NO_MERGE
  mm/slab: rename CONFIG_SLAB to CONFIG_SLAB_DEPRECATED
  mm/slab: remove HAVE_HARDENED_USERCOPY_ALLOCATOR
  mm/slab_common: Replace invocation of weak PRNG
  mm/slab: Replace invocation of weak PRNG
  slub: Don't read nr_slabs and total_objects directly
  slub: Remove slabs_node() function
  slub: Remove CONFIG_SMP defined check
  slub: Put objects_show() into CONFIG_SLUB_DEBUG enabled block
  slub: Correct the error code when slab_kset is NULL
  mm/slab: correct return values in comment for _kmem_cache_create()
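The SLAB_NO_MERGE item above adds an ordinary slab flag that is passed at cache creation time. A minimal sketch of an opted-out cache follows; only the flag itself comes from this series, while the struct, cache name and module boilerplate are purely illustrative:

/*
 * Hypothetical module-local cache that opts out of slab merging via
 * SLAB_NO_MERGE, so it stays visible as its own entry under
 * /sys/kernel/slab instead of being aliased with same-sized caches.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct foo_record {			/* illustrative object type */
	u64 id;
	char payload[56];
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_record",
				      sizeof(struct foo_record), 0,
				      SLAB_NO_MERGE, NULL);
	if (!foo_cache)
		return -ENOMEM;
	return 0;
}

static void __exit foo_cache_exit(void)
{
	kmem_cache_destroy(foo_cache);
}

module_init(foo_cache_init);
module_exit(foo_cache_exit);
MODULE_LICENSE("GPL");

Before this flag, code wanting the same effect had to abuse implementation details (such as a negative refcount, as the first commit in the list above removes); passing SLAB_NO_MERGE expresses the intent directly.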
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	47
1 file changed, 18 insertions(+), 29 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 7529626bbec2..e3b5d5c0eb3a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1365,14 +1365,6 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
list_del(&slab->slab_list);
}
-/* Tracking of the number of slabs for debugging purposes */
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
-{
- struct kmem_cache_node *n = get_node(s, node);
-
- return atomic_long_read(&n->nr_slabs);
-}
-
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
return atomic_long_read(&n->nr_slabs);
@@ -1743,8 +1735,6 @@ slab_flags_t kmem_cache_flags(unsigned int object_size,
#define disable_higher_order_debug 0
-static inline unsigned long slabs_node(struct kmem_cache *s, int node)
- { return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
@@ -4623,7 +4613,7 @@ bool __kmem_cache_empty(struct kmem_cache *s)
struct kmem_cache_node *n;
for_each_kmem_cache_node(s, node, n)
- if (n->nr_partial || slabs_node(s, node))
+ if (n->nr_partial || node_nr_slabs(n))
return false;
return true;
}
@@ -4640,7 +4630,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
/* Attempt to free all objects */
for_each_kmem_cache_node(s, node, n) {
free_partial(s, n);
- if (n->nr_partial || slabs_node(s, node))
+ if (n->nr_partial || node_nr_slabs(n))
return 1;
}
return 0;
@@ -4853,7 +4843,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
list_for_each_entry_safe(slab, t, &discard, slab_list)
free_slab(s, slab);
- if (slabs_node(s, node))
+ if (node_nr_slabs(n))
ret = 1;
}
@@ -5191,9 +5181,9 @@ static int validate_slab_node(struct kmem_cache *s,
validate_slab(s, slab, obj_map);
count++;
}
- if (count != atomic_long_read(&n->nr_slabs)) {
+ if (count != node_nr_slabs(n)) {
pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
- s->name, count, atomic_long_read(&n->nr_slabs));
+ s->name, count, node_nr_slabs(n));
slab_add_kunit_errors();
}
@@ -5477,12 +5467,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
for_each_kmem_cache_node(s, node, n) {
if (flags & SO_TOTAL)
- x = atomic_long_read(&n->total_objects);
+ x = node_nr_objs(n);
else if (flags & SO_OBJECTS)
- x = atomic_long_read(&n->total_objects) -
- count_partial(n, count_free);
+ x = node_nr_objs(n) - count_partial(n, count_free);
else
- x = atomic_long_read(&n->nr_slabs);
+ x = node_nr_slabs(n);
total += x;
nodes[node] += x;
}
@@ -5637,12 +5626,6 @@ static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(cpu_slabs);
-static ssize_t objects_show(struct kmem_cache *s, char *buf)
-{
- return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
-}
-SLAB_ATTR_RO(objects);
-
static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
@@ -5671,7 +5654,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
objects = (slabs * oo_objects(s->oo)) / 2;
len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
-#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
+#ifdef CONFIG_SLUB_CPU_PARTIAL
for_each_online_cpu(cpu) {
struct slab *slab;
@@ -5737,6 +5720,12 @@ static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(total_objects);
+static ssize_t objects_show(struct kmem_cache *s, char *buf)
+{
+ return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects);
+
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
@@ -5968,7 +5957,6 @@ static struct attribute *slab_attrs[] = {
&order_attr.attr,
&min_partial_attr.attr,
&cpu_partial_attr.attr,
- &objects_attr.attr,
&objects_partial_attr.attr,
&partial_attr.attr,
&cpu_slabs_attr.attr,
@@ -5982,6 +5970,7 @@ static struct attribute *slab_attrs[] = {
&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
&total_objects_attr.attr,
+ &objects_attr.attr,
&slabs_attr.attr,
&sanity_checks_attr.attr,
&trace_attr.attr,
@@ -6249,7 +6238,7 @@ static int __init slab_sysfs_init(void)
if (!slab_kset) {
mutex_unlock(&slab_mutex);
pr_err("Cannot register slab subsystem.\n");
- return -ENOSYS;
+ return -ENOMEM;
}
slab_state = FULL;
@@ -6421,7 +6410,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
unsigned long flags;
struct slab *slab;
- if (!atomic_long_read(&n->nr_slabs))
+ if (!node_nr_slabs(n))
continue;
spin_lock_irqsave(&n->list_lock, flags);