-rw-r--r--  include/linux/memcontrol.h |  1
-rw-r--r--  mm/memcontrol.c            | 48
-rw-r--r--  mm/slab.h                  |  2
-rw-r--r--  mm/slab_common.c           | 22
4 files changed, 15 insertions, 58 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 11fd18b3d6c6..2ac84dcfc9e5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1418,7 +1418,6 @@ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
 
 extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 874704c4a48a..c713867e496d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -399,8 +399,6 @@ void memcg_put_cache_ids(void)
  */
 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
-
-struct workqueue_struct *memcg_kmem_cache_wq;
 #endif
 
 static int memcg_shrinker_map_size;
@@ -2902,39 +2900,6 @@ static void memcg_free_cache_id(int id)
 	ida_simple_remove(&memcg_cache_ida, id);
 }
 
-struct memcg_kmem_cache_create_work {
-	struct kmem_cache *cachep;
-	struct work_struct work;
-};
-
-static void memcg_kmem_cache_create_func(struct work_struct *w)
-{
-	struct memcg_kmem_cache_create_work *cw =
-		container_of(w, struct memcg_kmem_cache_create_work, work);
-	struct kmem_cache *cachep = cw->cachep;
-
-	memcg_create_kmem_cache(cachep);
-
-	kfree(cw);
-}
-
-/*
- * Enqueue the creation of a per-memcg kmem_cache.
- */
-static void memcg_schedule_kmem_cache_create(struct kmem_cache *cachep)
-{
-	struct memcg_kmem_cache_create_work *cw;
-
-	cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-	if (!cw)
-		return;
-
-	cw->cachep = cachep;
-	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
-
-	queue_work(memcg_kmem_cache_wq, &cw->work);
-}
-
 /**
  * memcg_kmem_get_cache: select memcg or root cache for allocation
  * @cachep: the original global kmem cache
@@ -2951,7 +2916,7 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 
 	memcg_cachep = READ_ONCE(cachep->memcg_params.memcg_cache);
 	if (unlikely(!memcg_cachep)) {
-		memcg_schedule_kmem_cache_create(cachep);
+		queue_work(system_wq, &cachep->memcg_params.work);
 		return cachep;
 	}
 
@@ -7022,17 +6987,6 @@ static int __init mem_cgroup_init(void)
 {
 	int cpu, node;
 
-#ifdef CONFIG_MEMCG_KMEM
-	/*
-	 * Kmem cache creation is mostly done with the slab_mutex held,
-	 * so use a workqueue with limited concurrency to avoid stalling
-	 * all worker threads in case lots of cgroups are created and
-	 * destroyed simultaneously.
-	 */
-	memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
-	BUG_ON(!memcg_kmem_cache_wq);
-#endif
-
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
diff --git a/mm/slab.h b/mm/slab.h
index e716b80befc2..fd9fcdfb3789 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -45,12 +45,14 @@ struct kmem_cache {
  * @memcg_cache: pointer to memcg kmem cache, used by all non-root memory
  *		cgroups.
  * @root_caches_node: list node for slab_root_caches list.
+ * @work: work struct used to create the non-root cache.
  */
 struct memcg_cache_params {
 	struct kmem_cache *root_cache;
 
 	struct kmem_cache *memcg_cache;
 	struct list_head __root_caches_node;
+	struct work_struct work;
 };
 #endif /* CONFIG_SLOB */
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e752132eb64d..b898698f6c8a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -134,10 +134,18 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
 
 LIST_HEAD(slab_root_caches);
 
+static void memcg_kmem_cache_create_func(struct work_struct *work)
+{
+	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
+						 memcg_params.work);
+	memcg_create_kmem_cache(cachep);
+}
+
 void slab_init_memcg_params(struct kmem_cache *s)
 {
 	s->memcg_params.root_cache = NULL;
 	s->memcg_params.memcg_cache = NULL;
+	INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
 }
 
 static void init_memcg_params(struct kmem_cache *s,
@@ -586,15 +594,9 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static void flush_memcg_workqueue(struct kmem_cache *s)
+static void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
-	/*
-	 * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
-	 * deactivates the memcg kmem_caches through workqueue. Make sure all
-	 * previous workitems on workqueue are processed.
-	 */
-	if (likely(memcg_kmem_cache_wq))
-		flush_workqueue(memcg_kmem_cache_wq);
+	cancel_work_sync(&s->memcg_params.work);
 }
 #else
 static inline int shutdown_memcg_caches(struct kmem_cache *s)
@@ -602,7 +604,7 @@ static inline int shutdown_memcg_caches(struct kmem_cache *s)
 	return 0;
 }
 
-static inline void flush_memcg_workqueue(struct kmem_cache *s)
+static inline void cancel_memcg_cache_creation(struct kmem_cache *s)
 {
 }
 #endif /* CONFIG_MEMCG_KMEM */
@@ -621,7 +623,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 	if (unlikely(!s))
 		return;
 
-	flush_memcg_workqueue(s);
+	cancel_memcg_cache_creation(s);
 
 	get_online_cpus();
 	get_online_mems();
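Note (not part of the patch): a condensed sketch of the lifecycle the diff switches to, with the work item embedded in memcg_cache_params, queued on the shared system_wq from the allocation slow path, and cancelled synchronously before the root cache is destroyed. Everything below is taken from the hunks above except memcg_schedule_creation(), a hypothetical wrapper for the queue_work() call that the patch places directly in memcg_kmem_get_cache(); the surrounding kernel context (struct definitions, memcg_create_kmem_cache()) is assumed.

/* Illustrative only -- condensed from the hunks above, not a complete file. */

static void memcg_kmem_cache_create_func(struct work_struct *work)
{
	/* The work item is embedded, so container_of() recovers the cache. */
	struct kmem_cache *cachep = container_of(work, struct kmem_cache,
						 memcg_params.work);
	memcg_create_kmem_cache(cachep);
}

void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.root_cache = NULL;
	s->memcg_params.memcg_cache = NULL;
	/* One permanently initialized work item per root cache. */
	INIT_WORK(&s->memcg_params.work, memcg_kmem_cache_create_func);
}

/*
 * Allocation slow path: no GFP_NOWAIT allocation of a one-off work struct
 * anymore; queue_work() is a no-op while the item is already pending, so
 * repeated misses before the memcg cache exists queue it only once.
 * (Hypothetical wrapper; the patch calls queue_work() inline.)
 */
static void memcg_schedule_creation(struct kmem_cache *cachep)
{
	queue_work(system_wq, &cachep->memcg_params.work);
}

/*
 * kmem_cache_destroy() path: waiting for this one work item replaces
 * flushing the dedicated memcg_kmem_cache workqueue.
 */
static void cancel_memcg_cache_creation(struct kmem_cache *s)
{
	cancel_work_sync(&s->memcg_params.work);
}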