author    | Vlastimil Babka <vbabka@suse.cz> | 2021-11-03 18:19:48 +0100
committer | Vlastimil Babka <vbabka@suse.cz> | 2022-01-06 12:26:29 +0100
commit    | 8dae0cfed57357c0a627f377386ade1591f4d9ea (patch)
tree      | 75c221232f269505b26d0cf6ce2f0065c4eb4204 /mm/kfence
parent    | 6e48a966dfd18987fec9385566a67d36e2b5fc11 (diff)
mm/kfence: Convert kfence_guarded_alloc() to struct slab
The function sets some fields that are being moved from struct page to
struct slab, so it needs to be converted.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
Diffstat (limited to 'mm/kfence')
-rw-r--r-- | mm/kfence/core.c        | 12
-rw-r--r-- | mm/kfence/kfence_test.c |  6
2 files changed, 9 insertions, 9 deletions
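
For orientation before reading the diff: the fields being filled in by kfence_guarded_alloc() now live in struct slab rather than struct page. The sketch below is a simplified, illustrative view of only the members this patch touches; the authoritative definition in mm/slab.h overlays struct page, differs between CONFIG_SLAB and CONFIG_SLUB, and contains further fields elided here.

/*
 * Simplified, illustrative sketch of the struct slab members that
 * kfence_guarded_alloc() fills in after this patch.  Not the exact
 * mm/slab.h layout; allocator-specific details are omitted.
 */
struct slab {
	struct kmem_cache *slab_cache;	/* owning cache (SLAB and SLUB) */
#ifdef CONFIG_SLUB
	unsigned int objects;		/* object count (a bitfield upstream) */
#endif
#ifdef CONFIG_SLAB
	void *s_mem;			/* address of the first object */
#endif
	/* ... */
};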
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 09945784df9e..4eb60cf5ff8b 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,7 +360,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 {
 	struct kfence_metadata *meta = NULL;
 	unsigned long flags;
-	struct page *page;
+	struct slab *slab;
 	void *addr;
 
 	/* Try to obtain a free object. */
@@ -424,13 +424,13 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 
 	alloc_covered_add(alloc_stack_hash, 1);
 
-	/* Set required struct page fields. */
-	page = virt_to_page(meta->addr);
-	page->slab_cache = cache;
+	/* Set required slab fields. */
+	slab = virt_to_slab((void *)meta->addr);
+	slab->slab_cache = cache;
 	if (IS_ENABLED(CONFIG_SLUB))
-		page->objects = 1;
+		slab->objects = 1;
 	if (IS_ENABLED(CONFIG_SLAB))
-		page->s_mem = addr;
+		slab->s_mem = addr;
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index f7276711d7b9..a22b1af85577 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -282,7 +282,7 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 			alloc = kmalloc(size, gfp);
 
 		if (is_kfence_address(alloc)) {
-			struct page *page = virt_to_head_page(alloc);
+			struct slab *slab = virt_to_slab(alloc);
 			struct kmem_cache *s = test_cache ?:
 					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
 
@@ -291,8 +291,8 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
 			 * even for KFENCE objects; these are required so that
 			 * memcg accounting works correctly.
 			 */
-			KUNIT_EXPECT_EQ(test, obj_to_index(s, page_slab(page), alloc), 0U);
-			KUNIT_EXPECT_EQ(test, objs_per_slab(s, page_slab(page)), 1);
+			KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
+			KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);
 
 			if (policy == ALLOCATE_ANY)
 				return alloc;
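
The helper used on both sides of the conversion, virt_to_slab(), is not itself part of this diff. As a rough sketch of its behaviour (the exact definition is in mm/slab.h from the same series, so treat the details as approximate): it resolves the address to its backing folio and returns it reinterpreted as struct slab only when that folio actually backs a slab; KFENCE sets its pool pages up as slab pages at initialization, so the lookup is expected to succeed for the addresses involved here.

/*
 * Approximate shape of virt_to_slab(); see mm/slab.h in this series for
 * the real definition.  The address is resolved to its folio, and the
 * folio is returned as a struct slab only if it backs a slab.
 */
static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}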