Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 51 ++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 40 insertions(+), 11 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8da8501db942..185ab3e27d13 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2237,8 +2237,12 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
return NULL;
}
- /* Mark the pages as accessible, now that they are mapped. */
- mem = kasan_unpoison_vmalloc(mem, size);
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+ */
+ mem = kasan_unpoison_vmalloc(mem, size, KASAN_VMALLOC_NONE);
return mem;
}
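
For reference, kasan_unpoison_vmalloc() now takes a third kasan_vmalloc_flags_t
argument. A sketch of the flag definitions from the companion
include/linux/kasan.h change is shown below for illustration only; it is not
part of this diff and the exact values may differ:

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE	((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT	((__force kasan_vmalloc_flags_t)0x01u)	/* also initialize the memory */
#define KASAN_VMALLOC_VM_ALLOC	((__force kasan_vmalloc_flags_t)0x02u)	/* VM_ALLOC (vmalloc proper) mapping */
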
@@ -2472,9 +2476,12 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
* best-effort approach, as they can be mapped outside of vmalloc code.
* For VM_ALLOC mappings, the pages are marked as accessible after
* getting mapped in __vmalloc_node_range().
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
if (!(flags & VM_ALLOC))
- area->addr = kasan_unpoison_vmalloc(area->addr, requested_size);
+ area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
+ KASAN_VMALLOC_NONE);
return area;
}
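
The comments above defer the non-VM_ALLOC behaviour to
__kasan_unpoison_vmalloc(). A simplified sketch of that check is shown below,
assuming mm/kasan's internal helpers (kasan_random_tag(), set_tag(),
kasan_unpoison()); the real function lives in mm/kasan/, is not part of this
diff, and handles additional cases:

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	u8 tag;

	/*
	 * Hardware tag-based KASAN can only tag memory mapped with the
	 * tagged page protection (see arch_vmap_pgprot_tagged()), which is
	 * only applied for VM_ALLOC mappings, so leave others untouched.
	 */
	if (!(flags & KASAN_VMALLOC_VM_ALLOC))
		return (void *)start;

	tag = kasan_random_tag();
	start = set_tag(start, tag);
	kasan_unpoison(start, size, flags & KASAN_VMALLOC_INIT);

	return (void *)start;
}
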
@@ -3084,6 +3091,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
{
struct vm_struct *area;
void *ret;
+ kasan_vmalloc_flags_t kasan_flags;
unsigned long real_size = size;
unsigned long real_align = align;
unsigned int shift = PAGE_SHIFT;
@@ -3136,21 +3144,39 @@ again:
goto fail;
}
- /*
- * Modify protection bits to allow tagging.
- * This must be done before mapping by __vmalloc_area_node().
- */
+ /* Prepare arguments for __vmalloc_area_node(). */
if (kasan_hw_tags_enabled() &&
- pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ pgprot_val(prot) == pgprot_val(PAGE_KERNEL)) {
+ /*
+ * Modify protection bits to allow tagging.
+ * This must be done before mapping in __vmalloc_area_node().
+ */
prot = arch_vmap_pgprot_tagged(prot);
+ /*
+ * Skip page_alloc poisoning and zeroing for physical pages
+ * backing VM_ALLOC mapping. Memory is instead poisoned and
+ * zeroed by kasan_unpoison_vmalloc().
+ */
+ gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO;
+ }
+
/* Allocate physical pages and map them into vmalloc space. */
ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
if (!ret)
goto fail;
- /* Mark the pages as accessible, now that they are mapped. */
- area->addr = kasan_unpoison_vmalloc(area->addr, real_size);
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * The init condition should match the one in post_alloc_hook()
+ * (except for the should_skip_init() check) to make sure that memory
+ * is initialized under the same conditions regardless of the enabled
+ * KASAN mode.
+ */
+ kasan_flags = KASAN_VMALLOC_VM_ALLOC;
+ if (!want_init_on_free() && want_init_on_alloc(gfp_mask))
+ kasan_flags |= KASAN_VMALLOC_INIT;
+ area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags);
/*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
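
The KASAN_VMALLOC_INIT decision above is meant to mirror post_alloc_hook() in
mm/page_alloc.c; the should_skip_init() part is dropped because this path has
just set __GFP_SKIP_ZERO itself and relies on kasan_unpoison_vmalloc() for
initialization. A condensed, illustrative version of the page allocator side is
sketched below (the helper name is hypothetical; the real hook open-codes this
and also covers hardware tag initialization):

static bool want_page_init(gfp_t gfp_flags)
{
	return !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
	       !should_skip_init(gfp_flags);
}
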
@@ -3850,10 +3876,13 @@ retry:
/*
* Mark allocated areas as accessible. Do it now as a best-effort
* approach, as they can be mapped outside of vmalloc code.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
*/
for (area = 0; area < nr_vms; area++)
vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- vms[area]->size);
+ vms[area]->size,
+ KASAN_VMALLOC_NONE);
kfree(vas);
return vms;