author    Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 11:14:47 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 11:14:47 +0100
commit    e2b623fbe6a34bce1332584212ae101ebc2508f5
tree      eb07421d683c90cbed7326cdcc778cea9a08d48d /arch/s390/mm
parent    70408a9987d1ffac006e21b965f0c30dd22b0af2
parent    f822ad2c2c03af85a531c5174136b6d5b1abc566
Merge tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
- Improved access control for the zcrypt driver; multiple device nodes
  can now be created with different access control lists
- Extend the pkey API to provide random protected keys; this is useful
  for encrypted swap devices with ephemeral protected keys (see the
  usage sketch after this list)
- Add support for virtually mapped kernel stacks
- Rework the early boot code; this moves memory detection into the boot
  code that runs prior to decompression
- Add KASAN support
- Bug fixes and cleanups
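
As a concrete illustration of the pkey item above: a minimal user-space
sketch that asks the kernel for a random AES-256 protected key, the way an
encrypted-swap setup would. It assumes the PKEY_GENPROTK ioctl and struct
pkey_genprotk added by this series (names inferred from the shortlog below;
verify against arch/s390/include/uapi/asm/pkey.h before relying on them):

    /* Hedged sketch: generate an ephemeral random protected key.
     * PKEY_GENPROTK and struct pkey_genprotk are assumed from the
     * shortlog entry "Introduce new API for random protected key
     * generation"; check the uapi header for the real definitions. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <asm/pkey.h>   /* PKEY_GENPROTK, struct pkey_genprotk */

    int main(void)
    {
            struct pkey_genprotk genprotk = { .keytype = PKEY_KEYTYPE_AES_256 };
            int fd = open("/dev/pkey", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/pkey");
                    return 1;
            }
            if (ioctl(fd, PKEY_GENPROTK, &genprotk) < 0) {
                    perror("PKEY_GENPROTK");
                    close(fd);
                    return 1;
            }
            /* genprotk.protkey is wrapped by this machine's hidden
             * wrapping key and vanishes on reboot - exactly what
             * ephemeral swap encryption wants. */
            printf("protected key: type %u, len %u\n",
                   genprotk.protkey.type, genprotk.protkey.len);
            close(fd);
            return 0;
    }

Because a protected key is only ever handled in wrapped form, pairing it
with the paes cipher (also extended in this merge) is designed so the
effective clear key never sits in dumpable memory.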
* tag 's390-4.20-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (83 commits)
s390/pkey: move pckmo subfunction available checks away from module init
s390/kasan: support preemptible kernel build
s390/pkey: Load pkey kernel module automatically
s390/perf: Return error when debug_register fails
s390/sthyi: Fix machine name validity indication
s390/zcrypt: fix broken zcrypt_send_cprb in-kernel api function
s390/vmalloc: fix VMALLOC_START calculation
s390/mem_detect: add missing include
s390/dumpstack: print psw mask and address again
s390/crypto: Enhance paes cipher to accept variable length key material
s390/pkey: Introduce new API for transforming key blobs
s390/pkey: Introduce new API for random protected key verification
s390/pkey: Add sysfs attributes to emit secure key blobs
s390/pkey: Add sysfs attributes to emit protected key blobs
s390/pkey: Define protected key blob format
s390/pkey: Introduce new API for random protected key generation
s390/zcrypt: add ap_adapter_mask sysfs attribute
s390/zcrypt: provide apfs failure code on type 86 error reply
s390/zcrypt: zcrypt device driver cleanup
s390/kasan: add support for mem= kernel parameter
...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile           |   6
-rw-r--r--  arch/s390/mm/dump_pagetables.c  |  58
-rw-r--r--  arch/s390/mm/fault.c            |  38
-rw-r--r--  arch/s390/mm/init.c             |   5
-rw-r--r--  arch/s390/mm/kasan_init.c       | 387
-rw-r--r--  arch/s390/mm/maccess.c          |  25
-rw-r--r--  arch/s390/mm/mem_detect.c       |  62
7 files changed, 486 insertions(+), 95 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 33fe418506bc..f5880bfd1b0c 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -4,10 +4,12 @@
 #
 obj-y := init.o fault.o extmem.o mmap.o vmem.o maccess.o
-obj-y += page-states.o gup.o pageattr.o mem_detect.o
-obj-y += pgtable.o pgalloc.o
+obj-y += page-states.o gup.o pageattr.o pgtable.o pgalloc.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_S390_PTDUMP) += dump_pagetables.o
 obj-$(CONFIG_PGSTE) += gmap.o
+
+KASAN_SANITIZE_kasan_init.o := n
+obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 7cdea2ec51e9..363f6470d742 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -3,6 +3,8 @@
 #include <linux/debugfs.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/kasan.h>
+#include <asm/kasan.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
@@ -17,18 +19,26 @@ enum address_markers_idx {
 	IDENTITY_NR = 0,
 	KERNEL_START_NR,
 	KERNEL_END_NR,
+#ifdef CONFIG_KASAN
+	KASAN_SHADOW_START_NR,
+	KASAN_SHADOW_END_NR,
+#endif
 	VMEMMAP_NR,
 	VMALLOC_NR,
 	MODULES_NR,
 };
 
 static struct addr_marker address_markers[] = {
-	[IDENTITY_NR] = {0, "Identity Mapping"},
-	[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
-	[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
-	[VMEMMAP_NR] = {0, "vmemmap Area"},
-	[VMALLOC_NR] = {0, "vmalloc Area"},
-	[MODULES_NR] = {0, "Modules Area"},
+	[IDENTITY_NR]           = {0, "Identity Mapping"},
+	[KERNEL_START_NR]       = {(unsigned long)_stext, "Kernel Image Start"},
+	[KERNEL_END_NR]         = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KASAN
+	[KASAN_SHADOW_START_NR] = {KASAN_SHADOW_START, "Kasan Shadow Start"},
+	[KASAN_SHADOW_END_NR]   = {KASAN_SHADOW_END, "Kasan Shadow End"},
+#endif
+	[VMEMMAP_NR]            = {0, "vmemmap Area"},
+	[VMALLOC_NR]            = {0, "vmalloc Area"},
+	[MODULES_NR]            = {0, "Modules Area"},
 	{ -1, NULL }
 };
@@ -80,7 +90,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 	} else if (prot != cur || level != st->level ||
 		   st->current_address >= st->marker[1].start_address) {
 		/* Print the actual finished series */
-		seq_printf(m, "0x%0*lx-0x%0*lx",
+		seq_printf(m, "0x%0*lx-0x%0*lx ",
 			   width, st->start_address,
 			   width, st->current_address);
 		delta = (st->current_address - st->start_address) >> 10;
@@ -90,7 +100,7 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 		}
 		seq_printf(m, "%9lu%c ", delta, *unit);
 		print_prot(m, st->current_prot, st->level);
-		if (st->current_address >= st->marker[1].start_address) {
+		while (st->current_address >= st->marker[1].start_address) {
 			st->marker++;
 			seq_printf(m, "---[ %s ]---\n", st->marker->name);
 		}
@@ -100,6 +110,17 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 	}
 }
 
+#ifdef CONFIG_KASAN
+static void note_kasan_zero_page(struct seq_file *m, struct pg_state *st)
+{
+	unsigned int prot;
+
+	prot = pte_val(*kasan_zero_pte) &
+		(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
+	note_page(m, st, prot, 4);
+}
+#endif
+
 /*
  * The actual page table walker functions. In order to keep the
  * implementation of print_prot() short, we only check and pass
@@ -132,6 +153,13 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	pmd_t *pmd;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((pud_val(*pud) & PAGE_MASK) == __pa(kasan_zero_pmd)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_PMD && addr < max_addr; i++) {
 		st->current_address = addr;
 		pmd = pmd_offset(pud, addr);
@@ -156,6 +184,13 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 	pud_t *pud;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((p4d_val(*p4d) & PAGE_MASK) == __pa(kasan_zero_pud)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) {
 		st->current_address = addr;
 		pud = pud_offset(p4d, addr);
@@ -179,6 +214,13 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st,
 	p4d_t *p4d;
 	int i;
 
+#ifdef CONFIG_KASAN
+	if ((pgd_val(*pgd) & PAGE_MASK) == __pa(kasan_zero_p4d)) {
+		note_kasan_zero_page(m, st);
+		return;
+	}
+#endif
+
 	for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) {
 		st->current_address = addr;
 		p4d = p4d_offset(pgd, addr);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 72af23bacbb5..2b8f32f56e0c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -636,17 +636,19 @@ struct pfault_refbk {
 	u64 reserved;
 } __attribute__ ((packed, aligned(8)));
 
+static struct pfault_refbk pfault_init_refbk = {
+	.refdiagc = 0x258,
+	.reffcode = 0,
+	.refdwlen = 5,
+	.refversn = 2,
+	.refgaddr = __LC_LPP,
+	.refselmk = 1ULL << 48,
+	.refcmpmk = 1ULL << 48,
+	.reserved = __PF_RES_FIELD
+};
+
 int pfault_init(void)
 {
-	struct pfault_refbk refbk = {
-		.refdiagc = 0x258,
-		.reffcode = 0,
-		.refdwlen = 5,
-		.refversn = 2,
-		.refgaddr = __LC_LPP,
-		.refselmk = 1ULL << 48,
-		.refcmpmk = 1ULL << 48,
-		.reserved = __PF_RES_FIELD };
 	int rc;
 
 	if (pfault_disable)
@@ -658,18 +660,20 @@ int pfault_init(void)
 		"1: la %0,8\n"
 		"2:\n"
 		EX_TABLE(0b,1b)
-		: "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
+		: "=d" (rc)
+		: "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
 	return rc;
 }
 
+static struct pfault_refbk pfault_fini_refbk = {
+	.refdiagc = 0x258,
+	.reffcode = 1,
+	.refdwlen = 5,
+	.refversn = 2,
+};
+
 void pfault_fini(void)
 {
-	struct pfault_refbk refbk = {
-		.refdiagc = 0x258,
-		.reffcode = 1,
-		.refdwlen = 5,
-		.refversn = 2,
-	};
 
 	if (pfault_disable)
 		return;
@@ -678,7 +682,7 @@ void pfault_fini(void)
 		" diag %0,0,0x258\n"
 		"0: nopr %%r7\n"
 		EX_TABLE(0b,0b)
-		: : "a" (&refbk), "m" (refbk) : "cc");
+		: : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
 }
 
 static DEFINE_SPINLOCK(pfault_lock);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3fa3e5323612..92d7a153e72a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -42,6 +42,7 @@
 #include <asm/ctl_reg.h>
 #include <asm/sclp.h>
 #include <asm/set_memory.h>
+#include <asm/kasan.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
 
@@ -98,8 +99,9 @@ void __init paging_init(void)
 	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
 	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
 	vmem_map_init();
+	kasan_copy_shadow(init_mm.pgd);
 
-	/* enable virtual mapping in kernel mode */
+	/* enable virtual mapping in kernel mode */
 	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
@@ -107,6 +109,7 @@ void __init paging_init(void)
 	psw_bits(psw).dat = 1;
 	psw_bits(psw).as = PSW_BITS_AS_HOME;
 	__load_psw_mask(psw.mask);
+	kasan_free_early_identity();
 
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
new file mode 100644
index 000000000000..acb9645b762b
--- /dev/null
+++ b/arch/s390/mm/kasan_init.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kasan.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/kasan.h>
+#include <asm/mem_detect.h>
+#include <asm/processor.h>
+#include <asm/sclp.h>
+#include <asm/facility.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static unsigned long segment_pos __initdata;
+static unsigned long segment_low __initdata;
+static unsigned long pgalloc_pos __initdata;
+static unsigned long pgalloc_low __initdata;
+static unsigned long pgalloc_freeable __initdata;
+static bool has_edat __initdata;
+static bool has_nx __initdata;
+
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+static void __init kasan_early_panic(const char *reason)
+{
+	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
+	sclp_early_printk(reason);
+	disabled_wait(0);
+}
+
+static void * __init kasan_early_alloc_segment(void)
+{
+	segment_pos -= _SEGMENT_SIZE;
+
+	if (segment_pos < segment_low)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	return (void *)segment_pos;
+}
+
+static void * __init kasan_early_alloc_pages(unsigned int order)
+{
+	pgalloc_pos -= (PAGE_SIZE << order);
+
+	if (pgalloc_pos < pgalloc_low)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	return (void *)pgalloc_pos;
+}
+
+static void * __init kasan_early_crst_alloc(unsigned long val)
+{
+	unsigned long *table;
+
+	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
+	if (table)
+		crst_table_init(table, val);
+	return table;
+}
+
+static pte_t * __init kasan_early_pte_alloc(void)
+{
+	static void *pte_leftover;
+	pte_t *pte;
+
+	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
+
+	if (!pte_leftover) {
+		pte_leftover = kasan_early_alloc_pages(0);
+		pte = pte_leftover + _PAGE_TABLE_SIZE;
+	} else {
+		pte = pte_leftover;
+		pte_leftover = NULL;
+	}
+	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+	return pte;
+}
+
+enum populate_mode {
+	POPULATE_ONE2ONE,
+	POPULATE_MAP,
+	POPULATE_ZERO_SHADOW
+};
+static void __init kasan_early_vmemmap_populate(unsigned long address,
+						unsigned long end,
+						enum populate_mode mode)
+{
+	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+	pgd_t *pg_dir;
+	p4d_t *p4_dir;
+	pud_t *pu_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+
+	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
+	if (!has_nx)
+		pgt_prot_zero &= ~_PAGE_NOEXEC;
+	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
+	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);
+
+	while (address < end) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PGDIR_SIZE) &&
+			    end - address >= PGDIR_SIZE) {
+				pgd_populate(&init_mm, pg_dir, kasan_zero_p4d);
+				address = (address + PGDIR_SIZE) & PGDIR_MASK;
+				continue;
+			}
+			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
+			pgd_populate(&init_mm, pg_dir, p4_dir);
+		}
+
+		p4_dir = p4d_offset(pg_dir, address);
+		if (p4d_none(*p4_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, P4D_SIZE) &&
+			    end - address >= P4D_SIZE) {
+				p4d_populate(&init_mm, p4_dir, kasan_zero_pud);
+				address = (address + P4D_SIZE) & P4D_MASK;
+				continue;
+			}
+			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
+			p4d_populate(&init_mm, p4_dir, pu_dir);
+		}
+
+		pu_dir = pud_offset(p4_dir, address);
+		if (pud_none(*pu_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PUD_SIZE) &&
+			    end - address >= PUD_SIZE) {
+				pud_populate(&init_mm, pu_dir, kasan_zero_pmd);
+				address = (address + PUD_SIZE) & PUD_MASK;
+				continue;
+			}
+			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
+			pud_populate(&init_mm, pu_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pu_dir, address);
+		if (pmd_none(*pm_dir)) {
+			if (mode == POPULATE_ZERO_SHADOW &&
+			    IS_ALIGNED(address, PMD_SIZE) &&
+			    end - address >= PMD_SIZE) {
+				pmd_populate(&init_mm, pm_dir, kasan_zero_pte);
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+			/* the first megabyte of 1:1 is mapped with 4k pages */
+			if (has_edat && address && end - address >= PMD_SIZE &&
+			    mode != POPULATE_ZERO_SHADOW) {
+				void *page;
+
+				if (mode == POPULATE_ONE2ONE) {
+					page = (void *)address;
+				} else {
+					page = kasan_early_alloc_segment();
+					memset(page, 0, _SEGMENT_SIZE);
+				}
+				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+				address = (address + PMD_SIZE) & PMD_MASK;
+				continue;
+			}
+
+			pt_dir = kasan_early_pte_alloc();
+			pmd_populate(&init_mm, pm_dir, pt_dir);
+		} else if (pmd_large(*pm_dir)) {
+			address = (address + PMD_SIZE) & PMD_MASK;
+			continue;
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			void *page;
+
+			switch (mode) {
+			case POPULATE_ONE2ONE:
+				page = (void *)address;
+				pte_val(*pt_dir) = __pa(page) | pgt_prot;
+				break;
+			case POPULATE_MAP:
+				page = kasan_early_alloc_pages(0);
+				memset(page, 0, PAGE_SIZE);
+				pte_val(*pt_dir) = __pa(page) | pgt_prot;
+				break;
+			case POPULATE_ZERO_SHADOW:
+				page = kasan_zero_page;
+				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
+				break;
+			}
+		}
+		address += PAGE_SIZE;
+	}
+}
+
+static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
+{
+	unsigned long asce_bits;
+
+	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
+	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
+	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
+	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
+	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
+}
+
+static void __init kasan_enable_dat(void)
+{
+	psw_t psw;
+
+	psw.mask = __extract_psw();
+	psw_bits(psw).dat = 1;
+	psw_bits(psw).as = PSW_BITS_AS_HOME;
+	__load_psw_mask(psw.mask);
+}
+
+static void __init kasan_early_detect_facilities(void)
+{
+	__stfle(S390_lowcore.stfle_fac_list,
+		ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+	if (test_facility(8)) {
+		has_edat = true;
+		__ctl_set_bit(0, 23);
+	}
+	if (!noexec_disabled && test_facility(130)) {
+		has_nx = true;
+		__ctl_set_bit(0, 20);
+	}
+}
+
+static unsigned long __init get_mem_detect_end(void)
+{
+	unsigned long start;
+	unsigned long end;
+
+	if (mem_detect.count) {
+		__get_mem_detect_block(mem_detect.count - 1, &start, &end);
+		return end;
+	}
+	return 0;
+}
+
+void __init kasan_early_init(void)
+{
+	unsigned long untracked_mem_end;
+	unsigned long shadow_alloc_size;
+	unsigned long initrd_end;
+	unsigned long asce_type;
+	unsigned long memsize;
+	unsigned long vmax;
+	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
+	pte_t pte_z;
+	pmd_t pmd_z = __pmd(__pa(kasan_zero_pte) | _SEGMENT_ENTRY);
+	pud_t pud_z = __pud(__pa(kasan_zero_pmd) | _REGION3_ENTRY);
+	p4d_t p4d_z = __p4d(__pa(kasan_zero_pud) | _REGION2_ENTRY);
+
+	kasan_early_detect_facilities();
+	if (!has_nx)
+		pgt_prot &= ~_PAGE_NOEXEC;
+	pte_z = __pte(__pa(kasan_zero_page) | pgt_prot);
+
+	memsize = get_mem_detect_end();
+	if (!memsize)
+		kasan_early_panic("cannot detect physical memory size\n");
+	/* respect mem= cmdline parameter */
+	if (memory_end_set && memsize > memory_end)
+		memsize = memory_end;
+	memsize = min(memsize, KASAN_SHADOW_START);
+
+	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
+		/* 4 level paging */
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
+		crst_table_init((unsigned long *)early_pg_dir,
+				_REGION2_ENTRY_EMPTY);
+		untracked_mem_end = vmax = _REGION1_SIZE;
+		asce_type = _ASCE_TYPE_REGION2;
+	} else {
+		/* 3 level paging */
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
+		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
+		crst_table_init((unsigned long *)early_pg_dir,
+				_REGION3_ENTRY_EMPTY);
+		untracked_mem_end = vmax = _REGION2_SIZE;
+		asce_type = _ASCE_TYPE_REGION3;
+	}
+
+	/* init kasan zero shadow */
+	crst_table_init((unsigned long *)kasan_zero_p4d, p4d_val(p4d_z));
+	crst_table_init((unsigned long *)kasan_zero_pud, pud_val(pud_z));
+	crst_table_init((unsigned long *)kasan_zero_pmd, pmd_val(pmd_z));
+	memset64((u64 *)kasan_zero_pte, pte_val(pte_z), PTRS_PER_PTE);
+
+	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
+	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
+		initrd_end =
+		    round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
+		pgalloc_low = max(pgalloc_low, initrd_end);
+	}
+
+	if (pgalloc_low + shadow_alloc_size > memsize)
+		kasan_early_panic("out of memory during initialisation\n");
+
+	if (has_edat) {
+		segment_pos = round_down(memsize, _SEGMENT_SIZE);
+		segment_low = segment_pos - shadow_alloc_size;
+		pgalloc_pos = segment_low;
+	} else {
+		pgalloc_pos = memsize;
+	}
+	init_mm.pgd = early_pg_dir;
+	/*
+	 * Current memory layout:
+	 * +- 0 -------------+   +- shadow start -+
+	 * | 1:1 ram mapping |  /| 1/8 ram        |
+	 * +- end of ram ----+ / +----------------+
+	 * | ... gap ...     |/  |     kasan      |
+	 * +- shadow start --+   |     zero       |
+	 * | 1/8 addr space  |   |     page       |
+	 * +- shadow end    -+   |     mapping    |
+	 * | ... gap ...     |\  |  (untracked)   |
+	 * +- modules vaddr -+ \ +----------------+
+	 * | 2Gb             |  \|    unmapped    | allocated per module
+	 * +-----------------+   +- shadow end ---+
+	 */
+	/* populate kasan shadow (for identity mapping and zero page mapping) */
+	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
+	if (IS_ENABLED(CONFIG_MODULES))
+		untracked_mem_end = vmax - MODULES_LEN;
+	kasan_early_vmemmap_populate(__sha(max_physmem_end),
+				     __sha(untracked_mem_end),
+				     POPULATE_ZERO_SHADOW);
+	/* memory allocated for identity mapping structs will be freed later */
+	pgalloc_freeable = pgalloc_pos;
+	/* populate identity mapping */
+	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
+	kasan_set_pgd(early_pg_dir, asce_type);
+	kasan_enable_dat();
+	/* enable kasan */
+	init_task.kasan_depth = 0;
+	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
+	sclp_early_printk("KernelAddressSanitizer initialized\n");
+}
+
+void __init kasan_copy_shadow(pgd_t *pg_dir)
+{
+	/*
+	 * At this point we are still running on early pages setup early_pg_dir,
+	 * while swapper_pg_dir has just been initialized with identity mapping.
+	 * Carry over shadow memory region from early_pg_dir to swapper_pg_dir.
+	 */
+
+	pgd_t *pg_dir_src;
+	pgd_t *pg_dir_dst;
+	p4d_t *p4_dir_src;
+	p4d_t *p4_dir_dst;
+	pud_t *pu_dir_src;
+	pud_t *pu_dir_dst;
+
+	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
+	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
+	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
+	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
+	if (!p4d_folded(*p4_dir_src)) {
+		/* 4 level paging */
+		memcpy(p4_dir_dst, p4_dir_src,
+		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
+		return;
+	}
+	/* 3 level paging */
+	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
+	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
+	memcpy(pu_dir_dst, pu_dir_src,
+	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
+}
+
+void __init kasan_free_early_identity(void)
+{
+	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+}
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 7be06475809b..97b3ee53852b 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -89,10 +89,8 @@ static int __memcpy_real(void *dest, void *src, size_t count)
 	return rc;
 }
 
-/*
- * Copy memory in real mode (kernel to kernel)
- */
-int memcpy_real(void *dest, void *src, size_t count)
+static unsigned long _memcpy_real(unsigned long dest, unsigned long src,
+				  unsigned long count)
 {
 	int irqs_disabled, rc;
 	unsigned long flags;
@@ -103,7 +101,7 @@ int memcpy_real(void *dest, void *src, size_t count)
 	irqs_disabled = arch_irqs_disabled_flags(flags);
 	if (!irqs_disabled)
 		trace_hardirqs_off();
-	rc = __memcpy_real(dest, src, count);
+	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
 	if (!irqs_disabled)
 		trace_hardirqs_on();
 	__arch_local_irq_ssm(flags);
@@ -111,6 +109,23 @@ int memcpy_real(void *dest, void *src, size_t count)
 }
 
 /*
+ * Copy memory in real mode (kernel to kernel)
+ */
+int memcpy_real(void *dest, void *src, size_t count)
+{
+	if (S390_lowcore.nodat_stack != 0)
+		return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack,
+				     3, dest, src, count);
+	/*
+	 * This is a really early memcpy_real call, the stacks are
+	 * not set up yet. Just call _memcpy_real on the early boot
+	 * stack
+	 */
+	return _memcpy_real((unsigned long) dest,(unsigned long) src,
+			    (unsigned long) count);
+}
+
+/*
  * Copy memory in absolute mode (kernel to kernel)
  */
 void memcpy_absolute(void *dest, void *src, size_t count)
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
deleted file mode 100644
index 21f6c82c8296..000000000000
--- a/arch/s390/mm/mem_detect.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2008, 2009
- *
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <asm/ipl.h>
-#include <asm/sclp.h>
-#include <asm/setup.h>
-
-#define CHUNK_READ_WRITE 0
-#define CHUNK_READ_ONLY 1
-
-static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
-{
-	memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
-		     start, start + size - 1);
-	memblock_add_range(&memblock.memory, start, size, 0, 0);
-	memblock_add_range(&memblock.physmem, start, size, 0, 0);
-}
-
-void __init detect_memory_memblock(void)
-{
-	unsigned long memsize, rnmax, rzm, addr, size;
-	int type;
-
-	rzm = sclp.rzm;
-	rnmax = sclp.rnmax;
-	memsize = rzm * rnmax;
-	if (!rzm)
-		rzm = 1UL << 17;
-	max_physmem_end = memsize;
-	addr = 0;
-	/* keep memblock lists close to the kernel */
-	memblock_set_bottom_up(true);
-	do {
-		size = 0;
-		/* assume lowcore is writable */
-		type = addr ? tprot(addr) : CHUNK_READ_WRITE;
-		do {
-			size += rzm;
-			if (max_physmem_end && addr + size >= max_physmem_end)
-				break;
-		} while (type == tprot(addr + size));
-		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
-			if (max_physmem_end && (addr + size > max_physmem_end))
-				size = max_physmem_end - addr;
-			memblock_physmem_add(addr, size);
-		}
-		addr += size;
-	} while (addr < max_physmem_end);
-	memblock_set_bottom_up(false);
-	if (!max_physmem_end)
-		max_physmem_end = memblock_end_of_DRAM();
-	memblock_dump_all();
-}
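
A closing note on the shadow arithmetic that kasan_init.c above relies on:
generic KASAN maps each 8 bytes of kernel address space to one shadow byte,
which is why shadow_alloc_size is memsize >> KASAN_SHADOW_SCALE_SHIFT and
why the layout diagram reserves 1/8 of the address space for shadow. A
self-contained model of the translation kasan_mem_to_shadow() performs
behind the __sha() macro (the offset constant here is illustrative only,
not the real s390 KASAN_SHADOW_OFFSET):

    #include <stdio.h>

    /* One shadow byte covers 2^3 = 8 bytes, as in generic KASAN. */
    #define KASAN_SHADOW_SCALE_SHIFT 3
    /* Illustrative placeholder; the real s390 offset differs. */
    #define KASAN_SHADOW_OFFSET 0x30000000000UL

    static unsigned long kasan_mem_to_shadow(unsigned long addr)
    {
            return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
            /* Same 1/8 ratio as shadow_alloc_size = memsize >> 3 above. */
            unsigned long memsize = 1UL << 30; /* 1 GiB of ram */

            printf("shadow for 0x10000000 is 0x%lx\n",
                   kasan_mem_to_shadow(0x10000000UL));
            printf("1 GiB of ram needs %lu MiB of shadow\n",
                   (memsize >> KASAN_SHADOW_SCALE_SHIFT) >> 20);
            return 0;
    }

This ratio is also why kasan_early_init() bounds memsize by
KASAN_SHADOW_START and panics when pgalloc_low + shadow_alloc_size exceeds
memsize: the shadow for all of ram must itself fit in ram below the shadow
region.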