author     Alexander Gordeev <agordeev@linux.ibm.com>  2022-12-16 19:07:38 +0100
committer  Heiko Carstens <hca@linux.ibm.com>          2023-01-13 14:15:05 +0100
commit     bf2b4af2ed23b9548ea7f24613fd905f56ec7910 (patch)
tree       8aa00782e8dbed53205117dbfd7e26bff9b8cf33 /arch/s390/mm
parent     e148071b9f7711cb68c6d89c5b2033ee5a8add93 (diff)
s390/kasan: use set_pXe_bit() for pgtable entries setup
Convert setup of pgtable entries to use set_pXe_bit() helpers as the
preferred way in MM code.

Locally introduce pgprot_clear_bit() helper, which is strictly speaking
a generic function. However, only the x86 pgprot_clear_protnone_bits()
helper does a similar thing, so do not make it public.

Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
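For context, the sketch below models the conversion in a small standalone
C program with simplified stand-in types: pgprot_clear_bit() mirrors the
helper added by this patch and set_pte_bit() follows the shape of the
existing s390 helper, but the stub pgprot_t/pte_t definitions and the
_PAGE_NOEXEC bit value are illustrative assumptions only, not the real
definitions from arch/s390/include/asm/pgtable.h.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel's typed page-table values. */
    typedef struct { unsigned long pgprot; } pgprot_t;
    typedef struct { unsigned long pte; } pte_t;

    #define __pgprot(x)   ((pgprot_t) { (x) })
    #define __pte(x)      ((pte_t) { (x) })
    #define pgprot_val(x) ((x).pgprot)
    #define pte_val(x)    ((x).pte)

    #define _PAGE_NOEXEC  0x100UL  /* placeholder value, not the real s390 bit */

    /* Shape of the helper the patch introduces: clear a bit in a pgprot_t. */
    static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
    {
            return __pgprot(pgprot_val(pgprot) & ~bit);
    }

    /* Stand-in for set_pte_bit(): OR protection bits into a pte. */
    static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
    {
            return __pte(pte_val(pte) | pgprot_val(prot));
    }

    int main(void)
    {
            pgprot_t pgt_prot = __pgprot(0x103UL);  /* made-up protection bits */
            pte_t pte = __pte(0x1000UL);            /* made-up page frame address */

            /* Old style open-coded "pgt_prot &= ~_PAGE_NOEXEC" and
             * "__pte(__pa(page) | pgt_prot)"; new style goes through
             * the typed helpers instead.
             */
            pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
            pte = set_pte_bit(pte, pgt_prot);

            printf("pte value: %#lx\n", pte_val(pte));
            return 0;
    }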
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/kasan_init.c | 37
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
index 89545280935a..a97b7981358e 100644
--- a/arch/s390/mm/kasan_init.c
+++ b/arch/s390/mm/kasan_init.c
@@ -86,25 +86,32 @@ enum populate_mode {
POPULATE_ZERO_SHADOW,
POPULATE_SHALLOW
};
+
+static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
+{
+ return __pgprot(pgprot_val(pgprot) & ~bit);
+}
+
static void __init kasan_early_pgtable_populate(unsigned long address,
unsigned long end,
enum populate_mode mode)
{
- unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
+ pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
+ pgprot_t pgt_prot = PAGE_KERNEL;
+ pgprot_t sgt_prot = SEGMENT_KERNEL;
pgd_t *pg_dir;
p4d_t *p4_dir;
pud_t *pu_dir;
pmd_t *pm_dir;
pte_t *pt_dir;
+ pmd_t pmd;
+ pte_t pte;

- pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
if (!has_nx)
- pgt_prot_zero &= ~_PAGE_NOEXEC;
- pgt_prot = pgprot_val(PAGE_KERNEL);
- sgt_prot = pgprot_val(SEGMENT_KERNEL);
+ pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
if (!has_nx || mode == POPULATE_ONE2ONE) {
- pgt_prot &= ~_PAGE_NOEXEC;
- sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
+ pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
+ sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
}

/*
@@ -175,7 +182,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
- set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
+ pmd = __pmd(__pa(page));
+ pmd = set_pmd_bit(pmd, sgt_prot);
+ set_pmd(pm_dir, pmd);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -194,16 +203,22 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
switch (mode) {
case POPULATE_ONE2ONE:
page = (void *)address;
- set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
+ pte = __pte(__pa(page));
+ pte = set_pte_bit(pte, pgt_prot);
+ set_pte(pt_dir, pte);
break;
case POPULATE_MAP:
page = kasan_early_alloc_pages(0);
memset(page, 0, PAGE_SIZE);
- set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
+ pte = __pte(__pa(page));
+ pte = set_pte_bit(pte, pgt_prot);
+ set_pte(pt_dir, pte);
break;
case POPULATE_ZERO_SHADOW:
page = kasan_early_shadow_page;
- set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
+ pte = __pte(__pa(page));
+ pte = set_pte_bit(pte, pgt_prot_zero);
+ set_pte(pt_dir, pte);
break;
case POPULATE_SHALLOW:
/* should never happen */