From 1aea9b3f921003f0880f0676ae85d87c9f1cb4a2 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Mon, 24 Apr 2017 18:19:10 +0200 Subject: s390/mm: implement 5 level pages tables Add the logic to upgrade the page table for a 64-bit process to five levels. This increases the TASK_SIZE from 8PB to 16EB-4K. Signed-off-by: Martin Schwidefsky --- arch/s390/mm/dump_pagetables.c | 23 ++++++++++++++--- arch/s390/mm/gmap.c | 5 +++- arch/s390/mm/gup.c | 33 ++++++++++++++++++++---- arch/s390/mm/hugetlbpage.c | 30 ++++++++++++++-------- arch/s390/mm/mmap.c | 4 +-- arch/s390/mm/pageattr.c | 30 +++++++++++++++++++--- arch/s390/mm/pgalloc.c | 57 +++++++++++++++++++++++++++--------------- arch/s390/mm/pgtable.c | 6 ++++- arch/s390/mm/vmem.c | 44 ++++++++++++++++++++++++++++---- 9 files changed, 180 insertions(+), 52 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 1b553d847140..049c3c455b32 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -149,7 +149,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st, } static void walk_pud_level(struct seq_file *m, struct pg_state *st, - pgd_t *pgd, unsigned long addr) + p4d_t *p4d, unsigned long addr) { unsigned int prot; pud_t *pud; @@ -157,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, for (i = 0; i < PTRS_PER_PUD && addr < max_addr; i++) { st->current_address = addr; - pud = pud_offset(pgd, addr); + pud = pud_offset(p4d, addr); if (!pud_none(*pud)) if (pud_large(*pud)) { prot = pud_val(*pud) & @@ -172,6 +172,23 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, } } +static void walk_p4d_level(struct seq_file *m, struct pg_state *st, + pgd_t *pgd, unsigned long addr) +{ + p4d_t *p4d; + int i; + + for (i = 0; i < PTRS_PER_P4D && addr < max_addr; i++) { + st->current_address = addr; + p4d = p4d_offset(pgd, addr); + if (!p4d_none(*p4d)) + walk_pud_level(m, st, p4d, addr); + else + note_page(m, st, _PAGE_INVALID, 2); + addr += P4D_SIZE; + } +} + static void walk_pgd_level(struct seq_file *m) { unsigned long addr = 0; @@ -184,7 +201,7 @@ static void walk_pgd_level(struct seq_file *m) st.current_address = addr; pgd = pgd_offset_k(addr); if (!pgd_none(*pgd)) - walk_pud_level(m, &st, pgd, addr); + walk_p4d_level(m, &st, pgd, addr); else note_page(m, &st, _PAGE_INVALID, 1); addr += PGDIR_SIZE; diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 7f6db1e6c048..fbd664e48098 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -537,6 +537,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) unsigned long *table; spinlock_t *ptl; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; int rc; @@ -573,7 +574,9 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr) mm = gmap->mm; pgd = pgd_offset(mm, vmaddr); VM_BUG_ON(pgd_none(*pgd)); - pud = pud_offset(pgd, vmaddr); + p4d = p4d_offset(pgd, vmaddr); + VM_BUG_ON(p4d_none(*p4d)); + pud = pud_offset(p4d, vmaddr); VM_BUG_ON(pud_none(*pud)); /* large puds cannot yet be handled */ if (pud_large(*pud)) diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index b7b779c40a5b..8ecc25e760fa 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c @@ -166,15 +166,15 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, return 1; } -static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, +static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long 
addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp, pud; - pudp = (pud_t *) pgdp; - if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) - pudp = (pud_t *) pgd_deref(pgd); + pudp = (pud_t *) p4dp; + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) + pudp = (pud_t *) p4d_deref(p4d); pudp += pud_index(addr); do { pud = *pudp; @@ -194,6 +194,29 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, return 1; } +static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, + unsigned long end, int write, struct page **pages, int *nr) +{ + unsigned long next; + p4d_t *p4dp, p4d; + + p4dp = (p4d_t *) pgdp; + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1) + p4dp = (p4d_t *) pgd_deref(pgd); + p4dp += p4d_index(addr); + do { + p4d = *p4dp; + barrier(); + next = p4d_addr_end(addr, end); + if (p4d_none(p4d)) + return 0; + if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr)) + return 0; + } while (p4dp++, addr = next, addr != end); + + return 1; +} + /* * Like get_user_pages_fast() except its IRQ-safe in that it won't fall * back to the regular GUP. @@ -228,7 +251,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, next = pgd_addr_end(addr, end); if (pgd_none(pgd)) break; - if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr)) + if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr)) break; } while (pgdp++, addr = next, addr != end); local_irq_restore(flags); diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 9b4050caa4e9..d3a5e39756f6 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -162,16 +162,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) { pgd_t *pgdp; + p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp = NULL; pgdp = pgd_offset(mm, addr); - pudp = pud_alloc(mm, pgdp, addr); - if (pudp) { - if (sz == PUD_SIZE) - return (pte_t *) pudp; - else if (sz == PMD_SIZE) - pmdp = pmd_alloc(mm, pudp, addr); + p4dp = p4d_alloc(mm, pgdp, addr); + if (p4dp) { + pudp = pud_alloc(mm, p4dp, addr); + if (pudp) { + if (sz == PUD_SIZE) + return (pte_t *) pudp; + else if (sz == PMD_SIZE) + pmdp = pmd_alloc(mm, pudp, addr); + } } return (pte_t *) pmdp; } @@ -179,16 +183,20 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { pgd_t *pgdp; + p4d_t *p4dp; pud_t *pudp; pmd_t *pmdp = NULL; pgdp = pgd_offset(mm, addr); if (pgd_present(*pgdp)) { - pudp = pud_offset(pgdp, addr); - if (pud_present(*pudp)) { - if (pud_large(*pudp)) - return (pte_t *) pudp; - pmdp = pmd_offset(pudp, addr); + p4dp = p4d_offset(pgdp, addr); + if (p4d_present(*p4dp)) { + pudp = pud_offset(p4dp, addr); + if (pud_present(*pudp)) { + if (pud_large(*pudp)) + return (pte_t *) pudp; + pmdp = pmd_offset(pudp, addr); + } } } return (pte_t *) pmdp; diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index b017daed6887..8c5f284044ef 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -120,7 +120,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, check_asce_limit: if (addr + len > current->mm->context.asce_limit) { - rc = crst_table_upgrade(mm); + rc = crst_table_upgrade(mm, addr + len); if (rc) return (unsigned long) rc; } @@ -184,7 +184,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, check_asce_limit: if (addr + len > current->mm->context.asce_limit) { - rc = 
crst_table_upgrade(mm); + rc = crst_table_upgrade(mm, addr + len); if (rc) return (unsigned long) rc; } diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 49e721f3645e..180481589246 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -229,14 +229,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr, pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3); } -static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end, +static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end, unsigned long flags) { unsigned long next; pud_t *pudp; int rc = 0; - pudp = pud_offset(pgd, addr); + pudp = pud_offset(p4d, addr); do { if (pud_none(*pudp)) return -EINVAL; @@ -259,6 +259,26 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end, return rc; } +static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end, + unsigned long flags) +{ + unsigned long next; + p4d_t *p4dp; + int rc = 0; + + p4dp = p4d_offset(pgd, addr); + do { + if (p4d_none(*p4dp)) + return -EINVAL; + next = p4d_addr_end(addr, end); + rc = walk_pud_level(p4dp, addr, next, flags); + p4dp++; + addr = next; + cond_resched(); + } while (addr < end && !rc); + return rc; +} + static DEFINE_MUTEX(cpa_mutex); static int change_page_attr(unsigned long addr, unsigned long end, @@ -278,7 +298,7 @@ static int change_page_attr(unsigned long addr, unsigned long end, if (pgd_none(*pgdp)) break; next = pgd_addr_end(addr, end); - rc = walk_pud_level(pgdp, addr, next, flags); + rc = walk_p4d_level(pgdp, addr, next, flags); if (rc) break; cond_resched(); @@ -319,6 +339,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) unsigned long address; int nr, i, j; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -326,7 +347,8 @@ void __kernel_map_pages(struct page *page, int numpages, int enable) for (i = 0; i < numpages;) { address = page_to_phys(page + i); pgd = pgd_offset_k(address); - pud = pud_offset(pgd, address); + p4d = p4d_offset(pgd, address); + pud = pud_offset(p4d, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); nr = (unsigned long)pte >> ilog2(sizeof(long)); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index f502cbe657af..18918e394ce4 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -76,29 +76,46 @@ static void __crst_table_upgrade(void *arg) __tlb_flush_local(); } -int crst_table_upgrade(struct mm_struct *mm) +int crst_table_upgrade(struct mm_struct *mm, unsigned long end) { unsigned long *table, *pgd; + int rc, notify; - /* upgrade should only happen from 3 to 4 levels */ - BUG_ON(mm->context.asce_limit != (1UL << 42)); - - table = crst_table_alloc(mm); - if (!table) + /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ + BUG_ON(mm->context.asce_limit < (1UL << 42)); + if (end >= TASK_SIZE_MAX) return -ENOMEM; - - spin_lock_bh(&mm->page_table_lock); - pgd = (unsigned long *) mm->pgd; - crst_table_init(table, _REGION2_ENTRY_EMPTY); - pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd); - mm->pgd = (pgd_t *) table; - mm->context.asce_limit = 1UL << 53; - mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | - _ASCE_USER_BITS | _ASCE_TYPE_REGION2; - spin_unlock_bh(&mm->page_table_lock); - - on_each_cpu(__crst_table_upgrade, mm, 0); - return 0; + rc = 0; + notify = 0; + while (mm->context.asce_limit < end) { + table = crst_table_alloc(mm); + if (!table) { + rc = -ENOMEM; + break; + } + 
spin_lock_bh(&mm->page_table_lock); + pgd = (unsigned long *) mm->pgd; + if (mm->context.asce_limit == (1UL << 42)) { + crst_table_init(table, _REGION2_ENTRY_EMPTY); + p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd); + mm->pgd = (pgd_t *) table; + mm->context.asce_limit = 1UL << 53; + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | _ASCE_TYPE_REGION2; + } else { + crst_table_init(table, _REGION1_ENTRY_EMPTY); + pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); + mm->pgd = (pgd_t *) table; + mm->context.asce_limit = -PAGE_SIZE; + mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | + _ASCE_USER_BITS | _ASCE_TYPE_REGION1; + } + notify = 1; + spin_unlock_bh(&mm->page_table_lock); + } + if (notify) + on_each_cpu(__crst_table_upgrade, mm, 0); + return rc; } void crst_table_downgrade(struct mm_struct *mm) @@ -274,7 +291,7 @@ static void __tlb_remove_table(void *_table) struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT); switch (mask) { - case 0: /* pmd or pud */ + case 0: /* pmd, pud, or p4d */ free_pages((unsigned long) table, 2); break; case 1: /* lower 2K of a 4K page table */ diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 947b66a5cdba..d4d409ba206b 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -610,6 +610,7 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) { spinlock_t *ptl; pgd_t *pgd; + p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgste_t pgste; @@ -618,7 +619,10 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) bool dirty; pgd = pgd_offset(mm, addr); - pud = pud_alloc(mm, pgd, addr); + p4d = p4d_alloc(mm, pgd, addr); + if (!p4d) + return false; + pud = pud_alloc(mm, p4d, addr); if (!pud) return false; pmd = pmd_alloc(mm, pud, addr); diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index c33c94b4be60..d8398962a723 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -38,6 +38,17 @@ static void __ref *vmem_alloc_pages(unsigned int order) return (void *) memblock_alloc(size, size); } +static inline p4d_t *vmem_p4d_alloc(void) +{ + p4d_t *p4d = NULL; + + p4d = vmem_alloc_pages(2); + if (!p4d) + return NULL; + clear_table((unsigned long *) p4d, _REGION2_ENTRY_EMPTY, PAGE_SIZE * 4); + return p4d; +} + static inline pud_t *vmem_pud_alloc(void) { pud_t *pud = NULL; @@ -85,6 +96,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size) unsigned long end = start + size; unsigned long address = start; pgd_t *pg_dir; + p4d_t *p4_dir; pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; @@ -102,12 +114,19 @@ static int vmem_add_mem(unsigned long start, unsigned long size) while (address < end) { pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { + p4_dir = vmem_p4d_alloc(); + if (!p4_dir) + goto out; + pgd_populate(&init_mm, pg_dir, p4_dir); + } + p4_dir = p4d_offset(pg_dir, address); + if (p4d_none(*p4_dir)) { pu_dir = vmem_pud_alloc(); if (!pu_dir) goto out; - pgd_populate(&init_mm, pg_dir, pu_dir); + p4d_populate(&init_mm, p4_dir, pu_dir); } - pu_dir = pud_offset(pg_dir, address); + pu_dir = pud_offset(p4_dir, address); if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address && !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) && !debug_pagealloc_enabled()) { @@ -161,6 +180,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) unsigned long end = start + size; unsigned long address = start; pgd_t *pg_dir; + p4d_t *p4_dir; pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; @@ -172,7 +192,12 @@ static void vmem_remove_range(unsigned 
long start, unsigned long size) address += PGDIR_SIZE; continue; } - pu_dir = pud_offset(pg_dir, address); + p4_dir = p4d_offset(pg_dir, address); + if (p4d_none(*p4_dir)) { + address += P4D_SIZE; + continue; + } + pu_dir = pud_offset(p4_dir, address); if (pud_none(*pu_dir)) { address += PUD_SIZE; continue; @@ -213,6 +238,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) unsigned long pgt_prot, sgt_prot; unsigned long address = start; pgd_t *pg_dir; + p4d_t *p4_dir; pud_t *pu_dir; pmd_t *pm_dir; pte_t *pt_dir; @@ -227,13 +253,21 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) for (address = start; address < end;) { pg_dir = pgd_offset_k(address); if (pgd_none(*pg_dir)) { + p4_dir = vmem_p4d_alloc(); + if (!p4_dir) + goto out; + pgd_populate(&init_mm, pg_dir, p4_dir); + } + + p4_dir = p4d_offset(pg_dir, address); + if (p4d_none(*p4_dir)) { pu_dir = vmem_pud_alloc(); if (!pu_dir) goto out; - pgd_populate(&init_mm, pg_dir, pu_dir); + p4d_populate(&init_mm, p4_dir, pu_dir); } - pu_dir = pud_offset(pg_dir, address); + pu_dir = pud_offset(p4_dir, address); if (pud_none(*pu_dir)) { pm_dir = vmem_pmd_alloc(); if (!pm_dir) -- cgit v1.2.3-58-ga151 From d12a3d603690ba84b7e3fd357f05b96e777d4630 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Tue, 9 May 2017 13:44:43 +0200 Subject: s390/mm: add __rcu annotations Add __rcu annotations so sparse correctly warns only if "slot" gets derefenced without using rcu_dereference(). Right now we get warnings because of the missing annotation: arch/s390/mm/gmap.c:135:17: warning: incorrect type in assignment (different address spaces) arch/s390/mm/gmap.c:135:17: expected void **slot arch/s390/mm/gmap.c:135:17: got void [noderef] ** Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/gmap.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index fbd664e48098..4fb3d3cdb370 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -125,7 +125,7 @@ static void gmap_radix_tree_free(struct radix_tree_root *root) struct radix_tree_iter iter; unsigned long indices[16]; unsigned long index; - void **slot; + void __rcu **slot; int i, nr; /* A radix tree is freed by deleting all of its entries */ @@ -150,7 +150,7 @@ static void gmap_rmap_radix_tree_free(struct radix_tree_root *root) struct radix_tree_iter iter; unsigned long indices[16]; unsigned long index; - void **slot; + void __rcu **slot; int i, nr; /* A radix tree is freed by deleting all of its entries */ @@ -1011,7 +1011,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table); static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr, struct gmap_rmap *rmap) { - void **slot; + void __rcu **slot; BUG_ON(!gmap_is_shadow(sg)); slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT); -- cgit v1.2.3-58-ga151 From fe7b274729fc0bab9b4238f875695d36726a6b10 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 22 May 2017 13:16:00 +0200 Subject: s390/fault: use _ASCE_ORIGIN instead of PAGE_MASK When masking an ASCE to get its origin use the corresponding define instead of the unrelated PAGE_MASK. This doesn't fix a bug since both masks are identical. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/fault.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 5845d3028ffc..14f25798b001 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -130,7 +130,7 @@ static int bad_address(void *p) static void dump_pagetable(unsigned long asce, unsigned long address) { - unsigned long *table = __va(asce & PAGE_MASK); + unsigned long *table = __va(asce & _ASCE_ORIGIN); pr_alert("AS:%016lx ", asce); switch (asce & _ASCE_TYPE_MASK) { -- cgit v1.2.3-58-ga151 From 60c497014e34af5aa0be56d0869c67fa2b5c3786 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 1 Jun 2017 11:04:04 +0200 Subject: s390/mm: use correct address space when enabling DAT Right now the kernel uses the primary address space until finally the switch to the correct home address space will be done when the idle PSW will be loaded within psw_idle(). Correct this and simply use the home address space when DAT is enabled for the first time. This doesn't really fix a bug, but fixes odd behavior. Reviewed-by: Christian Borntraeger Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/mm/init.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ee6a1d3d4983..0352f9f88c73 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -81,6 +81,7 @@ void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; unsigned long pgd_type, asce_bits; + psw_t psw; init_mm.pgd = swapper_pg_dir; if (VMALLOC_END > (1UL << 42)) { @@ -100,7 +101,10 @@ void __init paging_init(void) __ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 13, 13); - __arch_local_irq_stosm(0x04); + psw.mask = __extract_psw(); + psw_bits(psw).t = 1; + psw_bits(psw).as = PSW_AS_HOME; + __load_psw_mask(psw.mask); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); -- cgit v1.2.3-58-ga151 From 8bb3fdd6863c3b6b84bbab750d6b35e889c1399d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 3 Jun 2017 10:19:55 +0200 Subject: s390: rename psw_bits enums The address space enums that must be used when modifying the address space part of a psw with the psw_bits() macro can easily be confused with the psw defines that are used to mask and compare directly the mask part of a psw. We have e.g. PSW_AS_PRIMARY vs PSW_ASC_PRIMARY. To avoid confusion rename the PSW_AS_* enums to PSW_BITS_AS_*. In addition also rename the PSW_AMODE_* enums, so they also follow the same naming scheme: PSW_BITS_AMODE_*. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ptrace.h | 14 +++++++------- arch/s390/kernel/uprobes.c | 8 ++++---- arch/s390/kvm/gaccess.c | 12 ++++++------ arch/s390/kvm/gaccess.h | 4 ++-- arch/s390/kvm/guestdbg.c | 6 +++--- arch/s390/kvm/priv.c | 6 +++--- arch/s390/mm/init.c | 2 +- 7 files changed, 26 insertions(+), 26 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index 99bc456cc26a..c8d13bcc9f5d 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -46,16 +46,16 @@ struct psw_bits { }; enum { - PSW_AMODE_24BIT = 0, - PSW_AMODE_31BIT = 1, - PSW_AMODE_64BIT = 3 + PSW_BITS_AMODE_24BIT = 0, + PSW_BITS_AMODE_31BIT = 1, + PSW_BITS_AMODE_64BIT = 3 }; enum { - PSW_AS_PRIMARY = 0, - PSW_AS_ACCREG = 1, - PSW_AS_SECONDARY = 2, - PSW_AS_HOME = 3 + PSW_BITS_AS_PRIMARY = 0, + PSW_BITS_AS_ACCREG = 1, + PSW_BITS_AS_SECONDARY = 2, + PSW_BITS_AS_HOME = 3 }; #define psw_bits(__psw) (*({ \ diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 314e0ee3016a..0eec45b4575b 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -27,9 +27,9 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) + if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) return -EINVAL; - if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) + if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) return -EINVAL; clear_pt_regs_flag(regs, PIF_PER_TRAP); auprobe->saved_per = psw_bits(regs->psw).r; @@ -372,8 +372,8 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs) bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if ((psw_bits(regs->psw).eaba == PSW_AMODE_24BIT) || - ((psw_bits(regs->psw).eaba == PSW_AMODE_31BIT) && + if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) || + ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) && !is_compat_task())) { regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE); do_report_trap(regs, SIGILL, ILL_ILLADR, NULL); diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 9da243d94cc3..6ad4a9797de8 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -557,20 +557,20 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, return 0; } - if (mode == GACC_IFETCH) - psw.as = psw.as == PSW_AS_HOME ? 
PSW_AS_HOME : PSW_AS_PRIMARY; + if ((mode == GACC_IFETCH) && (psw.as != PSW_BITS_AS_HOME)) + psw.as = PSW_BITS_AS_PRIMARY; switch (psw.as) { - case PSW_AS_PRIMARY: + case PSW_BITS_AS_PRIMARY: asce->val = vcpu->arch.sie_block->gcr[1]; return 0; - case PSW_AS_SECONDARY: + case PSW_BITS_AS_SECONDARY: asce->val = vcpu->arch.sie_block->gcr[7]; return 0; - case PSW_AS_HOME: + case PSW_BITS_AS_HOME: asce->val = vcpu->arch.sie_block->gcr[13]; return 0; - case PSW_AS_ACCREG: + case PSW_BITS_AS_ACCREG: rc = ar_translation(vcpu, asce, ar, mode); if (rc > 0) return trans_exc(vcpu, rc, ga, ar, mode, PROT_TYPE_ALC); diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h index 7ce47fd36f28..bec42b852246 100644 --- a/arch/s390/kvm/gaccess.h +++ b/arch/s390/kvm/gaccess.h @@ -57,9 +57,9 @@ static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu, { psw_t *psw = &vcpu->arch.sie_block->gpsw; - if (psw_bits(*psw).eaba == PSW_AMODE_64BIT) + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT) return ga; - if (psw_bits(*psw).eaba == PSW_AMODE_31BIT) + if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT) return ga & ((1UL << 31) - 1); return ga & ((1UL << 24) - 1); } diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 23d9a4e12da1..c2e0ddc1356e 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -613,15 +613,15 @@ int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu) * instruction. Check primary and home space-switch-event * controls. (theoretically home -> home produced no event) */ - if (((new_as == PSW_AS_HOME) ^ old_as_is_home(vcpu)) && - (pssec(vcpu) || hssec(vcpu))) + if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) && + (pssec(vcpu) || hssec(vcpu))) vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; /* * PT, PTI, PR, PC instruction operate on primary AS only. Check * if the primary-space-switch-event control was or got set. 
*/ - if (new_as == PSW_AS_PRIMARY && !old_as_is_home(vcpu) && + if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) && (pssec(vcpu) || old_ssec(vcpu))) vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH; } diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index c03106c428cf..e9dd7efc57b8 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -361,7 +361,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) } } if (m3 & SSKE_MB) { - if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) + if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK; else vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL; @@ -901,7 +901,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) /* only support 2G frame size if EDAT2 is available and we are not in 24-bit addressing mode */ if (!test_kvm_facility(vcpu->kvm, 78) || - psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT) + psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); end = (start + (1UL << 31)) & ~((1UL << 31) - 1); break; @@ -938,7 +938,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) start += PAGE_SIZE; } if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { - if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) { + if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) { vcpu->run->s.regs.gprs[reg2] = end; } else { vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0352f9f88c73..bc8c301f82b6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -103,7 +103,7 @@ void __init paging_init(void) __ctl_load(S390_lowcore.kernel_asce, 13, 13); psw.mask = __extract_psw(); psw_bits(psw).t = 1; - psw_bits(psw).as = PSW_AS_HOME; + psw_bits(psw).as = PSW_BITS_AS_HOME; __load_psw_mask(psw.mask); sparse_memory_present_with_active_regions(MAX_NUMNODES); -- cgit v1.2.3-58-ga151 From a752598254016d2f9b4415d43a6402fe083f70b2 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Sat, 3 Jun 2017 10:56:07 +0200 Subject: s390: rename struct psw_bits members Rename a couple of the struct psw_bits members so it is more obvious for what they are good. Initially I thought using the single character names from the PoP would be sufficient and obvious, but admittedly that is not true. The current implementation is not easy to use, if one has to look into the source file to figure out which member represents the 'per' bit (which is the 'r' member). 
Therefore rename the members to sane names that are identical to the uapi psw mask defines: r -> per i -> io e -> ext t -> dat m -> mcheck w -> wait p -> pstate Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/ptrace.h | 38 +++++++++++++++++++------------------- arch/s390/kernel/dumpstack.c | 4 ++-- arch/s390/kernel/perf_cpum_sf.c | 10 +++++----- arch/s390/kernel/uprobes.c | 4 ++-- arch/s390/kvm/gaccess.c | 10 +++++----- arch/s390/kvm/priv.c | 2 +- arch/s390/mm/init.c | 2 +- 7 files changed, 35 insertions(+), 35 deletions(-) (limited to 'arch/s390/mm') diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index c8d13bcc9f5d..004f54909235 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -24,25 +24,25 @@ PSW_MASK_PSTATE | PSW_ASC_PRIMARY) struct psw_bits { - unsigned long : 1; - unsigned long r : 1; /* PER-Mask */ - unsigned long : 3; - unsigned long t : 1; /* DAT Mode */ - unsigned long i : 1; /* Input/Output Mask */ - unsigned long e : 1; /* External Mask */ - unsigned long key : 4; /* PSW Key */ - unsigned long : 1; - unsigned long m : 1; /* Machine-Check Mask */ - unsigned long w : 1; /* Wait State */ - unsigned long p : 1; /* Problem State */ - unsigned long as : 2; /* Address Space Control */ - unsigned long cc : 2; /* Condition Code */ - unsigned long pm : 4; /* Program Mask */ - unsigned long ri : 1; /* Runtime Instrumentation */ - unsigned long : 6; - unsigned long eaba : 2; /* Addressing Mode */ - unsigned long : 31; - unsigned long ia : 64; /* Instruction Address */ + unsigned long : 1; + unsigned long per : 1; /* PER-Mask */ + unsigned long : 3; + unsigned long dat : 1; /* DAT Mode */ + unsigned long io : 1; /* Input/Output Mask */ + unsigned long ext : 1; /* External Mask */ + unsigned long key : 4; /* PSW Key */ + unsigned long : 1; + unsigned long mcheck : 1; /* Machine-Check Mask */ + unsigned long wait : 1; /* Wait State */ + unsigned long pstate : 1; /* Problem State */ + unsigned long as : 2; /* Address Space Control */ + unsigned long cc : 2; /* Condition Code */ + unsigned long pm : 4; /* Program Mask */ + unsigned long ri : 1; /* Runtime Instrumentation */ + unsigned long : 6; + unsigned long eaba : 2; /* Addressing Mode */ + unsigned long : 31; + unsigned long ia : 64; /* Instruction Address */ }; enum { diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 62904890d127..dab78babfab6 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -128,8 +128,8 @@ void show_registers(struct pt_regs *regs) pr_cont(" (%pSR)", (void *)regs->psw.addr); pr_cont("\n"); printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x " - "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e, - psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm); + "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext, + psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm); pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba); printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode, regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]); diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index ca960d0370d5..0c82f7903fc7 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -995,11 +995,11 @@ static int perf_push_sample(struct perf_event *event, struct sf_raw_sample *sfr) regs.int_parm = CPU_MF_INT_SF_PRA; sde_regs = (struct perf_sf_sde_regs *) ®s.int_parm_long; - 
psw_bits(regs.psw).ia = sfr->basic.ia; - psw_bits(regs.psw).t = sfr->basic.T; - psw_bits(regs.psw).w = sfr->basic.W; - psw_bits(regs.psw).p = sfr->basic.P; - psw_bits(regs.psw).as = sfr->basic.AS; + psw_bits(regs.psw).ia = sfr->basic.ia; + psw_bits(regs.psw).dat = sfr->basic.T; + psw_bits(regs.psw).wait = sfr->basic.W; + psw_bits(regs.psw).per = sfr->basic.P; + psw_bits(regs.psw).as = sfr->basic.AS; /* * Use the hardware provided configuration level to decide if the diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c index 0eec45b4575b..d94baa8db507 100644 --- a/arch/s390/kernel/uprobes.c +++ b/arch/s390/kernel/uprobes.c @@ -32,7 +32,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) return -EINVAL; clear_pt_regs_flag(regs, PIF_PER_TRAP); - auprobe->saved_per = psw_bits(regs->psw).r; + auprobe->saved_per = psw_bits(regs->psw).per; auprobe->saved_int_code = regs->int_code; regs->int_code = UPROBE_TRAP_NR; regs->psw.addr = current->utask->xol_vaddr; @@ -81,7 +81,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP); update_cr_regs(current); - psw_bits(regs->psw).r = auprobe->saved_per; + psw_bits(regs->psw).per = auprobe->saved_per; regs->int_code = auprobe->saved_int_code; if (fixup & FIXUP_PSW_NORMAL) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 6ad4a9797de8..e0f7d5fc7efd 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -551,7 +551,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, int rc; struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw); - if (!psw.t) { + if (!psw.dat) { asce->val = 0; asce->r = 1; return 0; @@ -771,7 +771,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu, if (!ctlreg0.lap) return 0; - if (psw_bits(*psw).t && asce.p) + if (psw_bits(*psw).dat && asce.p) return 0; return 1; } @@ -790,7 +790,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode, PROT_TYPE_LA); ga &= PAGE_MASK; - if (psw_bits(*psw).t) { + if (psw_bits(*psw).dat) { rc = guest_translate(vcpu, ga, pages, asce, mode); if (rc < 0) return rc; @@ -831,7 +831,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data, pages = vmalloc(nr_pages * sizeof(unsigned long)); if (!pages) return -ENOMEM; - need_ipte_lock = psw_bits(*psw).t && !asce.r; + need_ipte_lock = psw_bits(*psw).dat && !asce.r; if (need_ipte_lock) ipte_lock(vcpu); rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode); @@ -899,7 +899,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, mode, PROT_TYPE_LA); } - if (psw_bits(*psw).t && !asce.r) { /* Use DAT? */ + if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? 
*/ rc = guest_translate(vcpu, gva, gpa, asce, mode); if (rc > 0) return trans_exc(vcpu, rc, gva, 0, mode, PROT_TYPE_DAT); diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index e9dd7efc57b8..e53292a89257 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -374,7 +374,7 @@ static int handle_sske(struct kvm_vcpu *vcpu) static int handle_ipte_interlock(struct kvm_vcpu *vcpu) { vcpu->stat.instruction_ipte_interlock++; - if (psw_bits(vcpu->arch.sie_block->gpsw).p) + if (psw_bits(vcpu->arch.sie_block->gpsw).pstate) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu)); kvm_s390_retry_instr(vcpu); diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index bc8c301f82b6..3348e60dd8ad 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -102,7 +102,7 @@ void __init paging_init(void) __ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 13, 13); psw.mask = __extract_psw(); - psw_bits(psw).t = 1; + psw_bits(psw).dat = 1; psw_bits(psw).as = PSW_BITS_AS_HOME; __load_psw_mask(psw.mask); -- cgit v1.2.3-58-ga151
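
Taken together, the patches above convert every page table walk under arch/s390/mm to go through the new p4d level. As a quick orientation for readers less familiar with the generic Linux page-table API, the sketch below shows the shape such a five-level walk takes. It is illustrative only and not taken from the patches; the helper name walk_to_pte() is made up, huge pud/pmd entries are not handled, and the caller would have to pte_unmap() the returned pointer.

#include <linux/mm.h>

/* Hypothetical helper: walk mm's page tables down to the pte for addr. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	/* With fewer than five levels in use, p4d_offset() folds back to the pgd. */
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() */
}

The pattern mirrors huge_pte_offset() in the hugetlbpage.c hunk above: for walkers, the fifth level only adds the p4d_offset()/p4d_none() step between pgd and pud, which is why most of the hunks in this series are mechanical.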