-rw-r--r--   arch/x86/kernel/apic/apic.c   |  2
-rw-r--r--   arch/x86/mm/fault.c           |  8
-rw-r--r--   arch/x86/mm/numa.c            |  3
-rw-r--r--   arch/x86/mm/pageattr.c        | 50
-rw-r--r--   arch/x86/platform/efi/efi.c   |  5
5 files changed, 53 insertions, 15 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a5b4dce1b7ac..904611bf0e5a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg)
 {
 	if (config_enabled(CONFIG_X86_32) && !arg)
 		force_enable_local_apic = 1;
-	else if (!strncmp(arg, "notscdeadline", 13))
+	else if (arg && !strncmp(arg, "notscdeadline", 13))
 		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
 	return 0;
 }
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fb674fd3fc22..2b97525246d4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
 	if (pmd_large(*pmd))
 		return spurious_fault_check(error_code, (pte_t *) pmd);
 
-	/*
-	 * Note: don't use pte_present() here, since it returns true
-	 * if the _PAGE_PROTNONE bit is set. However, this aliases the
-	 * _PAGE_GLOBAL bit, which for kernel pages give false positives
-	 * when CONFIG_DEBUG_PAGEALLOC is used.
-	 */
 	pte = pte_offset_kernel(pmd, address);
-	if (!(pte_flags(*pte) & _PAGE_PRESENT))
+	if (!pte_present(*pte))
 		return 0;
 
 	ret = spurious_fault_check(error_code, pte);
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index dfd30259eb89..ff3633c794c6 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -97,8 +97,7 @@ void numa_set_node(int cpu, int node)
 #endif
 	per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-	if (node != NUMA_NO_NODE)
-		set_cpu_numa_node(cpu, node);
+	set_cpu_numa_node(cpu, node);
 }
 
 void numa_clear_node(int cpu)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ca1f1c2bb7be..091934e1d0d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -473,6 +473,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
 	/*
+	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
+	 * set otherwise pmd_present/pmd_huge will return true even on
+	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
+	 * for the ancient hardware that doesn't support it.
+	 */
+	if (pgprot_val(new_prot) & _PAGE_PRESENT)
+		pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+	else
+		pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+
+	new_prot = canon_pgprot(new_prot);
+
+	/*
 	 * old_pte points to the large page base address. So we need
 	 * to add the offset of the virtual address:
 	 */
@@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 		 * The address is aligned and the number of pages
 		 * covers the full page.
 		 */
-		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
+		new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
 		__set_pmd_pte(kpte, address, new_pte);
 		cpa->flags |= CPA_FLUSHTLB;
 		do_split = 0;
@@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
 #ifdef CONFIG_X86_64
 	if (level == PG_LEVEL_1G) {
 		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
-		pgprot_val(ref_prot) |= _PAGE_PSE;
+		/*
+		 * Set the PSE flags only if the PRESENT flag is set
+		 * otherwise pmd_present/pmd_huge will return true
+		 * even on a non present pmd.
+		 */
+		if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+			pgprot_val(ref_prot) |= _PAGE_PSE;
+		else
+			pgprot_val(ref_prot) &= ~_PAGE_PSE;
 	}
 #endif
 
 	/*
+	 * Set the GLOBAL flags only if the PRESENT flag is set
+	 * otherwise pmd/pte_present will return true even on a non
+	 * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
+	 * for the ancient hardware that doesn't support it.
+	 */
+	if (pgprot_val(ref_prot) & _PAGE_PRESENT)
+		pgprot_val(ref_prot) |= _PAGE_GLOBAL;
+	else
+		pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;
+
+	/*
 	 * Get the target pfn from the original entry:
 	 */
 	pfn = pte_pfn(*kpte);
 	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
-		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
+		set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 
 	if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
 				PFN_DOWN(__pa(address)) + 1))
@@ -685,6 +717,18 @@ repeat:
 		new_prot = static_protections(new_prot, address, pfn);
 
 		/*
+		 * Set the GLOBAL flags only if the PRESENT flag is
+		 * set otherwise pte_present will return true even on
+		 * a non present pte. The canon_pgprot will clear
+		 * _PAGE_GLOBAL for the ancient hardware that doesn't
+		 * support it.
+		 */
+		if (pgprot_val(new_prot) & _PAGE_PRESENT)
+			pgprot_val(new_prot) |= _PAGE_GLOBAL;
+		else
+			pgprot_val(new_prot) &= ~_PAGE_GLOBAL;
+
+		/*
 		 * We need to keep the pfn from the existing PTE,
 		 * after all we're only going to change it's attributes
 		 * not the memory it points to
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 70b2a3a305d6..2f81db40d7ca 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -85,9 +85,10 @@ int efi_enabled(int facility)
 }
 EXPORT_SYMBOL(efi_enabled);
 
+static bool disable_runtime = false;
 static int __init setup_noefi(char *arg)
 {
-	clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
+	disable_runtime = true;
 	return 0;
 }
 early_param("noefi", setup_noefi);
@@ -734,7 +735,7 @@ void __init efi_init(void)
 	if (!efi_is_native())
 		pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
 	else {
-		if (efi_runtime_init())
+		if (disable_runtime || efi_runtime_init())
 			return;
 		set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
 	}
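
Note on the recurring pattern in the pageattr.c hunks above: PSE and GLOBAL are only set in a protection value when PRESENT is also set, because pmd_present()/pmd_huge()/pte_present() also test those bits and would otherwise report a non-present entry as present, and canon_pgprot() strips _PAGE_GLOBAL on CPUs without PGE. The stand-alone C sketch below only illustrates that idea; the PAGE_* constants, have_pge flag and helper names are stand-ins for the kernel's _PAGE_* masks, cpu_has_pge and canon_pgprot(), not kernel API.

#include <stdio.h>

#define PAGE_PRESENT (1UL << 0)   /* mirrors x86 _PAGE_PRESENT (bit 0) */
#define PAGE_PSE     (1UL << 7)   /* mirrors x86 _PAGE_PSE (bit 7) */
#define PAGE_GLOBAL  (1UL << 8)   /* mirrors x86 _PAGE_GLOBAL (bit 8) */

static int have_pge = 1;          /* stand-in for the cpu_has_pge check */

/*
 * Set PSE/GLOBAL only when PRESENT is set, so present()/huge()-style
 * predicates that also look at PSE or GLOBAL can never report a
 * non-present entry as present.
 */
static unsigned long gate_flags(unsigned long prot)
{
	if (prot & PAGE_PRESENT)
		prot |= PAGE_PSE | PAGE_GLOBAL;
	else
		prot &= ~(PAGE_PSE | PAGE_GLOBAL);
	return prot;
}

/* Stand-in for canon_pgprot(): drop GLOBAL on hardware without PGE. */
static unsigned long canonicalize(unsigned long prot)
{
	if (!have_pge)
		prot &= ~PAGE_GLOBAL;
	return prot;
}

int main(void)
{
	unsigned long present = PAGE_PRESENT;
	unsigned long not_present = 0;

	/* a present entry keeps PSE/GLOBAL, a non-present entry loses them */
	printf("present:     %#lx\n", canonicalize(gate_flags(present)));
	printf("not present: %#lx\n", canonicalize(gate_flags(not_present)));
	return 0;
}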