| | | |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-20 18:58:18 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-20 18:58:18 -0700 |
| commit | 24b5e20f11a75866bbffc46c30a22fa50612a769 (patch) | |
| tree | 2dab5fc6714a5ad1e31bdea1e954fbd69704ce72 /arch/x86/mm | |
| parent | 26660a4046b171a752e72a1dd32153230234fe3a (diff) | |
| parent | d367cef0a7f0c6ee86e997c0cb455b21b3c6b9ba (diff) | |
Merge branch 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull EFI updates from Ingo Molnar:
"The main changes are:
- Use separate EFI page tables when executing EFI firmware code.
This isolates the EFI context from the rest of the kernel, which
has security and general robustness advantages. (Matt Fleming)
- Run regular UEFI firmware with interrupts enabled. This is already
the status quo under other OSs. (Ard Biesheuvel)
- Various x86 EFI enhancements, such as the use of non-executable
attributes for EFI memory mappings. (Sai Praneeth Prakhya)
- Various arm64 UEFI enhancements. (Ard Biesheuvel)
- ... various fixes and cleanups.
The separate EFI page tables feature got delayed twice already,
because it's an intrusive change and we didn't feel confident about
it - third time's the charm we hope!"
* 'efi-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
x86/mm/pat: Fix boot crash when 1GB pages are not supported by the CPU
x86/efi: Only map kernel text for EFI mixed mode
x86/efi: Map EFI_MEMORY_{XP,RO} memory region bits to EFI page tables
x86/mm/pat: Don't implicitly allow _PAGE_RW in kernel_map_pages_in_pgd()
efi/arm*: Perform hardware compatibility check
efi/arm64: Check for h/w support before booting a >4 KB granular kernel
efi/arm: Check for LPAE support before booting a LPAE kernel
efi/arm-init: Use read-only early mappings
efi/efistub: Prevent __init annotations from being used
arm64/vmlinux.lds.S: Handle .init.rodata.xxx and .init.bss sections
efi/arm64: Drop __init annotation from handle_kernel_image()
x86/mm/pat: Use _PAGE_GLOBAL bit for EFI page table mappings
efi/runtime-wrappers: Run UEFI Runtime Services with interrupts enabled
efi: Reformat GUID tables to follow the format in UEFI spec
efi: Add Persistent Memory type name
efi: Add NV memory attribute
x86/efi: Show actual ending addresses in efi_print_memmap
x86/efi/bgrt: Don't ignore the BGRT if the 'valid' bit is 0
efivars: Use to_efivar_entry
efi: Runtime-wrapper: Get rid of the rtc_lock spinlock
...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/pageattr.c | 34 |
1 file changed, 23 insertions, 11 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4d0b26253042..01be9ec3bf79 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -909,16 +909,25 @@ static void populate_pte(struct cpa_data *cpa,
 
 	pte = pte_offset_kernel(pmd, start);
 
-	while (num_pages-- && start < end) {
+	/*
+	 * Set the GLOBAL flags only if the PRESENT flag is
+	 * set otherwise pte_present will return true even on
+	 * a non present pte. The canon_pgprot will clear
+	 * _PAGE_GLOBAL for the ancient hardware that doesn't
+	 * support it.
+	 */
+	if (pgprot_val(pgprot) & _PAGE_PRESENT)
+		pgprot_val(pgprot) |= _PAGE_GLOBAL;
+	else
+		pgprot_val(pgprot) &= ~_PAGE_GLOBAL;
 
-		/* deal with the NX bit */
-		if (!(pgprot_val(pgprot) & _PAGE_NX))
-			cpa->pfn &= ~_PAGE_NX;
+	pgprot = canon_pgprot(pgprot);
 
-		set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+	while (num_pages-- && start < end) {
+		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
 		start	 += PAGE_SIZE;
-		cpa->pfn += PAGE_SIZE;
+		cpa->pfn++;
 		pte++;
 	}
 }
@@ -974,11 +983,11 @@ static int populate_pmd(struct cpa_data *cpa,
 
 		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+		set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pmd_pgprot)));
 
 		start	  += PMD_SIZE;
-		cpa->pfn  += PMD_SIZE;
+		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
 		cur_pages += PMD_SIZE >> PAGE_SHIFT;
 	}
 
@@ -1046,12 +1055,12 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
-	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+	while (cpu_has_gbpages && end - start >= PUD_SIZE) {
+		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pud_pgprot)));
 
 		start	  += PUD_SIZE;
-		cpa->pfn  += PUD_SIZE;
+		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
 		cur_pages += PUD_SIZE >> PAGE_SHIFT;
 		pud++;
 	}
@@ -1964,6 +1973,9 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
 	if (!(page_flags & _PAGE_NX))
 		cpa.mask_clr = __pgprot(_PAGE_NX);
 
+	if (!(page_flags & _PAGE_RW))
+		cpa.mask_clr = __pgprot(_PAGE_RW);
+
 	cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
 
 	retval = __change_page_attr_set_clr(&cpa, 0);
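To make the arithmetic in these hunks concrete: after the change, cpa->pfn holds a page frame number rather than a physical address, so an entry is built as `pfn << PAGE_SHIFT | flags` and the counter advances in pages (`PMD_SIZE >> PAGE_SHIFT`, `PUD_SIZE >> PAGE_SHIFT`) instead of bytes. The sketch below is a standalone userspace illustration of that convention with simplified constants and a simplified entry encoding; it is not the kernel's real pte/pmd/pud helpers.

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1ULL << PMD_SHIFT)
#define FLAG_PRESENT	0x001ULL
#define FLAG_PSE	0x080ULL	/* large-page bit, as in the PMD/PUD hunks */

/* Build a simplified table entry from a page frame number plus flags. */
static uint64_t make_entry(uint64_t pfn, uint64_t flags)
{
	/* The pfn must be shifted into the address field of the entry. */
	return (pfn << PAGE_SHIFT) | flags;
}

int main(void)
{
	uint64_t pfn = 0x1234;	/* describes physical address 0x1234000 */

	printf("4K entry: %#llx\n",
	       (unsigned long long)make_entry(pfn, FLAG_PRESENT));
	printf("2M entry: %#llx\n",
	       (unsigned long long)make_entry(pfn, FLAG_PRESENT | FLAG_PSE));

	/*
	 * Advancing to the next 2M mapping moves the counter by
	 * PMD_SIZE >> PAGE_SHIFT pages, not by PMD_SIZE bytes, which is
	 * exactly the change the hunks above make to cpa->pfn.
	 */
	pfn += PMD_SIZE >> PAGE_SHIFT;
	printf("next pfn: %#llx\n", (unsigned long long)pfn);
	return 0;
}
```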