Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/Makefile                        |   4
-rw-r--r--   arch/x86/boot/compressed/Makefile        |  41
-rw-r--r--   arch/x86/boot/compressed/head_32.S       |  99
-rw-r--r--   arch/x86/boot/compressed/head_64.S       | 165
-rw-r--r--   arch/x86/boot/compressed/mkpiggy.c       |   6
-rw-r--r--   arch/x86/boot/compressed/vmlinux.lds.S   |  50
-rw-r--r--   arch/x86/boot/setup.ld                   |   2
-rw-r--r--   arch/x86/include/asm/asm.h               |   6
-rw-r--r--   arch/x86/kernel/vmlinux.lds.S            |  39
9 files changed, 206 insertions(+), 206 deletions(-)
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 4346ffb2e39f..154259f18b8b 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -209,6 +209,10 @@ ifdef CONFIG_X86_64
 LDFLAGS_vmlinux += -z max-page-size=0x200000
 endif
 
+# We never want expected sections to be placed heuristically by the
+# linker. All sections should be explicitly named in the linker script.
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+
 archscripts: scripts_basic
 	$(Q)$(MAKE) $(build)=arch/x86/tools relocs
 
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index ff7894f39e0e..4fb989ef5665 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -29,7 +29,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
 	vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 vmlinux.bin.zst
 
 KBUILD_CFLAGS := -m$(BITS) -O2
-KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC)
+KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 cflags-$(CONFIG_X86_32) := -march=i386
 cflags-$(CONFIG_X86_64) := -mcmodel=small
@@ -45,24 +45,19 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
 # Disable relocation relaxation in case the link is not PIE.
 KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
+KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
 
 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 GCOV_PROFILE := n
 UBSAN_SANITIZE :=n
 
 KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += $(call ld-option,--no-ld-generated-unwind-info)
 # Compressed kernel should be built as PIE since it may be loaded at any
 # address by the bootloader.
-ifeq ($(CONFIG_X86_32),y)
-KBUILD_LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
-else
-# To build 64-bit compressed kernel as PIE, we disable relocation
-# overflow check to avoid relocation overflow error with a new linker
-# command-line option, -z noreloc-overflow.
-KBUILD_LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
-	&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
-endif
-LDFLAGS_vmlinux := -T
+LDFLAGS_vmlinux := -pie $(call ld-option, --no-dynamic-linker)
+LDFLAGS_vmlinux += $(call ld-option, --orphan-handling=warn)
+LDFLAGS_vmlinux += -T
 
 hostprogs	:= mkpiggy
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
@@ -96,30 +91,8 @@ vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
 
-# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
-# can place it anywhere in memory and it will still run. However, since
-# it is executed as-is without any ELF relocation processing performed
-# (and has already had all relocation sections stripped from the binary),
-# none of the code can use data relocations (e.g. static assignments of
-# pointer values), since they will be meaningless at runtime. This check
-# will refuse to link the vmlinux if any of these relocations are found.
-quiet_cmd_check_data_rel = DATAREL $@
-define cmd_check_data_rel
-	for obj in $(filter %.o,$^); do \
-		$(READELF) -S $$obj | grep -qF .rel.local && { \
-			echo "error: $$obj has data relocations!" >&2; \
-			exit 1; \
-		} || true; \
-	done
-endef
-
-# We need to run two commands under "if_changed", so merge them into a
-# single invocation.
-quiet_cmd_check-and-link-vmlinux = LD      $@
-      cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
-
 $(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
-	$(call if_changed,check-and-link-vmlinux)
+	$(call if_changed,ld)
 
 OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
 $(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 03557f2174bf..659fad53ca82 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -33,32 +33,13 @@
 #include <asm/bootparam.h>
 
 /*
- * The 32-bit x86 assembler in binutils 2.26 will generate R_386_GOT32X
- * relocation to get the symbol address in PIC. When the compressed x86
- * kernel isn't built as PIC, the linker optimizes R_386_GOT32X
- * relocations to their fixed symbol addresses. However, when the
- * compressed x86 kernel is loaded at a different address, it leads
- * to the following load failure:
- *
- *   Failed to allocate space for phdrs
- *
- * during the decompression stage.
- *
- * If the compressed x86 kernel is relocatable at run-time, it should be
- * compiled with -fPIE, instead of -fPIC, if possible and should be built as
- * Position Independent Executable (PIE) so that linker won't optimize
- * R_386_GOT32X relocation to its fixed symbol address. Older
- * linkers generate R_386_32 relocations against locally defined symbols,
- * _bss, _ebss, _got, _egot and _end, in PIE. It isn't wrong, just less
- * optimal than R_386_RELATIVE. But the x86 kernel fails to properly handle
- * R_386_32 relocations when relocating the kernel. To generate
- * R_386_RELATIVE relocations, we mark _bss, _ebss, _got, _egot and _end as
- * hidden:
+ * These symbols needed to be marked as .hidden to prevent the BFD linker from
+ * generating R_386_32 (rather than R_386_RELATIVE) relocations for them when
+ * the 32-bit compressed kernel is linked as PIE. This is no longer necessary,
+ * but it doesn't hurt to keep them .hidden.
  */
 	.hidden _bss
 	.hidden _ebss
-	.hidden _got
-	.hidden _egot
 	.hidden _end
 
 	__HEAD
@@ -77,10 +58,10 @@ SYM_FUNC_START(startup_32)
 	leal	(BP_scratch+4)(%esi), %esp
 	call	1f
 1:	popl	%edx
-	subl	$1b, %edx
+	addl	$_GLOBAL_OFFSET_TABLE_+(.-1b), %edx
 
 	/* Load new GDT */
-	leal	gdt(%edx), %eax
+	leal	gdt@GOTOFF(%edx), %eax
 	movl	%eax, 2(%eax)
 	lgdt	(%eax)
 
@@ -93,14 +74,16 @@ SYM_FUNC_START(startup_32)
 	movl	%eax, %ss
 
 /*
- * %edx contains the address we are loaded at by the boot loader and %ebx
- * contains the address where we should move the kernel image temporarily
- * for safe in-place decompression. %ebp contains the address that the kernel
- * will be decompressed to.
+ * %edx contains the address we are loaded at by the boot loader (plus the
+ * offset to the GOT).  The below code calculates %ebx to be the address where
+ * we should move the kernel image temporarily for safe in-place decompression
+ * (again, plus the offset to the GOT).
+ *
+ * %ebp is calculated to be the address that the kernel will be decompressed to.
  */
 
 #ifdef CONFIG_RELOCATABLE
-	movl	%edx, %ebx
+	leal	startup_32@GOTOFF(%edx), %ebx
 
 #ifdef CONFIG_EFI_STUB
 /*
@@ -111,7 +94,7 @@ SYM_FUNC_START(startup_32)
  *	image_offset = startup_32 - image_base
  * Otherwise image_offset will be zero and has no effect on the calculations.
  */
-	subl	image_offset(%edx), %ebx
+	subl	image_offset@GOTOFF(%edx), %ebx
 #endif
 
 	movl	BP_kernel_alignment(%esi), %eax
@@ -128,10 +111,10 @@ SYM_FUNC_START(startup_32)
 	movl	%ebx, %ebp	// Save the output address for later
 	/* Target address to relocate to for decompression */
 	addl	BP_init_size(%esi), %ebx
-	subl	$_end, %ebx
+	subl	$_end@GOTOFF, %ebx
 
 	/* Set up the stack */
-	leal	boot_stack_end(%ebx), %esp
+	leal	boot_stack_end@GOTOFF(%ebx), %esp
 
 	/* Zero EFLAGS */
 	pushl	$0
@@ -142,8 +125,8 @@ SYM_FUNC_START(startup_32)
  * where decompression in place becomes safe.
  */
 	pushl	%esi
-	leal	(_bss-4)(%edx), %esi
-	leal	(_bss-4)(%ebx), %edi
+	leal	(_bss@GOTOFF-4)(%edx), %esi
+	leal	(_bss@GOTOFF-4)(%ebx), %edi
 	movl	$(_bss - startup_32), %ecx
 	shrl	$2, %ecx
 	std
@@ -156,14 +139,14 @@ SYM_FUNC_START(startup_32)
  * during extract_kernel below. To avoid any issues, repoint the GDTR
  * to the new copy of the GDT.
  */
-	leal	gdt(%ebx), %eax
+	leal	gdt@GOTOFF(%ebx), %eax
 	movl	%eax, 2(%eax)
 	lgdt	(%eax)
 
 /*
 * Jump to the relocated address.
 */
-	leal	.Lrelocated(%ebx), %eax
+	leal	.Lrelocated@GOTOFF(%ebx), %eax
 	jmp	*%eax
 SYM_FUNC_END(startup_32)
 
@@ -173,7 +156,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
 	add	$0x4, %esp
 	movl	8(%esp), %esi	/* save boot_params pointer */
 	call	efi_main
-	leal	startup_32(%eax), %eax
+	/* efi_main returns the possibly relocated address of startup_32 */
 	jmp	*%eax
 SYM_FUNC_END(efi32_stub_entry)
 SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -186,40 +169,26 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 * Clear BSS (stack is currently empty)
 */
 	xorl	%eax, %eax
-	leal	_bss(%ebx), %edi
-	leal	_ebss(%ebx), %ecx
+	leal	_bss@GOTOFF(%ebx), %edi
+	leal	_ebss@GOTOFF(%ebx), %ecx
 	subl	%edi, %ecx
 	shrl	$2, %ecx
 	rep	stosl
 
 /*
- * Adjust our own GOT
- */
-	leal	_got(%ebx), %edx
-	leal	_egot(%ebx), %ecx
-1:
-	cmpl	%ecx, %edx
-	jae	2f
-	addl	%ebx, (%edx)
-	addl	$4, %edx
-	jmp	1b
-2:
-
-/*
 * Do the extraction, and jump to the new kernel..
 */
-				/* push arguments for extract_kernel: */
-	pushl	$z_output_len	/* decompressed length, end of relocs */
-
-	pushl	%ebp		/* output address */
-
-	pushl	$z_input_len	/* input_len */
-	leal	input_data(%ebx), %eax
-	pushl	%eax		/* input_data */
-	leal	boot_heap(%ebx), %eax
-	pushl	%eax		/* heap area */
-	pushl	%esi		/* real mode pointer */
-	call	extract_kernel	/* returns kernel location in %eax */
+	/* push arguments for extract_kernel: */
+
+	pushl	output_len@GOTOFF(%ebx)	/* decompressed length, end of relocs */
+	pushl	%ebp			/* output address */
+	pushl	input_len@GOTOFF(%ebx)	/* input_len */
+	leal	input_data@GOTOFF(%ebx), %eax
+	pushl	%eax			/* input_data */
+	leal	boot_heap@GOTOFF(%ebx), %eax
+	pushl	%eax			/* heap area */
+	pushl	%esi			/* real mode pointer */
+	call	extract_kernel		/* returns kernel location in %eax */
 	addl	$24, %esp
 
 /*
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 97d37f0a34f5..9e46729cf162 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -40,11 +40,35 @@
  */
 	.hidden _bss
 	.hidden _ebss
-	.hidden _got
-	.hidden _egot
 	.hidden _end
 
 	__HEAD
+
+/*
+ * This macro gives the relative virtual address of X, i.e. the offset of X
+ * from startup_32. This is the same as the link-time virtual address of X,
+ * since startup_32 is at 0, but defining it this way tells the
+ * assembler/linker that we do not want the actual run-time address of X. This
+ * prevents the linker from trying to create unwanted run-time relocation
+ * entries for the reference when the compressed kernel is linked as PIE.
+ *
+ * A reference X(%reg) will result in the link-time VA of X being stored with
+ * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
+ * adds the 64-bit base address where the kernel is loaded.
+ *
+ * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
+ * and no run-time relocation.
+ *
+ * The macro should be used as a displacement with a base register containing
+ * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
+ * [$ rva(X)].
+ *
+ * This macro can only be used from within the .head.text section, since the
+ * expression requires startup_32 to be in the same section as the code being
+ * assembled.
+ */
+#define rva(X) ((X) - startup_32)
+
 	.code32
 SYM_FUNC_START(startup_32)
 	/*
@@ -67,10 +91,10 @@ SYM_FUNC_START(startup_32)
 	leal	(BP_scratch+4)(%esi), %esp
 	call	1f
 1:	popl	%ebp
-	subl	$1b, %ebp
+	subl	$ rva(1b), %ebp
 
 	/* Load new GDT with the 64bit segments using 32bit descriptor */
-	leal	gdt(%ebp), %eax
+	leal	rva(gdt)(%ebp), %eax
 	movl	%eax, 2(%eax)
 	lgdt	(%eax)
 
@@ -83,7 +107,7 @@ SYM_FUNC_START(startup_32)
 	movl	%eax, %ss
 
 /* setup a stack and make sure cpu supports long mode. */
-	leal	boot_stack_end(%ebp), %esp
+	leal	rva(boot_stack_end)(%ebp), %esp
 
 	call	verify_cpu
 	testl	%eax, %eax
@@ -110,7 +134,7 @@ SYM_FUNC_START(startup_32)
  *	image_offset = startup_32 - image_base
  * Otherwise image_offset will be zero and has no effect on the calculations.
  */
-	subl	image_offset(%ebp), %ebx
+	subl	rva(image_offset)(%ebp), %ebx
 #endif
 
 	movl	BP_kernel_alignment(%esi), %eax
@@ -126,7 +150,7 @@ SYM_FUNC_START(startup_32)
 
 	/* Target address to relocate to for decompression */
 	addl	BP_init_size(%esi), %ebx
-	subl	$_end, %ebx
+	subl	$ rva(_end), %ebx
 
 /*
 * Prepare for entering 64 bit mode
@@ -154,19 +178,19 @@ SYM_FUNC_START(startup_32)
 1:
 
 	/* Initialize Page tables to 0 */
-	leal	pgtable(%ebx), %edi
+	leal	rva(pgtable)(%ebx), %edi
 	xorl	%eax, %eax
 	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
 	rep	stosl
 
 	/* Build Level 4 */
-	leal	pgtable + 0(%ebx), %edi
+	leal	rva(pgtable + 0)(%ebx), %edi
 	leal	0x1007 (%edi), %eax
 	movl	%eax, 0(%edi)
 	addl	%edx, 4(%edi)
 
 	/* Build Level 3 */
-	leal	pgtable + 0x1000(%ebx), %edi
+	leal	rva(pgtable + 0x1000)(%ebx), %edi
 	leal	0x1007(%edi), %eax
 	movl	$4, %ecx
 1:	movl	%eax, 0x00(%edi)
@@ -177,7 +201,7 @@ SYM_FUNC_START(startup_32)
 	jnz	1b
 
 	/* Build Level 2 */
-	leal	pgtable + 0x2000(%ebx), %edi
+	leal	rva(pgtable + 0x2000)(%ebx), %edi
 	movl	$0x00000183, %eax
 	movl	$2048, %ecx
 1:	movl	%eax, 0(%edi)
@@ -188,7 +212,7 @@ SYM_FUNC_START(startup_32)
 	jnz	1b
 
 	/* Enable the boot page tables */
-	leal	pgtable(%ebx), %eax
+	leal	rva(pgtable)(%ebx), %eax
 	movl	%eax, %cr3
 
 	/* Enable Long mode in EFER (Extended Feature Enable Register) */
@@ -213,14 +237,14 @@ SYM_FUNC_START(startup_32)
 	 * We place all of the values on our mini stack so lret can
 	 * used to perform that far jump.
 	 */
-	leal	startup_64(%ebp), %eax
+	leal	rva(startup_64)(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
-	movl	efi32_boot_args(%ebp), %edi
+	movl	rva(efi32_boot_args)(%ebp), %edi
 	cmp	$0, %edi
 	jz	1f
-	leal	efi64_stub_entry(%ebp), %eax
-	movl	efi32_boot_args+4(%ebp), %esi
-	movl	efi32_boot_args+8(%ebp), %edx	// saved bootparams pointer
+	leal	rva(efi64_stub_entry)(%ebp), %eax
+	movl	rva(efi32_boot_args+4)(%ebp), %esi
+	movl	rva(efi32_boot_args+8)(%ebp), %edx	// saved bootparams pointer
 	cmpl	$0, %edx
 	jnz	1f
 	/*
@@ -231,7 +255,7 @@ SYM_FUNC_START(startup_32)
 	 * the correct stack alignment for entry.
 	 */
 	subl	$40, %esp
-	leal	efi_pe_entry(%ebp), %eax
+	leal	rva(efi_pe_entry)(%ebp), %eax
 	movl	%edi, %ecx			// MS calling convention
 	movl	%esi, %edx
 1:
@@ -257,18 +281,18 @@ SYM_FUNC_START(efi32_stub_entry)
 	call	1f
 1:	pop	%ebp
-	subl	$1b, %ebp
+	subl	$ rva(1b), %ebp
 
-	movl	%esi, efi32_boot_args+8(%ebp)
+	movl	%esi, rva(efi32_boot_args+8)(%ebp)
 SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
-	movl	%ecx, efi32_boot_args(%ebp)
-	movl	%edx, efi32_boot_args+4(%ebp)
-	movb	$0, efi_is64(%ebp)
+	movl	%ecx, rva(efi32_boot_args)(%ebp)
+	movl	%edx, rva(efi32_boot_args+4)(%ebp)
+	movb	$0, rva(efi_is64)(%ebp)
 
 	/* Save firmware GDTR and code/data selectors */
-	sgdtl	efi32_boot_gdt(%ebp)
-	movw	%cs, efi32_boot_cs(%ebp)
-	movw	%ds, efi32_boot_ds(%ebp)
+	sgdtl	rva(efi32_boot_gdt)(%ebp)
+	movw	%cs, rva(efi32_boot_cs)(%ebp)
+	movw	%ds, rva(efi32_boot_ds)(%ebp)
 
 	/* Disable paging */
 	movl	%cr0, %eax
@@ -347,30 +371,11 @@ SYM_CODE_START(startup_64)
 
 	/* Target address to relocate to for decompression */
 	movl	BP_init_size(%rsi), %ebx
-	subl	$_end, %ebx
+	subl	$ rva(_end), %ebx
 	addq	%rbp, %rbx
 
 	/* Set up the stack */
-	leaq	boot_stack_end(%rbx), %rsp
-
-	/*
-	 * paging_prepare() and cleanup_trampoline() below can have GOT
-	 * references. Adjust the table with address we are running at.
-	 *
-	 * Zero RAX for adjust_got: the GOT was not adjusted before;
-	 * there's no adjustment to undo.
-	 */
-	xorq	%rax, %rax
-
-	/*
-	 * Calculate the address the binary is loaded at and use it as
-	 * a GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rdi
-	subq	$1b, %rdi
-
-	call	.Ladjust_got
+	leaq	rva(boot_stack_end)(%rbx), %rsp
 
 	/*
 	 * At this point we are in long mode with 4-level paging enabled,
@@ -444,7 +449,7 @@ SYM_CODE_START(startup_64)
 	lretq
 trampoline_return:
 	/* Restore the stack, the 32-bit trampoline uses its own stack */
-	leaq	boot_stack_end(%rbx), %rsp
+	leaq	rva(boot_stack_end)(%rbx), %rsp
 
 	/*
 	 * cleanup_trampoline() would restore trampoline memory.
@@ -456,7 +461,7 @@ trampoline_return:
 	 * this function call.
 	 */
 	pushq	%rsi
-	leaq	top_pgtable(%rbx), %rdi
+	leaq	rva(top_pgtable)(%rbx), %rdi
 	call	cleanup_trampoline
 	popq	%rsi
 
@@ -464,30 +469,15 @@ trampoline_return:
 	pushq	$0
 	popfq
 
-	/*
-	 * Previously we've adjusted the GOT with address the binary was
-	 * loaded at. Now we need to re-adjust for relocation address.
-	 *
-	 * Calculate the address the binary is loaded at, so that we can
-	 * undo the previous GOT adjustment.
-	 */
-	call	1f
-1:	popq	%rax
-	subq	$1b, %rax
-
-	/* The new adjustment is the relocation address */
-	movq	%rbx, %rdi
-	call	.Ladjust_got
-
 /*
  * Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
 	pushq	%rsi
 	leaq	(_bss-8)(%rip), %rsi
-	leaq	(_bss-8)(%rbx), %rdi
-	movq	$_bss /* - $startup_32 */, %rcx
-	shrq	$3, %rcx
+	leaq	rva(_bss-8)(%rbx), %rdi
+	movl	$(_bss - startup_32), %ecx
+	shrl	$3, %ecx
 	std
 	rep	movsq
 	cld
@@ -498,15 +488,15 @@ trampoline_return:
 	 * during extract_kernel below. To avoid any issues, repoint the GDTR
 	 * to the new copy of the GDT.
 	 */
-	leaq	gdt64(%rbx), %rax
-	leaq	gdt(%rbx), %rdx
+	leaq	rva(gdt64)(%rbx), %rax
+	leaq	rva(gdt)(%rbx), %rdx
 	movq	%rdx, 2(%rax)
 	lgdt	(%rax)
 
 	/*
 	 * Jump to the relocated address.
 	 */
-	leaq	.Lrelocated(%rbx), %rax
+	leaq	rva(.Lrelocated)(%rbx), %rax
 	jmp	*%rax
 SYM_CODE_END(startup_64)
 
@@ -518,7 +508,7 @@ SYM_FUNC_START_ALIAS(efi_stub_entry)
 	movq	%rdx, %rbx			/* save boot_params pointer */
 	call	efi_main
 	movq	%rbx,%rsi
-	leaq	startup_64(%rax), %rax
+	leaq	rva(startup_64)(%rax), %rax
 	jmp	*%rax
 SYM_FUNC_END(efi64_stub_entry)
 SYM_FUNC_END_ALIAS(efi_stub_entry)
@@ -544,9 +534,9 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	movq	%rsi, %rdi		/* real mode address */
 	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
 	leaq	input_data(%rip), %rdx	/* input_data */
-	movl	$z_input_len, %ecx	/* input_len */
+	movl	input_len(%rip), %ecx	/* input_len */
 	movq	%rbp, %r8		/* output target address */
-	movl	$z_output_len, %r9d	/* decompressed length, end of relocs */
+	movl	output_len(%rip), %r9d	/* decompressed length, end of relocs */
 	call	extract_kernel		/* returns kernel location in %rax */
 	popq	%rsi
 
@@ -556,27 +546,6 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	jmp	*%rax
 SYM_FUNC_END(.Lrelocated)
 
-/*
- * Adjust the global offset table
- *
- * RAX is the previous adjustment of the table to undo (use 0 if it's the
- * first time we touch GOT).
- * RDI is the new adjustment to apply.
- */
-.Ladjust_got:
-	/* Walk through the GOT adding the address to the entries */
-	leaq	_got(%rip), %rdx
-	leaq	_egot(%rip), %rcx
-1:
-	cmpq	%rcx, %rdx
-	jae	2f
-	subq	%rax, (%rdx)	/* Undo previous adjustment */
-	addq	%rdi, (%rdx)	/* Apply the new adjustment */
-	addq	$8, %rdx
-	jmp	1b
-2:
-	ret
-
 	.code32
 /*
  * This is the 32-bit trampoline that will be copied over to low memory.
@@ -702,7 +671,7 @@ SYM_DATA(efi_is64, .byte 1)
 #define BS32_handle_protocol	88	// offsetof(efi_boot_services_32_t, handle_protocol)
 #define LI32_image_base		32	// offsetof(efi_loaded_image_32_t, image_base)
 
-	.text
+	__HEAD
 	.code32
 SYM_FUNC_START(efi32_pe_entry)
 /*
@@ -724,12 +693,12 @@ SYM_FUNC_START(efi32_pe_entry)
 
 	call	1f
 1:	pop	%ebx
-	subl	$1b, %ebx
+	subl	$ rva(1b), %ebx
 
 	/* Get the loaded image protocol pointer from the image handle */
 	leal	-4(%ebp), %eax
 	pushl	%eax				// &loaded_image
-	leal	loaded_image_proto(%ebx), %eax
+	leal	rva(loaded_image_proto)(%ebx), %eax
 	pushl	%eax				// pass the GUID address
 	pushl	8(%ebp)				// pass the image handle
 
@@ -764,7 +733,7 @@ SYM_FUNC_START(efi32_pe_entry)
 	 * use it before we get to the 64-bit efi_pe_entry() in C code.
 	 */
 	subl	%esi, %ebx
-	movl	%ebx, image_offset(%ebp)	// save image_offset
+	movl	%ebx, rva(image_offset)(%ebp)	// save image_offset
 	jmp	efi32_pe_stub_entry
 
 2:	popl	%edi				// restore callee-save registers
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 7e01248765b2..52aa56cdbacc 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -60,6 +60,12 @@ int main(int argc, char *argv[])
 	printf(".incbin \"%s\"\n", argv[1]);
 	printf("input_data_end:\n");
 
+	printf(".section \".rodata\",\"a\",@progbits\n");
+	printf(".globl input_len\n");
+	printf("input_len:\n\t.long %lu\n", ilen);
+	printf(".globl output_len\n");
+	printf("output_len:\n\t.long %lu\n", (unsigned long)olen);
+
 	retval = 0;
 bail:
 	if (f)
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 8f1025d1f681..112b2375d021 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -42,12 +42,6 @@ SECTIONS
 		*(.rodata.*)
 		_erodata = . ;
 	}
-	.got : {
-		_got = .;
-		KEEP(*(.got.plt))
-		KEEP(*(.got))
-		_egot = .;
-	}
 	.data :	{
 		_data = . ;
 		*(.data)
@@ -75,5 +69,49 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);	/* keep ZO size page aligned */
 	_end = .;
 
+	STABS_DEBUG
+	DWARF_DEBUG
+	ELF_DETAILS
+
+	DISCARDS
+	/DISCARD/ : {
+		*(.dynamic) *(.dynsym) *(.dynstr) *(.dynbss)
+		*(.hash) *(.gnu.hash)
+		*(.note.*)
+	}
+
+	.got.plt (INFO) : {
+		*(.got.plt)
+	}
+	ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+	       SIZEOF(.got.plt) == 0x18,
+#else
+	       SIZEOF(.got.plt) == 0xc,
+#endif
+	       "Unexpected GOT/PLT entries detected!")
+
+	/*
+	 * Sections that should stay zero sized, which is safer to
+	 * explicitly check instead of blindly discarding.
+	 */
+	.got : {
+		*(.got)
+	}
+	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+	.plt : {
+		*(.plt) *(.plt.*)
+	}
+	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+	.rel.dyn : {
+		*(.rel.*) *(.rel_*)
+	}
+	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+	.rela.dyn : {
+		*(.rela.*) *(.rela_*)
+	}
+	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
 }
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 24c95522f231..49546c247ae2 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -20,7 +20,7 @@ SECTIONS
 	.initdata	: { *(.initdata) }
 	__end_init = .;
 
-	.text		: { *(.text) }
+	.text		: { *(.text .text.*) }
 	.text32		: { *(.text32) }
 
 	. = ALIGN(16);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0359cbbd0f50..0603c7423aca 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -141,11 +141,15 @@
 # define _ASM_EXTABLE_FAULT(from, to)				\
 	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
 
-# define _ASM_NOKPROBE(entry)					\
+# ifdef CONFIG_KPROBES
+#  define _ASM_NOKPROBE(entry)					\
 	.pushsection "_kprobe_blacklist","aw" ;			\
 	_ASM_ALIGN ;						\
 	_ASM_PTR (entry);					\
 	.popsection
+# else
+#  define _ASM_NOKPROBE(entry)
+# endif
 
 #else /* ! __ASSEMBLY__ */
 # define _EXPAND_EXTABLE_HANDLE(x) #x
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9a03e5b23135..45d72447df84 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -411,10 +411,47 @@ SECTIONS
 
 	STABS_DEBUG
 	DWARF_DEBUG
+	ELF_DETAILS
 
 	DISCARDS
-}
 
+	/*
+	 * Make sure that the .got.plt is either completely empty or it
+	 * contains only the lazy dispatch entries.
+	 */
+	.got.plt (INFO) : { *(.got.plt) }
+	ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+	       SIZEOF(.got.plt) == 0x18,
+#else
+	       SIZEOF(.got.plt) == 0xc,
+#endif
+	       "Unexpected GOT/PLT entries detected!")
+
+	/*
+	 * Sections that should stay zero sized, which is safer to
+	 * explicitly check instead of blindly discarding.
+	 */
+	.got : {
+		*(.got) *(.igot.*)
+	}
+	ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+	.plt : {
+		*(.plt) *(.plt.*) *(.iplt)
+	}
+	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+	.rel.dyn : {
+		*(.rel.*) *(.rel_*)
+	}
+	ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+	.rela.dyn : {
+		*(.rela.*) *(.rela_*)
+	}
+	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+}
 
 #ifdef CONFIG_X86_32
 /*
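
Editor's note (not part of the patch): the ASSERTs and --orphan-handling=warn above only fire at link time. A hedged sketch of how the result could be double-checked after a build, assuming an in-tree x86 build and binutils readelf/nm; the paths and section names are the ones used by this series, but the exact commands are illustrative only:

	# No run-time relocation sections should survive in the linked compressed kernel.
	readelf -S arch/x86/boot/compressed/vmlinux | grep -E '\.rela?\.' \
		&& echo "unexpected run-time relocations" >&2

	# Any remaining .got/.got.plt must be empty, or hold only the three reserved
	# .got.plt slots (0x18 bytes on 64-bit, 0xc on 32-bit), per the ASSERTs above.
	readelf -S arch/x86/boot/compressed/vmlinux | grep -E '\.got'

	# The lengths emitted by mkpiggy are now ordinary .rodata symbols rather than
	# link-time constants, so they should show up in the piggy object:
	nm arch/x86/boot/compressed/piggy.o | grep -E 'input_len|output_len'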