Diffstat (limited to 'arch/arm/kernel')
32 files changed, 237 insertions, 326 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 1d296fc8494e..4305345987d3 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -10,7 +10,7 @@ endif # Object file lists. -obj-y := compat.o entry-armv.o entry-common.o irq.o \ +obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ process.o ptrace.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index cc7b246e9652..2357b1cf1cf9 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -13,11 +13,11 @@ #include <linux/delay.h> #include <linux/in6.h> #include <linux/syscalls.h> +#include <linux/uaccess.h> +#include <linux/io.h> #include <asm/checksum.h> -#include <asm/io.h> #include <asm/system.h> -#include <asm/uaccess.h> #include <asm/ftrace.h> /* diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index e5747547b44c..17a59b6e521f 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -10,8 +10,8 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/init.h> +#include <linux/io.h> -#include <asm/io.h> #include <asm/mach-types.h> #include <asm/mach/pci.h> diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c index 3b6a1c293ee4..99995c2b2312 100644 --- a/arch/arm/kernel/crunch.c +++ b/arch/arm/kernel/crunch.c @@ -15,9 +15,9 @@ #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> +#include <linux/io.h> #include <mach/ep93xx-regs.h> #include <asm/thread_notify.h> -#include <asm/io.h> struct crunch_state *crunch_owner; diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 9550ff0ddde4..f53c58290543 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -89,10 +89,12 @@ ENTRY(printhex8) mov r1, #8 b printhex +ENDPROC(printhex8) ENTRY(printhex4) mov r1, #4 b printhex +ENDPROC(printhex4) ENTRY(printhex2) mov r1, #2 @@ -110,6 +112,7 @@ printhex: adr r2, hexbuf bne 1b mov r0, r2 b printascii +ENDPROC(printhex2) .ltorg @@ -127,11 +130,13 @@ ENTRY(printascii) teqne r1, #0 bne 1b mov pc, lr +ENDPROC(printascii) ENTRY(printch) addruart r3 mov r1, r0 mov r0, #0 b 1b +ENDPROC(printch) hexbuf: .space 16 diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c index 2f080a35a2d9..4a3a50495c60 100644 --- a/arch/arm/kernel/dma-isa.c +++ b/arch/arm/kernel/dma-isa.c @@ -19,10 +19,9 @@ #include <linux/ioport.h> #include <linux/init.h> #include <linux/dma-mapping.h> +#include <linux/io.h> #include <asm/dma.h> -#include <asm/io.h> - #include <asm/mach/dma.h> #define ISA_DMA_MODE_READ 0x44 diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index 7a50575a8d4d..60c079d85355 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c @@ -587,8 +587,7 @@ ecard_irq_handler(unsigned int irq, struct irq_desc *desc) pending = ecard_default_ops.irqpending(ec); if (pending) { - struct irq_desc *d = irq_desc + ec->irq; - desc_handle_irq(ec->irq, d); + generic_handle_irq(ec->irq); called ++; } } @@ -622,7 +621,6 @@ ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc) ecard_t *ec = slot_to_ecard(slot); if (ec->claimed) { - struct irq_desc *d = irq_desc + ec->irq; /* * this ugly code is so that we can operate a * prioritorising system: @@ -635,7 +633,7 @@ ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc) * Serial cards should go in 0/1, ethernet/scsi in 2/3 * otherwise you will lose serial data at high speeds! 
*/ - desc_handle_irq(ec->irq, d); + generic_handle_irq(ec->irq); } else { printk(KERN_WARNING "card%d: interrupt from unclaimed " "card???\n", slot); diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c new file mode 100644 index 000000000000..513f332f040d --- /dev/null +++ b/arch/arm/kernel/elf.c @@ -0,0 +1,79 @@ +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/personality.h> +#include <linux/binfmts.h> +#include <linux/elf.h> + +int elf_check_arch(const struct elf32_hdr *x) +{ + unsigned int eflags; + + /* Make sure it's an ARM executable */ + if (x->e_machine != EM_ARM) + return 0; + + /* Make sure the entry address is reasonable */ + if (x->e_entry & 1) { + if (!(elf_hwcap & HWCAP_THUMB)) + return 0; + } else if (x->e_entry & 3) + return 0; + + eflags = x->e_flags; + if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { + /* APCS26 is only allowed if the CPU supports it */ + if ((eflags & EF_ARM_APCS_26) && !(elf_hwcap & HWCAP_26BIT)) + return 0; + + /* VFP requires the supporting code */ + if ((eflags & EF_ARM_VFP_FLOAT) && !(elf_hwcap & HWCAP_VFP)) + return 0; + } + return 1; +} +EXPORT_SYMBOL(elf_check_arch); + +void elf_set_personality(const struct elf32_hdr *x) +{ + unsigned int eflags = x->e_flags; + unsigned int personality = PER_LINUX_32BIT; + + /* + * APCS-26 is only valid for OABI executables + */ + if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) { + if (eflags & EF_ARM_APCS_26) + personality = PER_LINUX; + } + + set_personality(personality); + + /* + * Since the FPA coprocessor uses CP1 and CP2, and iWMMXt uses CP0 + * and CP1, we only enable access to the iWMMXt coprocessor if the + * binary is EABI or softfloat (and thus, guaranteed not to use + * FPA instructions.) + */ + if (elf_hwcap & HWCAP_IWMMXT && + eflags & (EF_ARM_EABI_MASK | EF_ARM_SOFT_FLOAT)) { + set_thread_flag(TIF_USING_IWMMXT); + } else { + clear_thread_flag(TIF_USING_IWMMXT); + } +} +EXPORT_SYMBOL(elf_set_personality); + +/* + * Set READ_IMPLIES_EXEC if: + * - the binary requires an executable stack + * - we're running on a CPU which doesn't support NX. 
+ */ +int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) +{ + if (executable_stack != EXSTACK_ENABLE_X) + return 1; + if (cpu_architecture() <= CPU_ARCH_ARMv6) + return 1; + return 0; +} +EXPORT_SYMBOL(arm_elf_read_implies_exec); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 617e509d60df..77b047475539 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -76,14 +76,17 @@ __pabt_invalid: inv_entry BAD_PREFETCH b common_invalid +ENDPROC(__pabt_invalid) __dabt_invalid: inv_entry BAD_DATA b common_invalid +ENDPROC(__dabt_invalid) __irq_invalid: inv_entry BAD_IRQ b common_invalid +ENDPROC(__irq_invalid) __und_invalid: inv_entry BAD_UNDEFINSTR @@ -107,6 +110,7 @@ common_invalid: mov r0, sp b bad_mode +ENDPROC(__und_invalid) /* * SVC mode handlers @@ -192,6 +196,7 @@ __dabt_svc: ldr r0, [sp, #S_PSR] msr spsr_cxsf, r0 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +ENDPROC(__dabt_svc) .align 5 __irq_svc: @@ -223,6 +228,7 @@ __irq_svc: bleq trace_hardirqs_on #endif ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +ENDPROC(__irq_svc) .ltorg @@ -272,6 +278,7 @@ __und_svc: ldr lr, [sp, #S_PSR] @ Get SVC cpsr msr spsr_cxsf, lr ldmia sp, {r0 - pc}^ @ Restore SVC registers +ENDPROC(__und_svc) .align 5 __pabt_svc: @@ -313,6 +320,7 @@ __pabt_svc: ldr r0, [sp, #S_PSR] msr spsr_cxsf, r0 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +ENDPROC(__pabt_svc) .align 5 .LCcralign: @@ -412,6 +420,7 @@ __dabt_usr: mov r2, sp adr lr, ret_from_exception b do_DataAbort +ENDPROC(__dabt_usr) .align 5 __irq_usr: @@ -441,6 +450,7 @@ __irq_usr: mov why, #0 b ret_to_user +ENDPROC(__irq_usr) .ltorg @@ -474,6 +484,7 @@ __und_usr: #else b __und_usr_unknown #endif +ENDPROC(__und_usr) @ @ fallthrough to call_fpe @@ -642,6 +653,7 @@ __und_usr_unknown: mov r0, sp adr lr, ret_from_exception b do_undefinstr +ENDPROC(__und_usr_unknown) .align 5 __pabt_usr: @@ -666,6 +678,8 @@ ENTRY(ret_from_exception) get_thread_info tsk mov why, #0 b ret_to_user +ENDPROC(__pabt_usr) +ENDPROC(ret_from_exception) /* * Register switch for ARMv3 and ARMv4 processors @@ -702,6 +716,7 @@ ENTRY(__switch_to) bl atomic_notifier_call_chain mov r0, r5 ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously +ENDPROC(__switch_to) __INIT @@ -1029,6 +1044,7 @@ vector_\name: mov r0, sp ldr lr, [pc, lr, lsl #2] movs pc, lr @ branch to handler in SVC mode +ENDPROC(vector_\name) .endm .globl __stubs_start diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 060d7e2e9f64..3aa14dcc5bab 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -77,6 +77,7 @@ no_work_pending: mov r0, r0 add sp, sp, #S_FRAME_SIZE - S_PC movs pc, lr @ return & move spsr_svc into cpsr +ENDPROC(ret_to_user) /* * This is how we return from a fork. @@ -92,7 +93,7 @@ ENTRY(ret_from_fork) mov r0, #1 @ trace exit [IP = 1] bl syscall_trace b ret_slow_syscall - +ENDPROC(ret_from_fork) .equ NR_syscalls,0 #define CALL(x) .equ NR_syscalls,NR_syscalls+1 @@ -269,6 +270,7 @@ ENTRY(vector_swi) eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back bcs arm_syscall b sys_ni_syscall @ not private func +ENDPROC(vector_swi) /* * This is the really slow path. 
We're going to be doing @@ -326,7 +328,6 @@ ENTRY(sys_call_table) */ @ r0 = syscall number @ r8 = syscall table - .type sys_syscall, #function sys_syscall: bic scno, r0, #__NR_OABI_SYSCALL_BASE cmp scno, #__NR_syscall - __NR_SYSCALL_BASE @@ -338,53 +339,65 @@ sys_syscall: movlo r3, r4 ldrlo pc, [tbl, scno, lsl #2] b sys_ni_syscall +ENDPROC(sys_syscall) sys_fork_wrapper: add r0, sp, #S_OFF b sys_fork +ENDPROC(sys_fork_wrapper) sys_vfork_wrapper: add r0, sp, #S_OFF b sys_vfork +ENDPROC(sys_vfork_wrapper) sys_execve_wrapper: add r3, sp, #S_OFF b sys_execve +ENDPROC(sys_execve_wrapper) sys_clone_wrapper: add ip, sp, #S_OFF str ip, [sp, #4] b sys_clone +ENDPROC(sys_clone_wrapper) sys_sigsuspend_wrapper: add r3, sp, #S_OFF b sys_sigsuspend +ENDPROC(sys_sigsuspend_wrapper) sys_rt_sigsuspend_wrapper: add r2, sp, #S_OFF b sys_rt_sigsuspend +ENDPROC(sys_rt_sigsuspend_wrapper) sys_sigreturn_wrapper: add r0, sp, #S_OFF b sys_sigreturn +ENDPROC(sys_sigreturn_wrapper) sys_rt_sigreturn_wrapper: add r0, sp, #S_OFF b sys_rt_sigreturn +ENDPROC(sys_rt_sigreturn_wrapper) sys_sigaltstack_wrapper: ldr r2, [sp, #S_OFF + S_SP] b do_sigaltstack +ENDPROC(sys_sigaltstack_wrapper) sys_statfs64_wrapper: teq r1, #88 moveq r1, #84 b sys_statfs64 +ENDPROC(sys_statfs64_wrapper) sys_fstatfs64_wrapper: teq r1, #88 moveq r1, #84 b sys_fstatfs64 +ENDPROC(sys_fstatfs64_wrapper) /* * Note: off_4k (r5) is always units of 4K. If we can't do the requested @@ -402,11 +415,14 @@ sys_mmap2: str r5, [sp, #4] b do_mmap2 #endif +ENDPROC(sys_mmap2) ENTRY(pabort_ifar) mrc p15, 0, r0, cr6, cr0, 2 ENTRY(pabort_noifar) mov pc, lr +ENDPROC(pabort_ifar) +ENDPROC(pabort_noifar) #ifdef CONFIG_OABI_COMPAT @@ -417,26 +433,31 @@ ENTRY(pabort_noifar) sys_oabi_pread64: stmia sp, {r3, r4} b sys_pread64 +ENDPROC(sys_oabi_pread64) sys_oabi_pwrite64: stmia sp, {r3, r4} b sys_pwrite64 +ENDPROC(sys_oabi_pwrite64) sys_oabi_truncate64: mov r3, r2 mov r2, r1 b sys_truncate64 +ENDPROC(sys_oabi_truncate64) sys_oabi_ftruncate64: mov r3, r2 mov r2, r1 b sys_ftruncate64 +ENDPROC(sys_oabi_ftruncate64) sys_oabi_readahead: str r3, [sp] mov r3, r2 mov r2, r1 b sys_readahead +ENDPROC(sys_oabi_readahead) /* * Let's declare a second syscall table for old ABI binaries diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index e8e90346f11c..36f81d967979 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -45,7 +45,6 @@ #include <asm/fiq.h> #include <asm/irq.h> #include <asm/system.h> -#include <asm/uaccess.h> static unsigned long no_fiq_insn; diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 1c3c6ea5f9e7..bde52df1c668 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -36,7 +36,6 @@ __switch_data: * r2 = atags pointer * r9 = processor ID */ - .type __mmap_switched, %function __mmap_switched: adr r3, __switch_data + 4 @@ -59,6 +58,7 @@ __mmap_switched: bic r4, r0, #CR_A @ Clear 'A' bit stmia r7, {r0, r4} @ Save control register values b start_kernel +ENDPROC(__mmap_switched) /* * Exception handling. Something went wrong and we can't proceed. We @@ -69,8 +69,6 @@ __mmap_switched: * and hope for the best (useful if bootloader fails to pass a proper * machine ID for example). 
*/ - - .type __error_p, %function __error_p: #ifdef CONFIG_DEBUG_LL adr r0, str_p1 @@ -84,8 +82,8 @@ str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x" str_p2: .asciz ").\n" .align #endif +ENDPROC(__error_p) - .type __error_a, %function __error_a: #ifdef CONFIG_DEBUG_LL mov r4, r1 @ preserve machine ID @@ -115,13 +113,14 @@ __error_a: adr r0, str_a3 bl printascii b __error +ENDPROC(__error_a) + str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x" str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n" str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n" .align #endif - .type __error, %function __error: #ifdef CONFIG_ARCH_RPC /* @@ -138,6 +137,7 @@ __error: #endif 1: mov r0, r0 b 1b +ENDPROC(__error) /* @@ -153,7 +153,6 @@ __error: * r5 = proc_info pointer in physical address space * r9 = cpuid (preserved) */ - .type __lookup_processor_type, %function __lookup_processor_type: adr r3, 3f ldmda r3, {r5 - r7} @@ -169,6 +168,7 @@ __lookup_processor_type: blo 1b mov r5, #0 @ unknown processor 2: mov pc, lr +ENDPROC(__lookup_processor_type) /* * This provides a C-API version of the above function. @@ -179,6 +179,7 @@ ENTRY(lookup_processor_type) bl __lookup_processor_type mov r0, r5 ldmfd sp!, {r4 - r7, r9, pc} +ENDPROC(lookup_processor_type) /* * Look in <asm/procinfo.h> and arch/arm/kernel/arch.[ch] for @@ -201,7 +202,6 @@ ENTRY(lookup_processor_type) * r3, r4, r6 corrupted * r5 = mach_info pointer in physical address space */ - .type __lookup_machine_type, %function __lookup_machine_type: adr r3, 3b ldmia r3, {r4, r5, r6} @@ -216,6 +216,7 @@ __lookup_machine_type: blo 1b mov r5, #0 @ unknown machine 2: mov pc, lr +ENDPROC(__lookup_machine_type) /* * This provides a C-API version of the above function. @@ -226,6 +227,7 @@ ENTRY(lookup_machine_type) bl __lookup_machine_type mov r0, r5 ldmfd sp!, {r4 - r6, pc} +ENDPROC(lookup_machine_type) /* Determine validity of the r2 atags pointer. The heuristic requires * that the pointer be aligned, in the first 16k of physical RAM and @@ -239,8 +241,6 @@ ENTRY(lookup_machine_type) * r2 either valid atags pointer, or zero * r5, r6 corrupted */ - - .type __vet_atags, %function __vet_atags: tst r2, #0x3 @ aligned? bne 1f @@ -257,3 +257,4 @@ __vet_atags: 1: mov r2, #0 mov pc, lr +ENDPROC(__vet_atags) diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 27329bd32037..cc87e1765ed2 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S @@ -33,7 +33,6 @@ * */ .section ".text.head", "ax" - .type stext, %function ENTRY(stext) msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode @ and irqs disabled @@ -53,11 +52,11 @@ ENTRY(stext) @ the initialization is done adr lr, __after_proc_init @ return (PIC) address add pc, r10, #PROCINFO_INITFUNC +ENDPROC(stext) /* * Set the Control Register and Read the process ID. */ - .type __after_proc_init, %function __after_proc_init: #ifdef CONFIG_CPU_CP15 mrc p15, 0, r0, c1, c0, 0 @ read control reg @@ -85,6 +84,7 @@ __after_proc_init: mov pc, r13 @ clear the BSS and jump @ to start_kernel +ENDPROC(__after_proc_init) .ltorg #include "head-common.S" diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index bff4c6e90dd5..21e17dc94cb5 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -75,7 +75,6 @@ * circumstances, zImage) is for. 
*/ .section ".text.head", "ax" - .type stext, %function ENTRY(stext) msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode @ and irqs disabled @@ -100,9 +99,9 @@ ENTRY(stext) @ mmu has been enabled adr lr, __enable_mmu @ return (PIC) address add pc, r10, #PROCINFO_INITFUNC +ENDPROC(stext) #if defined(CONFIG_SMP) - .type secondary_startup, #function ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. @@ -128,6 +127,7 @@ ENTRY(secondary_startup) adr lr, __enable_mmu @ return address add pc, r10, #PROCINFO_INITFUNC @ initialise processor @ (return control reg) +ENDPROC(secondary_startup) /* * r6 = &secondary_data @@ -136,6 +136,7 @@ ENTRY(__secondary_switched) ldr sp, [r7, #4] @ get secondary_data.stack mov fp, #0 b secondary_start_kernel +ENDPROC(__secondary_switched) .type __secondary_data, %object __secondary_data: @@ -151,7 +152,6 @@ __secondary_data: * this is just loading the page table pointer and domain access * registers. */ - .type __enable_mmu, %function __enable_mmu: #ifdef CONFIG_ALIGNMENT_TRAP orr r0, r0, #CR_A @@ -174,6 +174,7 @@ __enable_mmu: mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer b __turn_mmu_on +ENDPROC(__enable_mmu) /* * Enable the MMU. This completely changes the structure of the visible @@ -187,7 +188,6 @@ __enable_mmu: * other registers depend on the function called upon completion */ .align 5 - .type __turn_mmu_on, %function __turn_mmu_on: mov r0, r0 mcr p15, 0, r0, c1, c0, 0 @ write control reg @@ -195,7 +195,7 @@ __turn_mmu_on: mov r3, r3 mov r3, r3 mov pc, r13 - +ENDPROC(__turn_mmu_on) /* @@ -211,7 +211,6 @@ __turn_mmu_on: * r0, r3, r6, r7 corrupted * r4 = physical page table address */ - .type __create_page_tables, %function __create_page_tables: pgtbl r4 @ page table address @@ -325,6 +324,7 @@ __create_page_tables: #endif #endif mov pc, lr +ENDPROC(__create_page_tables) .ltorg #include "head-common.S" diff --git a/arch/arm/kernel/init_task.c b/arch/arm/kernel/init_task.c index 8b8c9d38a761..0bbf80625395 100644 --- a/arch/arm/kernel/init_task.c +++ b/arch/arm/kernel/init_task.c @@ -8,8 +8,8 @@ #include <linux/init.h> #include <linux/init_task.h> #include <linux/mqueue.h> +#include <linux/uaccess.h> -#include <asm/uaccess.h> #include <asm/pgtable.h> static struct fs_struct init_fs = INIT_FS; diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c index 1f6822dfae74..f4470307edb8 100644 --- a/arch/arm/kernel/io.c +++ b/arch/arm/kernel/io.c @@ -1,7 +1,6 @@ #include <linux/module.h> #include <linux/types.h> - -#include <asm/io.h> +#include <linux/io.h> /* * Copy data from IO memory space to "real" memory space. diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index f88efb135b70..2f3eb795fa6e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -112,18 +112,17 @@ static struct irq_desc bad_irq_desc = { asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); - struct irq_desc *desc = irq_desc + irq; + + irq_enter(); /* * Some hardware gives randomly wrong interrupts. Rather * than crashing, do something sensible. 
*/ if (irq >= NR_IRQS) - desc = &bad_irq_desc; - - irq_enter(); - - desc_handle_irq(irq, desc); + handle_bad_irq(irq, &bad_irq_desc); + else + generic_handle_irq(irq); /* AT91 specific workaround */ irq_finish(irq); diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index aaffaecffcd1..ba8ccfede964 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -111,8 +111,6 @@ int kgdb_arch_handle_exception(int exception_vector, int signo, case 'D': case 'k': case 'c': - kgdb_contthread = NULL; - /* * Try to read optional parameter, pc unchanged if no parm. * If this was a compiled breakpoint, we need to move diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index b4565bb133c1..da1f94906a4e 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -488,7 +488,7 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) if (!ubit) addr -= reg_count; - addr += (!pbit ^ !ubit); + addr += (!pbit == !ubit); reg_bit_vector = insn & 0xffff; while (reg_bit_vector) { @@ -503,7 +503,7 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) if (wbit) { if (!ubit) addr -= reg_count; - addr -= (!pbit ^ !ubit); + addr -= (!pbit == !ubit); regs->uregs[rn] = (long)addr; } } diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index d28513f14d05..3f9abe0e9aff 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -200,9 +200,12 @@ void __kprobes kprobe_handler(struct pt_regs *regs) } } -int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr) +static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr) { + unsigned long flags; + local_irq_save(flags); kprobe_handler(regs); + local_irq_restore(flags); return 0; } diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index fae5beb3c3d6..440dc62cdc3a 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -6,10 +6,10 @@ #include <linux/kexec.h> #include <linux/delay.h> #include <linux/reboot.h> +#include <linux/io.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/mmu_context.h> -#include <asm/io.h> #include <asm/cacheflush.h> #include <asm/mach-types.h> diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index a68259a0cccd..9203ba7d58ee 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -47,7 +47,7 @@ void *module_alloc(unsigned long size) if (!area) return NULL; - return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); + return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); } #else /* CONFIG_MMU */ void *module_alloc(unsigned long size) diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 3fd882337064..d3ea6fa89521 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -28,12 +28,12 @@ #include <linux/pm.h> #include <linux/tick.h> #include <linux/utsname.h> +#include <linux/uaccess.h> #include <asm/leds.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/thread_notify.h> -#include <asm/uaccess.h> #include <asm/mach/time.h> static const char *processor_modes[] = { @@ -267,35 +267,6 @@ void show_regs(struct pt_regs * regs) __backtrace(); } -void show_fpregs(struct user_fp *regs) -{ - int i; - - for (i = 0; i < 8; i++) { - unsigned long *p; - char type; - - p = (unsigned long *)(regs->fpregs + i); - - switch (regs->ftype[i]) { - case 1: type = 'f'; break; - case 2: type = 'd'; break; - case 3: type = 'e'; break; 
- default: type = '?'; break; - } - if (regs->init_flag) - type = '?'; - - printk(" f%d(%c): %08lx %08lx %08lx%c", - i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' '); - } - - - printk("FPSR: %08lx FPCR: %08lx\n", - (unsigned long)regs->fpsr, - (unsigned long)regs->fpcr); -} - /* * Free current thread data structures etc.. */ @@ -414,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p) do { if (fp < stack_start || fp > stack_end) return 0; - lr = pc_pointer (((unsigned long *)fp)[-1]); + lr = ((unsigned long *)fp)[-1]; if (!in_sched_functions(lr)) return lr; fp = *(unsigned long *) (fp - 12); diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 4b05dc5c1023..df653ea59250 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -18,8 +18,8 @@ #include <linux/security.h> #include <linux/init.h> #include <linux/signal.h> +#include <linux/uaccess.h> -#include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/traps.h> @@ -126,7 +126,7 @@ ptrace_getrn(struct task_struct *child, unsigned long insn) val = get_user_reg(child, reg); if (reg == 15) - val = pc_pointer(val + 8); + val += 8; return val; } @@ -278,8 +278,7 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in else base -= aluop2; } - if (read_u32(child, base, &alt) == 0) - alt = pc_pointer(alt); + read_u32(child, base, &alt); } break; @@ -305,8 +304,7 @@ get_branch_address(struct task_struct *child, unsigned long pc, unsigned long in base = ptrace_getrn(child, insn); - if (read_u32(child, base + nr_regs, &alt) == 0) - alt = pc_pointer(alt); + read_u32(child, base + nr_regs, &alt); break; } break; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 2ca7038b67a7..1f1eecca7f55 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -26,11 +26,13 @@ #include <linux/fs.h> #include <asm/cpu.h> +#include <asm/cputype.h> #include <asm/elf.h> #include <asm/procinfo.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/cacheflush.h> +#include <asm/cachetype.h> #include <asm/tlbflush.h> #include <asm/mach/arch.h> @@ -59,13 +61,14 @@ __setup("fpe=", fpe_setup); extern void paging_init(struct meminfo *, struct machine_desc *desc); extern void reboot_setup(char *str); -extern int root_mountflags; -extern void _stext, _text, _etext, __data_start, _edata, _end; +extern void _text, _etext, __data_start, _edata, _end; unsigned int processor_id; EXPORT_SYMBOL(processor_id); unsigned int __machine_arch_type; EXPORT_SYMBOL(__machine_arch_type); +unsigned int cacheid; +EXPORT_SYMBOL(cacheid); unsigned int __atags_pointer __initdata; @@ -81,8 +84,6 @@ EXPORT_SYMBOL(system_serial_high); unsigned int elf_hwcap; EXPORT_SYMBOL(elf_hwcap); -unsigned long __initdata vmalloc_reserve = 128 << 20; - #ifdef MULTI_CPU struct processor processor; @@ -111,9 +112,6 @@ static struct stack stacks[NR_CPUS]; char elf_platform[ELF_PLATFORM_SIZE]; EXPORT_SYMBOL(elf_platform); -unsigned long phys_initrd_start __initdata = 0; -unsigned long phys_initrd_size __initdata = 0; - static struct meminfo meminfo __initdata = { 0, }; static const char *cpu_name; static const char *machine_name; @@ -178,63 +176,6 @@ static struct resource io_res[] = { #define lp1 io_res[1] #define lp2 io_res[2] -static const char *cache_types[16] = { - "write-through", - "write-back", - "write-back", - "undefined 3", - "undefined 4", - "undefined 5", - "write-back", - "write-back", - "undefined 8", - "undefined 9", - "undefined 10", - "undefined 11", - "undefined 12", - 
"undefined 13", - "write-back", - "undefined 15", -}; - -static const char *cache_clean[16] = { - "not required", - "read-block", - "cp15 c7 ops", - "undefined 3", - "undefined 4", - "undefined 5", - "cp15 c7 ops", - "cp15 c7 ops", - "undefined 8", - "undefined 9", - "undefined 10", - "undefined 11", - "undefined 12", - "undefined 13", - "cp15 c7 ops", - "undefined 15", -}; - -static const char *cache_lockdown[16] = { - "not supported", - "not supported", - "not supported", - "undefined 3", - "undefined 4", - "undefined 5", - "format A", - "format B", - "undefined 8", - "undefined 9", - "undefined 10", - "undefined 11", - "undefined 12", - "undefined 13", - "format C", - "undefined 15", -}; - static const char *proc_arch[] = { "undefined/unknown", "3", @@ -255,61 +196,19 @@ static const char *proc_arch[] = { "?(17)", }; -#define CACHE_TYPE(x) (((x) >> 25) & 15) -#define CACHE_S(x) ((x) & (1 << 24)) -#define CACHE_DSIZE(x) (((x) >> 12) & 4095) /* only if S=1 */ -#define CACHE_ISIZE(x) ((x) & 4095) - -#define CACHE_SIZE(y) (((y) >> 6) & 7) -#define CACHE_ASSOC(y) (((y) >> 3) & 7) -#define CACHE_M(y) ((y) & (1 << 2)) -#define CACHE_LINE(y) ((y) & 3) - -static inline void dump_cache(const char *prefix, int cpu, unsigned int cache) -{ - unsigned int mult = 2 + (CACHE_M(cache) ? 1 : 0); - - printk("CPU%u: %s: %d bytes, associativity %d, %d byte lines, %d sets\n", - cpu, prefix, - mult << (8 + CACHE_SIZE(cache)), - (mult << CACHE_ASSOC(cache)) >> 1, - 8 << CACHE_LINE(cache), - 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) - - CACHE_LINE(cache))); -} - -static void __init dump_cpu_info(int cpu) -{ - unsigned int info = read_cpuid(CPUID_CACHETYPE); - - if (info != processor_id) { - printk("CPU%u: D %s %s cache\n", cpu, cache_is_vivt() ? "VIVT" : "VIPT", - cache_types[CACHE_TYPE(info)]); - if (CACHE_S(info)) { - dump_cache("I cache", cpu, CACHE_ISIZE(info)); - dump_cache("D cache", cpu, CACHE_DSIZE(info)); - } else { - dump_cache("cache", cpu, CACHE_ISIZE(info)); - } - } - - if (arch_is_coherent()) - printk("Cache coherency enabled\n"); -} - int cpu_architecture(void) { int cpu_arch; - if ((processor_id & 0x0008f000) == 0) { + if ((read_cpuid_id() & 0x0008f000) == 0) { cpu_arch = CPU_ARCH_UNKNOWN; - } else if ((processor_id & 0x0008f000) == 0x00007000) { - cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3; - } else if ((processor_id & 0x00080000) == 0x00000000) { - cpu_arch = (processor_id >> 16) & 7; + } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { + cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3; + } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) { + cpu_arch = (read_cpuid_id() >> 16) & 7; if (cpu_arch) cpu_arch += CPU_ARCH_ARMv3; - } else if ((processor_id & 0x000f0000) == 0x000f0000) { + } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { unsigned int mmfr0; /* Revised CPUID format. 
Read the Memory Model Feature @@ -330,6 +229,34 @@ int cpu_architecture(void) return cpu_arch; } +static void __init cacheid_init(void) +{ + unsigned int cachetype = read_cpuid_cachetype(); + unsigned int arch = cpu_architecture(); + + if (arch >= CPU_ARCH_ARMv7) { + cacheid = CACHEID_VIPT_NONALIASING; + if ((cachetype & (3 << 14)) == 1 << 14) + cacheid |= CACHEID_ASID_TAGGED; + } else if (arch >= CPU_ARCH_ARMv6) { + if (cachetype & (1 << 23)) + cacheid = CACHEID_VIPT_ALIASING; + else + cacheid = CACHEID_VIPT_NONALIASING; + } else { + cacheid = CACHEID_VIVT; + } + + printk("CPU: %s data cache, %s instruction cache\n", + cache_is_vivt() ? "VIVT" : + cache_is_vipt_aliasing() ? "VIPT aliasing" : + cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown", + cache_is_vivt() ? "VIVT" : + icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" : + cache_is_vipt_aliasing() ? "VIPT aliasing" : + cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown"); +} + /* * These functions re-use the assembly code in head.S, which * already provide the required functionality. @@ -346,10 +273,10 @@ static void __init setup_processor(void) * types. The linker builds this table for us from the * entries in arch/arm/mm/proc-*.S */ - list = lookup_processor_type(processor_id); + list = lookup_processor_type(read_cpuid_id()); if (!list) { printk("CPU configuration botched (ID %08x), unable " - "to continue.\n", processor_id); + "to continue.\n", read_cpuid_id()); while (1); } @@ -369,7 +296,7 @@ static void __init setup_processor(void) #endif printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", - cpu_name, processor_id, (int)processor_id & 15, + cpu_name, read_cpuid_id(), read_cpuid_id() & 15, proc_arch[cpu_architecture()], cr_alignment); sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); @@ -379,14 +306,14 @@ static void __init setup_processor(void) elf_hwcap &= ~HWCAP_THUMB; #endif + cacheid_init(); cpu_proc_init(); } /* * cpu_init - initialise one CPU. * - * cpu_init dumps the cache information, initialises SMP specific - * information, and sets up the per-CPU stacks. + * cpu_init sets up the per-CPU stacks. */ void cpu_init(void) { @@ -398,9 +325,6 @@ void cpu_init(void) BUG(); } - if (system_state == SYSTEM_BOOTING) - dump_cpu_info(cpu); - /* * setup stacks for re-entrant exception handlers */ @@ -443,20 +367,6 @@ static struct machine_desc * __init setup_machine(unsigned int nr) return list; } -static void __init early_initrd(char **p) -{ - unsigned long start, size; - - start = memparse(*p, p); - if (**p == ',') { - size = memparse((*p) + 1, p); - - phys_initrd_start = start; - phys_initrd_size = size; - } -} -__early_param("initrd=", early_initrd); - static void __init arm_add_memory(unsigned long start, unsigned long size) { struct membank *bank; @@ -503,17 +413,6 @@ static void __init early_mem(char **p) __early_param("mem=", early_mem); /* - * vmalloc=size forces the vmalloc area to be exactly 'size' - * bytes. This can be used to increase (or decrease) the vmalloc - * area - the default is 128m. - */ -static void __init early_vmalloc(char **arg) -{ - vmalloc_reserve = memparse(*arg, arg); -} -__early_param("vmalloc=", early_vmalloc); - -/* * Initial parsing of the command line. 
*/ static void __init parse_cmdline(char **cmdline_p, char *from) @@ -527,12 +426,12 @@ static void __init parse_cmdline(char **cmdline_p, char *from) struct early_params *p; for (p = &__early_begin; p < &__early_end; p++) { - int len = strlen(p->arg); + int arglen = strlen(p->arg); - if (memcmp(from, p->arg, len) == 0) { + if (memcmp(from, p->arg, arglen) == 0) { if (to != command_line) to -= 1; - from += len; + from += arglen; p->fn(&from); while (*from != ' ' && *from != '\0') @@ -579,18 +478,13 @@ request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc) kernel_data.end = virt_to_phys(&_end - 1); for (i = 0; i < mi->nr_banks; i++) { - unsigned long virt_start, virt_end; - if (mi->bank[i].size == 0) continue; - virt_start = __phys_to_virt(mi->bank[i].start); - virt_end = virt_start + mi->bank[i].size - 1; - res = alloc_bootmem_low(sizeof(*res)); res->name = "System RAM"; - res->start = __virt_to_phys(virt_start); - res->end = __virt_to_phys(virt_end); + res->start = mi->bank[i].start; + res->end = mi->bank[i].start + mi->bank[i].size - 1; res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); @@ -694,26 +588,6 @@ static int __init parse_tag_ramdisk(const struct tag *tag) __tagtable(ATAG_RAMDISK, parse_tag_ramdisk); -static int __init parse_tag_initrd(const struct tag *tag) -{ - printk(KERN_WARNING "ATAG_INITRD is deprecated; " - "please update your bootloader.\n"); - phys_initrd_start = __virt_to_phys(tag->u.initrd.start); - phys_initrd_size = tag->u.initrd.size; - return 0; -} - -__tagtable(ATAG_INITRD, parse_tag_initrd); - -static int __init parse_tag_initrd2(const struct tag *tag) -{ - phys_initrd_start = tag->u.initrd.start; - phys_initrd_size = tag->u.initrd.size; - return 0; -} - -__tagtable(ATAG_INITRD2, parse_tag_initrd2); - static int __init parse_tag_serialnr(const struct tag *tag) { system_serial_low = tag->u.serialnr.low; @@ -901,28 +775,12 @@ static const char *hwcap_str[] = { NULL }; -static void -c_show_cache(struct seq_file *m, const char *type, unsigned int cache) -{ - unsigned int mult = 2 + (CACHE_M(cache) ? 
1 : 0); - - seq_printf(m, "%s size\t\t: %d\n" - "%s assoc\t\t: %d\n" - "%s line length\t: %d\n" - "%s sets\t\t: %d\n", - type, mult << (8 + CACHE_SIZE(cache)), - type, (mult << CACHE_ASSOC(cache)) >> 1, - type, 8 << CACHE_LINE(cache), - type, 1 << (6 + CACHE_SIZE(cache) - CACHE_ASSOC(cache) - - CACHE_LINE(cache))); -} - static int c_show(struct seq_file *m, void *v) { int i; seq_printf(m, "Processor\t: %s rev %d (%s)\n", - cpu_name, (int)processor_id & 15, elf_platform); + cpu_name, read_cpuid_id() & 15, elf_platform); #if defined(CONFIG_SMP) for_each_online_cpu(i) { @@ -949,47 +807,26 @@ static int c_show(struct seq_file *m, void *v) if (elf_hwcap & (1 << i)) seq_printf(m, "%s ", hwcap_str[i]); - seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24); + seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]); - if ((processor_id & 0x0008f000) == 0x00000000) { + if ((read_cpuid_id() & 0x0008f000) == 0x00000000) { /* pre-ARM7 */ - seq_printf(m, "CPU part\t: %07x\n", processor_id >> 4); + seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4); } else { - if ((processor_id & 0x0008f000) == 0x00007000) { + if ((read_cpuid_id() & 0x0008f000) == 0x00007000) { /* ARM7 */ seq_printf(m, "CPU variant\t: 0x%02x\n", - (processor_id >> 16) & 127); + (read_cpuid_id() >> 16) & 127); } else { /* post-ARM7 */ seq_printf(m, "CPU variant\t: 0x%x\n", - (processor_id >> 20) & 15); + (read_cpuid_id() >> 20) & 15); } seq_printf(m, "CPU part\t: 0x%03x\n", - (processor_id >> 4) & 0xfff); - } - seq_printf(m, "CPU revision\t: %d\n", processor_id & 15); - - { - unsigned int cache_info = read_cpuid(CPUID_CACHETYPE); - if (cache_info != processor_id) { - seq_printf(m, "Cache type\t: %s\n" - "Cache clean\t: %s\n" - "Cache lockdown\t: %s\n" - "Cache format\t: %s\n", - cache_types[CACHE_TYPE(cache_info)], - cache_clean[CACHE_TYPE(cache_info)], - cache_lockdown[CACHE_TYPE(cache_info)], - CACHE_S(cache_info) ? "Harvard" : "Unified"); - - if (CACHE_S(cache_info)) { - c_show_cache(m, "I", CACHE_ISIZE(cache_info)); - c_show_cache(m, "D", CACHE_DSIZE(cache_info)); - } else { - c_show_cache(m, "Cache", CACHE_ISIZE(cache_info)); - } - } + (read_cpuid_id() >> 4) & 0xfff); } + seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); seq_puts(m, "\n"); diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index ef2f86a5e78a..80b8b5c7e07a 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -11,11 +11,11 @@ #include <linux/signal.h> #include <linux/personality.h> #include <linux/freezer.h> +#include <linux/uaccess.h> #include <asm/elf.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> -#include <asm/uaccess.h> #include <asm/unistd.h> #include "ptrace.h" diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e9842f6767f9..e42a749a56dd 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -277,6 +277,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void) /* * Enable local interrupts. 
*/ + notify_cpu_starting(cpu); local_irq_enable(); local_fiq_enable(); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 0128687ba0f7..b3ec641b5cf8 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -27,8 +27,7 @@ #include <linux/file.h> #include <linux/utsname.h> #include <linux/ipc.h> - -#include <asm/uaccess.h> +#include <linux/uaccess.h> extern unsigned long do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index 96ab5f52949c..42623db7f870 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -82,7 +82,7 @@ #include <linux/socket.h> #include <linux/net.h> #include <linux/ipc.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> struct oldabi_stat64 { unsigned long long st_dev; diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index 368d171754cf..c68b44aa88d2 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -59,7 +59,7 @@ unsigned long profile_pc(struct pt_regs *regs) if (in_lock_functions(pc)) { fp = regs->ARM_fp; - pc = pc_pointer(((unsigned long *)fp)[-1]); + pc = ((unsigned long *)fp)[-1]; } return pc; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 872f1f8fbb57..57e6874d0b80 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -19,15 +19,13 @@ #include <linux/kallsyms.h> #include <linux/delay.h> #include <linux/init.h> -#include <linux/kprobes.h> +#include <linux/uaccess.h> #include <asm/atomic.h> #include <asm/cacheflush.h> #include <asm/system.h> -#include <asm/uaccess.h> #include <asm/unistd.h> #include <asm/traps.h> -#include <asm/io.h> #include "ptrace.h" #include "signal.h" @@ -69,7 +67,8 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long */ static int verify_stack(unsigned long sp) { - if (sp < PAGE_OFFSET || (sp > (unsigned long)high_memory && high_memory != 0)) + if (sp < PAGE_OFFSET || + (sp > (unsigned long)high_memory && high_memory != NULL)) return -EFAULT; return 0; @@ -328,17 +327,6 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) get_user(instr, (u32 __user *)pc); } -#ifdef CONFIG_KPROBES - /* - * It is possible to have recursive kprobes, so we can't call - * the kprobe trap handler with the undef_lock held. - */ - if (instr == KPROBE_BREAKPOINT_INSTRUCTION && !user_mode(regs)) { - kprobe_trap_handler(regs, instr); - return; - } -#endif - if (call_undef_hook(regs, instr) == 0) return; diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c index 180000bfdc8f..17127db906fa 100644 --- a/arch/arm/kernel/xscale-cp0.c +++ b/arch/arm/kernel/xscale-cp0.c @@ -14,8 +14,8 @@ #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> +#include <linux/io.h> #include <asm/thread_notify.h> -#include <asm/io.h> static inline void dsp_save_state(u32 *state) { |
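
The least obvious change above is the one-character fix in kprobes-decode.c: the LDM/STM simulation now adjusts the transfer base with (!pbit == !ubit) instead of (!pbit ^ !ubit). The sketch below is a standalone illustration, not kernel code; the mode labels, helper names and word-granularity arithmetic are assumptions made for the example (the hunk suggests addr is a word pointer, but its declaration is not shown). It works through the lowest word touched by each of the four LDM/STM addressing modes and shows why the equality test matches the architecture while the XOR form inverted every case.

        #include <stdio.h>

        /*
         * Illustration of the base-address fix in simulate_ldm1stm1().
         * For an LDM/STM with P (pre-index) and U (up) bits, the lowest
         * word accessed relative to the base register is (offsets in
         * words, n = number of registers in the list):
         *   IA (P=0,U=1): base        IB (P=1,U=1): base + 1
         *   DA (P=0,U=0): base - (n-1)  DB (P=1,U=0): base - n
         */
        static long lowest_word_old(long base, int pbit, int ubit, int n)
        {
                long addr = base;
                if (!ubit)
                        addr -= n;
                addr += (!pbit ^ !ubit);        /* old, incorrect adjustment */
                return addr;
        }

        static long lowest_word_new(long base, int pbit, int ubit, int n)
        {
                long addr = base;
                if (!ubit)
                        addr -= n;
                addr += (!pbit == !ubit);       /* fixed adjustment */
                return addr;
        }

        int main(void)
        {
                static const struct { const char *name; int p, u; } mode[] = {
                        { "IA", 0, 1 }, { "IB", 1, 1 }, { "DA", 0, 0 }, { "DB", 1, 0 },
                };
                const long base = 100;  /* arbitrary base, in words */
                const int n = 4;        /* registers transferred */

                for (unsigned int i = 0; i < sizeof(mode) / sizeof(mode[0]); i++)
                        printf("%s: old=%ld new=%ld\n", mode[i].name,
                               lowest_word_old(base, mode[i].p, mode[i].u, n),
                               lowest_word_new(base, mode[i].p, mode[i].u, n));
                return 0;
        }

With base 100 and four registers the fixed expression yields 100/101/97/96 for IA/IB/DA/DB, the architectural lowest address in each mode, whereas the old XOR form gave 101/100/96/97 -- exactly the opposite pairing -- so every simulated multi-register transfer started one word off.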