From 43ccf202214067f12af5d7a6e144e18bb35553a9 Mon Sep 17 00:00:00 2001 From: Dave C Boutcher Date: Thu, 12 Jan 2006 16:05:35 -0600 Subject: [PATCH] powerpc: Add some more pSeries hypervisor call constants Adds a few more hypervisor call constants. Signed-off-by: Dave Boutcher Signed-off-by: Paul Mackerras --- include/asm-powerpc/hvcall.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include') diff --git a/include/asm-powerpc/hvcall.h b/include/asm-powerpc/hvcall.h index da7af5a720e0..38ca9ad6110d 100644 --- a/include/asm-powerpc/hvcall.h +++ b/include/asm-powerpc/hvcall.h @@ -6,7 +6,10 @@ #define H_Success 0 #define H_Busy 1 /* Hardware busy -- retry later */ +#define H_Closed 2 /* Resource closed */ #define H_Constrained 4 /* Resource request constrained to max allowed */ +#define H_InProgress 14 /* Kind of like busy */ +#define H_Continue 18 /* Returned from H_Join on success */ #define H_LongBusyStartRange 9900 /* Start of long busy range */ #define H_LongBusyOrder1msec 9900 /* Long busy, hint that 1msec is a good time to retry */ #define H_LongBusyOrder10msec 9901 /* Long busy, hint that 10msec is a good time to retry */ @@ -114,6 +117,8 @@ #define H_REGISTER_VTERM 0x154 #define H_FREE_VTERM 0x158 #define H_POLL_PENDING 0x1D8 +#define H_JOIN 0x298 +#define H_ENABLE_CRQ 0x2B0 #ifndef __ASSEMBLY__ -- cgit v1.2.3-58-ga151 From 898b5395e915210f41223caa30312994d64cba1d Mon Sep 17 00:00:00 2001 From: Dave C Boutcher Date: Thu, 12 Jan 2006 16:07:17 -0600 Subject: [PATCH] powerpc: Add/remove/update properties in /proc/device-tree Add support to the proc_device_tree file for removing and updating properties. Remove just removes the proc file, update changes the data pointer within the proc file. The remainder of the device-tree changes occur elsewhere. Signed-off-by: Dave Boutcher Signed-off-by: Paul Mackerras --- fs/proc/proc_devtree.c | 24 ++++++++++++++++++++++++ include/linux/proc_fs.h | 5 +++++ 2 files changed, 29 insertions(+) (limited to 'include') diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index fb117b74809e..9bdd077d6f55 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -81,6 +81,30 @@ void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop __proc_device_tree_add_prop(pde, prop); } +void proc_device_tree_remove_prop(struct proc_dir_entry *pde, + struct property *prop) +{ + remove_proc_entry(prop->name, pde); +} + +void proc_device_tree_update_prop(struct proc_dir_entry *pde, + struct property *newprop, + struct property *oldprop) +{ + struct proc_dir_entry *ent; + + for (ent = pde->subdir; ent != NULL; ent = ent->next) + if (ent->data == oldprop) + break; + if (ent == NULL) { + printk(KERN_WARNING "device-tree: property \"%s\" " + " does not exist\n", oldprop->name); + } else { + ent->data = newprop; + ent->size = newprop->length; + } +} + /* * Process a node, adding entries for its children and its properties. 
 */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 74488e49166d..aa6322d45198 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -146,6 +146,11 @@ struct property;
 extern void proc_device_tree_init(void);
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
+extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+					 struct property *prop);
+extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+					 struct property *newprop,
+					 struct property *oldprop);
 #endif /* CONFIG_PROC_DEVICETREE */
 
 extern struct proc_dir_entry *proc_symlink(const char *,
-- cgit v1.2.3-58-ga151

From 088186ded490ced80758200cf8f906ed741df306 Mon Sep 17 00:00:00 2001
From: Dave C Boutcher
Date: Thu, 12 Jan 2006 16:08:27 -0600
Subject: [PATCH] powerpc: Add/remove/update properties in firmware device tree

Add support for updating and removing device tree properties.  Since we
hand out pointers to properties with gay abandon, we can't just free the
property storage.  Instead we move a deleted property, or the old copy of
an updated property, to a "dead properties" list.

Also note that it's not feasible to kref device tree properties; we call
get_property() all over the kernel in a wide variety of contexts.

One consequence of this change is that we now take a
read_lock(&devtree_lock) when doing get_property().

Signed-off-by: Dave Boutcher
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kernel/prom.c | 90 ++++++++++++++++++++++++++++++++++++++++++++--
 include/asm-powerpc/prom.h |  5 +++
 2 files changed, 93 insertions(+), 2 deletions(-)
(limited to 'include')

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 02e2115323e4..70057b63de21 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -1627,6 +1627,11 @@ static void of_node_release(struct kref *kref)
 		kfree(prop->value);
 		kfree(prop);
 		prop = next;
+
+		if (!prop) {
+			prop = node->deadprops;
+			node->deadprops = NULL;
+		}
 	}
 	kfree(node->intrs);
 	kfree(node->full_name);
@@ -1783,13 +1788,16 @@ unsigned char *get_property(struct device_node *np, const char *name,
 {
 	struct property *pp;
 
+	read_lock(&devtree_lock);
 	for (pp = np->properties; pp != 0; pp = pp->next)
 		if (strcmp(pp->name, name) == 0) {
 			if (lenp != 0)
 				*lenp = pp->length;
-			return pp->value;
+			break;
 		}
-	return NULL;
+	read_unlock(&devtree_lock);
+
+	return pp ? pp->value : NULL;
 }
 EXPORT_SYMBOL(get_property);
 
@@ -1823,4 +1831,82 @@ int prom_add_property(struct device_node* np, struct property* prop)
 	return 0;
 }
 
+/*
+ * Remove a property from a node.  Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property.  Instead we just move the property
+ * to the "dead properties" list, so it won't be found any more.
+ */ +int prom_remove_property(struct device_node *np, struct property *prop) +{ + struct property **next; + int found = 0; + + write_lock(&devtree_lock); + next = &np->properties; + while (*next) { + if (*next == prop) { + /* found the node */ + *next = prop->next; + prop->next = np->deadprops; + np->deadprops = prop; + found = 1; + break; + } + next = &(*next)->next; + } + write_unlock(&devtree_lock); + + if (!found) + return -ENODEV; + +#ifdef CONFIG_PROC_DEVICETREE + /* try to remove the proc node as well */ + if (np->pde) + proc_device_tree_remove_prop(np->pde, prop); +#endif /* CONFIG_PROC_DEVICETREE */ + + return 0; +} + +/* + * Update a property in a node. Note that we don't actually + * remove it, since we have given out who-knows-how-many pointers + * to the data using get-property. Instead we just move the property + * to the "dead properties" list, and add the new property to the + * property list + */ +int prom_update_property(struct device_node *np, + struct property *newprop, + struct property *oldprop) +{ + struct property **next; + int found = 0; + + write_lock(&devtree_lock); + next = &np->properties; + while (*next) { + if (*next == oldprop) { + /* found the node */ + newprop->next = oldprop->next; + *next = newprop; + oldprop->next = np->deadprops; + np->deadprops = oldprop; + found = 1; + break; + } + next = &(*next)->next; + } + write_unlock(&devtree_lock); + + if (!found) + return -ENODEV; +#ifdef CONFIG_PROC_DEVICETREE + /* try to add to proc as well if it was initialized */ + if (np->pde) + proc_device_tree_update_prop(np->pde, newprop, oldprop); +#endif /* CONFIG_PROC_DEVICETREE */ + + return 0; +} diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 329e9bf62260..25d8d5974d19 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -87,6 +87,7 @@ struct device_node { char *full_name; struct property *properties; + struct property *deadprops; /* removed properties */ struct device_node *parent; struct device_node *child; struct device_node *sibling; @@ -164,6 +165,10 @@ extern int prom_n_size_cells(struct device_node* np); extern int prom_n_intr_cells(struct device_node* np); extern void prom_get_irq_senses(unsigned char *senses, int off, int max); extern int prom_add_property(struct device_node* np, struct property* prop); +extern int prom_remove_property(struct device_node *np, struct property *prop); +extern int prom_update_property(struct device_node *np, + struct property *newprop, + struct property *oldprop); #ifdef CONFIG_PPC32 /* -- cgit v1.2.3-58-ga151 From ecaa8b0ff326920c8a89d748382e1c1d8812676c Mon Sep 17 00:00:00 2001 From: Dave C Boutcher Date: Thu, 12 Jan 2006 16:09:29 -0600 Subject: [PATCH] powerpc: Add of_find_property function Add an of_find_property function that returns a struct property given a property name. Then change the get_property function to use that routine internally. Signed-off-by: Dave Boutcher Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/prom.c | 19 +++++++++++++------ include/asm-powerpc/prom.h | 3 +++ 2 files changed, 16 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 70057b63de21..d50c8df0183e 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -1779,12 +1779,8 @@ static int __init prom_reconfig_setup(void) __initcall(prom_reconfig_setup); #endif -/* - * Find a property with a given name for a given node - * and return the value. 
- */ -unsigned char *get_property(struct device_node *np, const char *name, - int *lenp) +struct property *of_find_property(struct device_node *np, const char *name, + int *lenp) { struct property *pp; @@ -1797,6 +1793,17 @@ unsigned char *get_property(struct device_node *np, const char *name, } read_unlock(&devtree_lock); + return pp; +} + +/* + * Find a property with a given name for a given node + * and return the value. + */ +unsigned char *get_property(struct device_node *np, const char *name, + int *lenp) +{ + struct property *pp = of_find_property(np,name,lenp); return pp ? pp->value : NULL; } EXPORT_SYMBOL(get_property); diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h index 25d8d5974d19..5b2bd4eefb01 100644 --- a/include/asm-powerpc/prom.h +++ b/include/asm-powerpc/prom.h @@ -136,6 +136,9 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev); extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); +extern struct property *of_find_property(struct device_node *np, + const char *name, + int *lenp); extern struct device_node *of_node_get(struct device_node *node); extern void of_node_put(struct device_node *node); -- cgit v1.2.3-58-ga151 From e58c3495e6007af59382540bb21ee941e470d88d Mon Sep 17 00:00:00 2001 From: David Gibson Date: Fri, 13 Jan 2006 14:56:25 +1100 Subject: [PATCH] powerpc: Cleanup LOADADDR etc. asm macros This patch consolidates the variety of macros used for loading 32 or 64-bit constants in assembler (LOADADDR, LOADBASE, SET_REG_TO_*). The idea is to make the set of macros consistent across 32 and 64 bit and to make it more obvious which is the appropriate one to use in a given situation. The new macros and their semantics are described in the comments in ppc_asm.h. In the process, we change several places that were unnecessarily using immediate loads on ppc64 to use the GOT/TOC. Likewise we cleanup a couple of places where we were clumsily subtracting PAGE_OFFSET with asm instructions to use assemble-time arithmetic or the toreal() macro instead. 
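In short, the intended usage looks like this.  The three uses below are
copied from head_64.S hunks later in this patch; the surrounding context is
omitted, so treat it as an illustrative sketch rather than a complete code
path:

	/* a constant (or an address needed before the TOC is usable):
	 * built entirely from immediate instructions */
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

	/* the full address of a symbol, loaded via the GOT/TOC on ppc64 */
	LOAD_REG_ADDR(r3, current_set)

	/* base-plus-offset form when only a single load or store is needed */
	LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
	lwz	r12,ADDROFF(tb_ticks_per_jiffy)(r12)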
Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cpu_setup_power4.S | 4 +- arch/powerpc/kernel/entry_32.S | 2 +- arch/powerpc/kernel/entry_64.S | 12 ++--- arch/powerpc/kernel/fpu.S | 10 ++-- arch/powerpc/kernel/head_64.S | 87 +++++++++++++++++----------------- arch/powerpc/kernel/idle_power4.S | 8 ++-- arch/powerpc/kernel/misc_32.S | 4 +- arch/powerpc/kernel/misc_64.S | 10 ++-- include/asm-powerpc/ppc_asm.h | 76 +++++++++++++++-------------- 9 files changed, 108 insertions(+), 105 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S index cca942fe6115..b61d86e7ceb6 100644 --- a/arch/powerpc/kernel/cpu_setup_power4.S +++ b/arch/powerpc/kernel/cpu_setup_power4.S @@ -130,7 +130,7 @@ _GLOBAL(__save_cpu_setup) mfcr r7 /* Get storage ptr */ - LOADADDR(r5,cpu_state_storage) + LOAD_REG_IMMEDIATE(r5,cpu_state_storage) /* We only deal with 970 for now */ mfspr r0,SPRN_PVR @@ -164,7 +164,7 @@ _GLOBAL(__restore_cpu_setup) /* Get storage ptr (FIXME when using anton reloc as we * are running with translation disabled here */ - LOADADDR(r5,cpu_state_storage) + LOAD_REG_IMMEDIATE(r5,cpu_state_storage) /* We only deal with 970 for now */ mfspr r0,SPRN_PVR diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 036b71d2adfc..d8da2a35c0a4 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -988,7 +988,7 @@ _GLOBAL(enter_rtas) stwu r1,-INT_FRAME_SIZE(r1) mflr r0 stw r0,INT_FRAME_SIZE+4(r1) - LOADADDR(r4, rtas) + LOAD_REG_ADDR(r4, rtas) lis r6,1f@ha /* physical return address for rtas */ addi r6,r6,1f@l tophys(r6,r6) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index aacebb33e98a..4ba81e1b6bf1 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -689,9 +689,8 @@ _GLOBAL(enter_rtas) std r6,PACASAVEDMSR(r13) /* Setup our real return addr */ - SET_REG_TO_LABEL(r4,.rtas_return_loc) - SET_REG_TO_CONST(r9,PAGE_OFFSET) - sub r4,r4,r9 + LOAD_REG_ADDR(r4,.rtas_return_loc) + clrldi r4,r4,2 /* convert to realmode address */ mtlr r4 li r0,0 @@ -706,7 +705,7 @@ _GLOBAL(enter_rtas) sync /* disable interrupts so SRR0/1 */ mtmsrd r0 /* don't get trashed */ - SET_REG_TO_LABEL(r4,rtas) + LOAD_REG_ADDR(r4, rtas) ld r5,RTASENTRY(r4) /* get the rtas->entry value */ ld r4,RTASBASE(r4) /* get the rtas->base value */ @@ -718,8 +717,7 @@ _GLOBAL(enter_rtas) _STATIC(rtas_return_loc) /* relocation is off at this point */ mfspr r4,SPRN_SPRG3 /* Get PACA */ - SET_REG_TO_CONST(r5, PAGE_OFFSET) - sub r4,r4,r5 /* RELOC the PACA base pointer */ + clrldi r4,r4,2 /* convert to realmode address */ mfmsr r6 li r0,MSR_RI @@ -728,7 +726,7 @@ _STATIC(rtas_return_loc) mtmsrd r6 ld r1,PACAR1(r4) /* Restore our SP */ - LOADADDR(r3,.rtas_restore_regs) + LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs) ld r4,PACASAVEDMSR(r4) /* Restore our MSR */ mtspr SPRN_SRR0,r3 diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index b780b42c95fc..e4362dfa37fb 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S @@ -39,9 +39,9 @@ _GLOBAL(load_up_fpu) * to another. Instead we call giveup_fpu in switch_to. 
*/ #ifndef CONFIG_SMP - LOADBASE(r3, last_task_used_math) + LOAD_REG_ADDRBASE(r3, last_task_used_math) toreal(r3) - PPC_LL r4,OFF(last_task_used_math)(r3) + PPC_LL r4,ADDROFF(last_task_used_math)(r3) PPC_LCMPI 0,r4,0 beq 1f toreal(r4) @@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu) #ifndef CONFIG_SMP subi r4,r5,THREAD fromreal(r4) - PPC_STL r4,OFF(last_task_used_math)(r3) + PPC_STL r4,ADDROFF(last_task_used_math)(r3) #endif /* CONFIG_SMP */ /* restore registers and return */ /* we haven't used ctr or xer or lr */ @@ -113,8 +113,8 @@ _GLOBAL(giveup_fpu) 1: #ifndef CONFIG_SMP li r5,0 - LOADBASE(r4,last_task_used_math) - PPC_STL r5,OFF(last_task_used_math)(r4) + LOAD_REG_ADDRBASE(r4,last_task_used_math) + PPC_STL r5,ADDROFF(last_task_used_math)(r4) #endif /* CONFIG_SMP */ blr diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 1c066d125375..b3718f3eb7b5 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -154,12 +154,12 @@ _GLOBAL(__secondary_hold) bne 100b #ifdef CONFIG_HMT - LOADADDR(r4, .hmt_init) + SET_REG_IMMEDIATE(r4, .hmt_init) mtctr r4 bctr #else #ifdef CONFIG_SMP - LOADADDR(r4, .pSeries_secondary_smp_init) + LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) mtctr r4 mr r3,r24 bctr @@ -205,9 +205,10 @@ exception_marker: #define EX_LR 72 /* - * We're short on space and time in the exception prolog, so we can't use - * the normal LOADADDR macro. Normally we just need the low halfword of the - * address, but for Kdump we need the whole low word. + * We're short on space and time in the exception prolog, so we can't + * use the normal SET_REG_IMMEDIATE macro. Normally we just need the + * low halfword of the address, but for Kdump we need the whole low + * word. */ #ifdef CONFIG_CRASH_DUMP #define LOAD_HANDLER(reg, label) \ @@ -713,7 +714,7 @@ system_reset_iSeries: lbz r23,PACAPROCSTART(r13) /* Test if this processor * should start */ sync - LOADADDR(r3,current_set) + LOAD_REG_IMMEDIATE(r3,current_set) sldi r28,r24,3 /* get current_set[cpu#] */ ldx r3,r3,r28 addi r1,r3,THREAD_SIZE @@ -746,8 +747,8 @@ iSeries_secondary_smp_loop: decrementer_iSeries_masked: li r11,1 stb r11,PACALPPACA+LPPACADECRINT(r13) - LOADBASE(r12,tb_ticks_per_jiffy) - lwz r12,OFF(tb_ticks_per_jiffy)(r12) + LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy) + lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12) mtspr SPRN_DEC,r12 /* fall through */ @@ -1412,7 +1413,7 @@ _GLOBAL(pSeries_secondary_smp_init) * physical cpu id in r24, we need to search the pacas to find * which logical id maps to our physical one. */ - LOADADDR(r13, paca) /* Get base vaddr of paca array */ + LOAD_REG_IMMEDIATE(r13, paca) /* Get base vaddr of paca array */ li r5,0 /* logical cpu id */ 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ cmpw r6,r24 /* Compare to our id */ @@ -1446,8 +1447,8 @@ _GLOBAL(pSeries_secondary_smp_init) #ifdef CONFIG_PPC_ISERIES _STATIC(__start_initialization_iSeries) /* Clear out the BSS */ - LOADADDR(r11,__bss_stop) - LOADADDR(r8,__bss_start) + LOAD_REG_IMMEDIATE(r11,__bss_stop) + LOAD_REG_IMMEDIATE(r8,__bss_start) sub r11,r11,r8 /* bss size */ addi r11,r11,7 /* round up to an even double word */ rldicl. 
r11,r11,61,3 /* shift right by 3 */ @@ -1458,17 +1459,17 @@ _STATIC(__start_initialization_iSeries) 3: stdu r0,8(r8) bdnz 3b 4: - LOADADDR(r1,init_thread_union) + LOAD_REG_IMMEDIATE(r1,init_thread_union) addi r1,r1,THREAD_SIZE li r0,0 stdu r0,-STACK_FRAME_OVERHEAD(r1) - LOADADDR(r3,cpu_specs) - LOADADDR(r4,cur_cpu_spec) + LOAD_REG_IMMEDIATE(r3,cpu_specs) + LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) li r5,0 bl .identify_cpu - LOADADDR(r2,__toc_start) + LOAD_REG_IMMEDIATE(r2,__toc_start) addi r2,r2,0x4000 addi r2,r2,0x4000 @@ -1528,7 +1529,7 @@ _GLOBAL(__start_initialization_multiplatform) li r24,0 /* Switch off MMU if not already */ - LOADADDR(r4, .__after_prom_start - KERNELBASE) + LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE) add r4,r4,r30 bl .__mmu_off b .__after_prom_start @@ -1548,7 +1549,7 @@ _STATIC(__boot_from_prom) /* put a relocation offset into r3 */ bl .reloc_offset - LOADADDR(r2,__toc_start) + LOAD_REG_IMMEDIATE(r2,__toc_start) addi r2,r2,0x4000 addi r2,r2,0x4000 @@ -1588,9 +1589,9 @@ _STATIC(__after_prom_start) */ bl .reloc_offset mr r26,r3 - SET_REG_TO_CONST(r27,KERNELBASE) + LOAD_REG_IMMEDIATE(r27, KERNELBASE) - LOADADDR(r3, PHYSICAL_START) /* target addr */ + LOAD_REG_IMMEDIATE(r3, PHYSICAL_START) /* target addr */ // XXX FIXME: Use phys returned by OF (r30) add r4,r27,r26 /* source addr */ @@ -1598,7 +1599,7 @@ _STATIC(__after_prom_start) /* i.e. where we are running */ /* the source addr */ - LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ + LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */ sub r5,r5,r27 li r6,0x100 /* Start offset, the first 0x100 */ @@ -1608,11 +1609,11 @@ _STATIC(__after_prom_start) /* this includes the code being */ /* executed here. */ - LOADADDR(r0, 4f) /* Jump to the copy of this code */ + LOAD_REG_IMMEDIATE(r0, 4f) /* Jump to the copy of this code */ mtctr r0 /* that we just made/relocated */ bctr -4: LOADADDR(r5,klimit) +4: LOAD_REG_IMMEDIATE(r5,klimit) add r5,r5,r26 ld r5,0(r5) /* get the value of klimit */ sub r5,r5,r27 @@ -1694,7 +1695,7 @@ _GLOBAL(pmac_secondary_start) mtmsrd r3 /* RI on */ /* Set up a paca value for this processor. */ - LOADADDR(r4, paca) /* Get base vaddr of paca array */ + LOAD_REG_IMMEDIATE(r4, paca) /* Get base vaddr of paca array */ mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ add r13,r13,r4 /* for this processor. */ mtspr SPRN_SPRG3,r13 /* Save vaddr of paca in SPRG3 */ @@ -1731,7 +1732,7 @@ _GLOBAL(__secondary_start) bl .early_setup_secondary /* Initialize the kernel stack. Just a repeat for iSeries. */ - LOADADDR(r3,current_set) + LOAD_REG_ADDR(r3, current_set) sldi r28,r24,3 /* get current_set[cpu#] */ ldx r1,r3,r28 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD @@ -1742,8 +1743,8 @@ _GLOBAL(__secondary_start) mtlr r7 /* enable MMU and jump to start_secondary */ - LOADADDR(r3,.start_secondary_prolog) - SET_REG_TO_CONST(r4, MSR_KERNEL) + LOAD_REG_ADDR(r3, .start_secondary_prolog) + LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) #ifdef DO_SOFT_DISABLE ori r4,r4,MSR_EE #endif @@ -1792,8 +1793,8 @@ _STATIC(start_here_multiplatform) * be detached from the kernel completely. Besides, we need * to clear it now for kexec-style entry. */ - LOADADDR(r11,__bss_stop) - LOADADDR(r8,__bss_start) + LOAD_REG_IMMEDIATE(r11,__bss_stop) + LOAD_REG_IMMEDIATE(r8,__bss_start) sub r11,r11,r8 /* bss size */ addi r11,r11,7 /* round up to an even double word */ rldicl. r11,r11,61,3 /* shift right by 3 */ @@ -1831,7 +1832,7 @@ _STATIC(start_here_multiplatform) /* up the htab. 
This is done because we have relocated the */ /* kernel but are still running in real mode. */ - LOADADDR(r3,init_thread_union) + LOAD_REG_IMMEDIATE(r3,init_thread_union) add r3,r3,r26 /* set up a stack pointer (physical address) */ @@ -1840,14 +1841,14 @@ _STATIC(start_here_multiplatform) stdu r0,-STACK_FRAME_OVERHEAD(r1) /* set up the TOC (physical address) */ - LOADADDR(r2,__toc_start) + LOAD_REG_IMMEDIATE(r2,__toc_start) addi r2,r2,0x4000 addi r2,r2,0x4000 add r2,r2,r26 - LOADADDR(r3,cpu_specs) + LOAD_REG_IMMEDIATE(r3, cpu_specs) add r3,r3,r26 - LOADADDR(r4,cur_cpu_spec) + LOAD_REG_IMMEDIATE(r4,cur_cpu_spec) add r4,r4,r26 mr r5,r26 bl .identify_cpu @@ -1863,11 +1864,11 @@ _STATIC(start_here_multiplatform) * nowhere it can be initialized differently before we reach this * code */ - LOADADDR(r27, boot_cpuid) + LOAD_REG_IMMEDIATE(r27, boot_cpuid) add r27,r27,r26 lwz r27,0(r27) - LOADADDR(r24, paca) /* Get base vaddr of paca array */ + LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */ mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ add r13,r13,r24 /* for this processor. */ add r13,r13,r26 /* convert to physical addr */ @@ -1880,8 +1881,8 @@ _STATIC(start_here_multiplatform) mr r3,r31 bl .early_setup - LOADADDR(r3,.start_here_common) - SET_REG_TO_CONST(r4, MSR_KERNEL) + LOAD_REG_IMMEDIATE(r3, .start_here_common) + LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 rfid @@ -1895,7 +1896,7 @@ _STATIC(start_here_common) /* The following code sets up the SP and TOC now that we are */ /* running with translation enabled. */ - LOADADDR(r3,init_thread_union) + LOAD_REG_IMMEDIATE(r3,init_thread_union) /* set up the stack */ addi r1,r3,THREAD_SIZE @@ -1908,16 +1909,16 @@ _STATIC(start_here_common) li r3,0 bl .do_cpu_ftr_fixups - LOADADDR(r26, boot_cpuid) + LOAD_REG_IMMEDIATE(r26, boot_cpuid) lwz r26,0(r26) - LOADADDR(r24, paca) /* Get base vaddr of paca array */ + LOAD_REG_IMMEDIATE(r24, paca) /* Get base vaddr of paca array */ mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ add r13,r13,r24 /* for this processor. */ mtspr SPRN_SPRG3,r13 /* ptr to current */ - LOADADDR(r4,init_task) + LOAD_REG_IMMEDIATE(r4, init_task) std r4,PACACURRENT(r13) /* Load the TOC */ @@ -1940,7 +1941,7 @@ _STATIC(start_here_common) _GLOBAL(hmt_init) #ifdef CONFIG_HMT - LOADADDR(r5, hmt_thread_data) + LOAD_REG_IMMEDIATE(r5, hmt_thread_data) mfspr r7,SPRN_PVR srwi r7,r7,16 cmpwi r7,0x34 /* Pulsar */ @@ -1961,7 +1962,7 @@ _GLOBAL(hmt_init) b 101f __hmt_secondary_hold: - LOADADDR(r5, hmt_thread_data) + LOAD_REG_IMMEDIATE(r5, hmt_thread_data) clrldi r5,r5,4 li r7,0 mfspr r6,SPRN_PIR @@ -1989,7 +1990,7 @@ __hmt_secondary_hold: #ifdef CONFIG_HMT _GLOBAL(hmt_start_secondary) - LOADADDR(r4,__hmt_secondary_hold) + LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold) clrldi r4,r4,4 mtspr SPRN_NIADORM, r4 mfspr r4, SPRN_MSRDORM diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 1494e2f177f7..c16b4afab582 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -38,14 +38,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) /* We must dynamically check for the NAP feature as it * can be cleared by CPU init after the fixups are done */ - LOADBASE(r3,cur_cpu_spec) - ld r4,OFF(cur_cpu_spec)(r3) + LOAD_REG_ADDRBASE(r3,cur_cpu_spec) + ld r4,ADDROFF(cur_cpu_spec)(r3) ld r4,CPU_SPEC_FEATURES(r4) andi. 
r0,r4,CPU_FTR_CAN_NAP beqlr /* Now check if user or arch enabled NAP mode */ - LOADBASE(r3,powersave_nap) - lwz r4,OFF(powersave_nap)(r3) + LOAD_REG_ADDRBASE(r3,powersave_nap) + lwz r4,ADDROFF(powersave_nap)(r3) cmpwi 0,r4,0 beqlr diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 01d0d97a16e1..be982023409e 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -68,7 +68,7 @@ _GLOBAL(reloc_offset) mflr r0 bl 1f 1: mflr r3 - LOADADDR(r4,1b) + LOAD_REG_IMMEDIATE(r4,1b) subf r3,r4,r3 mtlr r0 blr @@ -80,7 +80,7 @@ _GLOBAL(add_reloc_offset) mflr r0 bl 1f 1: mflr r5 - LOADADDR(r4,1b) + LOAD_REG_IMMEDIATE(r4,1b) subf r5,r4,r5 add r3,r3,r5 mtlr r0 diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index ae48a002f81a..2778cce058e2 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -39,7 +39,7 @@ _GLOBAL(reloc_offset) mflr r0 bl 1f 1: mflr r3 - LOADADDR(r4,1b) + LOAD_REG_IMMEDIATE(r4,1b) subf r3,r4,r3 mtlr r0 blr @@ -51,7 +51,7 @@ _GLOBAL(add_reloc_offset) mflr r0 bl 1f 1: mflr r5 - LOADADDR(r4,1b) + LOAD_REG_IMMEDIATE(r4,1b) subf r5,r4,r5 add r3,r3,r5 mtlr r0 @@ -498,15 +498,15 @@ _GLOBAL(identify_cpu) */ _GLOBAL(do_cpu_ftr_fixups) /* Get CPU 0 features */ - LOADADDR(r6,cur_cpu_spec) + LOAD_REG_IMMEDIATE(r6,cur_cpu_spec) sub r6,r6,r3 ld r4,0(r6) sub r4,r4,r3 ld r4,CPU_SPEC_FEATURES(r4) /* Get the fixup table */ - LOADADDR(r6,__start___ftr_fixup) + LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup) sub r6,r6,r3 - LOADADDR(r7,__stop___ftr_fixup) + LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup) sub r7,r7,r3 /* Do the fixup */ 1: cmpld r6,r7 diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h index 0dc798d46ea4..ab8688d39024 100644 --- a/include/asm-powerpc/ppc_asm.h +++ b/include/asm-powerpc/ppc_asm.h @@ -156,52 +156,56 @@ n: #endif /* - * LOADADDR( rn, name ) - * loads the address of 'name' into 'rn' + * LOAD_REG_IMMEDIATE(rn, expr) + * Loads the value of the constant expression 'expr' into register 'rn' + * using immediate instructions only. Use this when it's important not + * to reference other data (i.e. on ppc64 when the TOC pointer is not + * valid). * - * LOADBASE( rn, name ) - * loads the address (possibly without the low 16 bits) of 'name' into 'rn' - * suitable for base+disp addressing + * LOAD_REG_ADDR(rn, name) + * Loads the address of label 'name' into register 'rn'. Use this when + * you don't particularly need immediate instructions only, but you need + * the whole address in one register (e.g. it's a structure address and + * you want to access various offsets within it). On ppc32 this is + * identical to LOAD_REG_IMMEDIATE. + * + * LOAD_REG_ADDRBASE(rn, name) + * ADDROFF(name) + * LOAD_REG_ADDRBASE loads part of the address of label 'name' into + * register 'rn'. ADDROFF(name) returns the remainder of the address as + * a constant expression. ADDROFF(name) is a signed expression < 16 bits + * in size, so is suitable for use directly as an offset in load and store + * instructions. 
Use this when loading/storing a single word or less as: + * LOAD_REG_ADDRBASE(rX, name) + * ld rY,ADDROFF(name)(rX) */ #ifdef __powerpc64__ -#define LOADADDR(rn,name) \ - lis rn,name##@highest; \ - ori rn,rn,name##@higher; \ - rldicr rn,rn,32,31; \ - oris rn,rn,name##@h; \ - ori rn,rn,name##@l - -#define LOADBASE(rn,name) \ - ld rn,name@got(r2) - -#define OFF(name) 0 - -#define SET_REG_TO_CONST(reg, value) \ - lis reg,(((value)>>48)&0xFFFF); \ - ori reg,reg,(((value)>>32)&0xFFFF); \ - rldicr reg,reg,32,31; \ - oris reg,reg,(((value)>>16)&0xFFFF); \ - ori reg,reg,((value)&0xFFFF); - -#define SET_REG_TO_LABEL(reg, label) \ - lis reg,(label)@highest; \ - ori reg,reg,(label)@higher; \ - rldicr reg,reg,32,31; \ - oris reg,reg,(label)@h; \ - ori reg,reg,(label)@l; +#define LOAD_REG_IMMEDIATE(reg,expr) \ + lis (reg),(expr)@highest; \ + ori (reg),(reg),(expr)@higher; \ + rldicr (reg),(reg),32,31; \ + oris (reg),(reg),(expr)@h; \ + ori (reg),(reg),(expr)@l; + +#define LOAD_REG_ADDR(reg,name) \ + ld (reg),name@got(r2) + +#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name) +#define ADDROFF(name) 0 /* offsets for stack frame layout */ #define LRSAVE 16 #else /* 32-bit */ -#define LOADADDR(rn,name) \ - lis rn,name@ha; \ - addi rn,rn,name@l -#define LOADBASE(rn,name) \ - lis rn,name@ha +#define LOAD_REG_IMMEDIATE(reg,expr) \ + lis (reg),(expr)@ha; \ + addi (reg),(reg),(expr)@l; + +#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name) -#define OFF(name) name@l +#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha +#define ADDROFF(name) name@l /* offsets for stack frame layout */ #define LRSAVE 4 -- cgit v1.2.3-58-ga151 From 3356bb9f7ba378a6e2709f9df95f4ea52111f4df Mon Sep 17 00:00:00 2001 From: David Gibson Date: Fri, 13 Jan 2006 10:26:42 +1100 Subject: [PATCH] powerpc: Remove lppaca structure from the PACA At present the lppaca - the structure shared with the iSeries hypervisor and phyp - is contained within the PACA, our own low-level per-cpu structure. This doesn't have to be so, the patch below removes it, making a separate array of lppaca structures. This saves approximately 500*NR_CPUS bytes of image size and kernel memory, because we don't need aligning gap between the Linux and hypervisor portions of every PACA. On the other hand it means an extra level of dereference in many accesses to the lppaca. The patch also gets rid of several places where we assign the paca address to a local variable for no particular reason. 
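The effect on callers is easiest to see side by side.  The fragment below is
condensed from hunks in this patch (pseries/setup.c and lib/locks.c) rather
than taken verbatim; the local variables are assumed to be declared in the
surrounding function:

	/* before: the lppaca was embedded in each paca entry */
	shared = get_paca()->lppaca.shared_proc;
	yield_count = paca[holder_cpu].lppaca.yield_count;

	/* after: one extra dereference through the new accessors */
	shared = get_lppaca()->shared_proc;
	yield_count = lppaca[holder_cpu].yield_count;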
Signed-off-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/asm-offsets.c | 2 +- arch/powerpc/kernel/entry_64.S | 3 ++- arch/powerpc/kernel/head_64.S | 25 ++++++++++++++--------- arch/powerpc/kernel/irq.c | 12 ++++-------- arch/powerpc/kernel/lparcfg.c | 13 +++++------- arch/powerpc/kernel/paca.c | 36 ++++++++++++++++++++++------------ arch/powerpc/kernel/time.c | 2 +- arch/powerpc/lib/locks.c | 8 ++------ arch/powerpc/platforms/iseries/irq.c | 6 ++---- arch/powerpc/platforms/iseries/misc.S | 3 ++- arch/powerpc/platforms/iseries/setup.c | 8 ++++---- arch/powerpc/platforms/iseries/smp.c | 2 +- arch/powerpc/platforms/pseries/lpar.c | 4 ++-- arch/powerpc/platforms/pseries/setup.c | 20 +++++++++---------- include/asm-powerpc/lppaca.h | 6 +++++- include/asm-powerpc/paca.h | 14 +------------ include/asm-powerpc/spinlock.h | 2 +- include/asm-powerpc/time.h | 5 ++--- 18 files changed, 84 insertions(+), 87 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 56399c5c931a..840aad43a98b 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -135,7 +135,7 @@ int main(void) DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); - DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); + DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 4ba81e1b6bf1..542036318866 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -511,7 +511,8 @@ restore: cmpdi 0,r5,0 beq 4f /* Check for pending interrupts (iSeries) */ - ld r3,PACALPPACA+LPPACAANYINT(r13) + ld r3,PACALPPACAPTR(r13) + ld r3,LPPACAANYINT(r3) cmpdi r3,0 beq+ 4f /* skip do_IRQ if no interrupts */ diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b3718f3eb7b5..308268466342 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -255,8 +255,9 @@ exception_marker: #define EXCEPTION_PROLOG_ISERIES_2 \ mfmsr r10; \ - ld r11,PACALPPACA+LPPACASRR0(r13); \ - ld r12,PACALPPACA+LPPACASRR1(r13); \ + ld r12,PACALPPACAPTR(r13); \ + ld r11,LPPACASRR0(r12); \ + ld r12,LPPACASRR1(r12); \ ori r10,r10,MSR_RI; \ mtmsrd r10,1 @@ -635,7 +636,8 @@ data_access_slb_iSeries: std r12,PACA_EXSLB+EX_R12(r13) mfspr r10,SPRN_SPRG1 std r10,PACA_EXSLB+EX_R13(r13) - ld r12,PACALPPACA+LPPACASRR1(r13); + ld r12,PACALPPACAPTR(r13) + ld r12,LPPACASRR1(r12) b .slb_miss_realmode STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) @@ -645,7 +647,8 @@ instruction_access_slb_iSeries: mtspr SPRN_SPRG1,r13 /* save r13 */ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ std r3,PACA_EXSLB+EX_R3(r13) - ld r3,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ + ld r3,PACALPPACAPTR(r13) + ld r3,LPPACASRR0(r3) /* get SRR0 value */ std r9,PACA_EXSLB+EX_R9(r13) mfcr r9 #ifdef __DISABLED__ @@ -657,7 +660,8 @@ instruction_access_slb_iSeries: std r12,PACA_EXSLB+EX_R12(r13) mfspr r10,SPRN_SPRG1 std r10,PACA_EXSLB+EX_R13(r13) - ld r12,PACALPPACA+LPPACASRR1(r13); + ld r12,PACALPPACAPTR(r13) + ld r12,LPPACASRR1(r12) b .slb_miss_realmode #ifdef __DISABLED__ @@ -746,7 +750,8 @@ iSeries_secondary_smp_loop: .globl decrementer_iSeries_masked 
decrementer_iSeries_masked: li r11,1 - stb r11,PACALPPACA+LPPACADECRINT(r13) + ld r12,PACALPPACAPTR(r13) + stb r11,LPPACADECRINT(r12) LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy) lwz r12,ADDROFF(tb_ticks_per_jiffy)(r12) mtspr SPRN_DEC,r12 @@ -755,8 +760,9 @@ decrementer_iSeries_masked: .globl hardware_interrupt_iSeries_masked hardware_interrupt_iSeries_masked: mtcrf 0x80,r9 /* Restore regs */ - ld r11,PACALPPACA+LPPACASRR0(r13) - ld r12,PACALPPACA+LPPACASRR1(r13) + ld r12,PACALPPACAPTR(r13) + ld r11,LPPACASRR0(r12) + ld r12,LPPACASRR1(r12) mtspr SPRN_SRR0,r11 mtspr SPRN_SRR1,r12 ld r9,PACA_EXGEN+EX_R9(r13) @@ -995,7 +1001,8 @@ _GLOBAL(slb_miss_realmode) ld r3,PACA_EXSLB+EX_R3(r13) lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ #ifdef CONFIG_PPC_ISERIES - ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ + ld r11,PACALPPACAPTR(r13) + ld r11,LPPACASRR0(r11) /* get SRR0 value */ #endif /* CONFIG_PPC_ISERIES */ mtlr r10 diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5651032d8706..d1fffce86df9 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -238,14 +238,10 @@ void do_IRQ(struct pt_regs *regs) irq_exit(); #ifdef CONFIG_PPC_ISERIES - { - struct paca_struct *lpaca = get_paca(); - - if (lpaca->lppaca.int_dword.fields.decr_int) { - lpaca->lppaca.int_dword.fields.decr_int = 0; - /* Signal a fake decrementer interrupt */ - timer_interrupt(regs); - } + if (get_lppaca()->int_dword.fields.decr_int) { + get_lppaca()->int_dword.fields.decr_int = 0; + /* Signal a fake decrementer interrupt */ + timer_interrupt(regs); } #endif } diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 9dda16ccde78..1ae96a8ed7e2 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -55,15 +55,13 @@ static unsigned long get_purr(void) { unsigned long sum_purr = 0; int cpu; - struct paca_struct *lpaca; for_each_cpu(cpu) { - lpaca = paca + cpu; - sum_purr += lpaca->lppaca.emulated_time_base; + sum_purr += lppaca[cpu].emulated_time_base; #ifdef PURR_DEBUG printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n", - cpu, lpaca->lppaca.emulated_time_base); + cpu, lppaca[cpu].emulated_time_base); #endif } return sum_purr; @@ -79,12 +77,11 @@ static int lparcfg_data(struct seq_file *m, void *v) unsigned long pool_id, lp_index; int shared, entitled_capacity, max_entitled_capacity; int processors, max_processors; - struct paca_struct *lpaca = get_paca(); unsigned long purr = get_purr(); seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS); - shared = (int)(lpaca->lppaca_ptr->shared_proc); + shared = (int)(get_lppaca()->shared_proc); seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n", e2a(xItExtVpdPanel.mfgID[2]), e2a(xItExtVpdPanel.mfgID[3]), @@ -402,7 +399,7 @@ static int lparcfg_data(struct seq_file *m, void *v) (h_resource >> 0 * 8) & 0xffff); /* pool related entries are apropriate for shared configs */ - if (paca[0].lppaca.shared_proc) { + if (lppaca[0].shared_proc) { h_pic(&pool_idle_time, &pool_procs); @@ -451,7 +448,7 @@ static int lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "partition_potential_processors=%d\n", partition_potential_processors); - seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc); + seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc); return 0; } diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 999bdd816769..5d1b708086bd 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -25,6 +25,28 @@ * field correctly */ 
extern unsigned long __toc_start; +/* + * iSeries structure which the hypervisor knows about - this structure + * should not cross a page boundary. The vpa_init/register_vpa call + * is now known to fail if the lppaca structure crosses a page + * boundary. The lppaca is also used on POWER5 pSeries boxes. The + * lppaca is 640 bytes long, and cannot readily change since the + * hypervisor knows its layout, so a 1kB alignment will suffice to + * ensure that it doesn't cross a page boundary. + */ +struct lppaca lppaca[] = { + [0 ... (NR_CPUS-1)] = { + .desc = 0xd397d781, /* "LpPa" */ + .size = sizeof(struct lppaca), + .dyn_proc_status = 2, + .decr_val = 0x00ff0000, + .fpregs_in_use = 1, + .end_of_quantum = 0xfffffffffffffffful, + .slb_count = 64, + .vmxregs_in_use = 0, + }, +}; + /* The Paca is an array with one entry per processor. Each contains an * lppaca, which contains the information shared between the * hypervisor and Linux. @@ -35,27 +57,17 @@ extern unsigned long __toc_start; * processor (not thread). */ #define PACA_INIT_COMMON(number, start, asrr, asrv) \ + .lppaca_ptr = &lppaca[number], \ .lock_token = 0x8000, \ .paca_index = (number), /* Paca Index */ \ .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ .stab_real = (asrr), /* Real pointer to segment table */ \ .stab_addr = (asrv), /* Virt pointer to segment table */ \ .cpu_start = (start), /* Processor start */ \ - .hw_cpu_id = 0xffff, \ - .lppaca = { \ - .desc = 0xd397d781, /* "LpPa" */ \ - .size = sizeof(struct lppaca), \ - .dyn_proc_status = 2, \ - .decr_val = 0x00ff0000, \ - .fpregs_in_use = 1, \ - .end_of_quantum = 0xfffffffffffffffful, \ - .slb_count = 64, \ - .vmxregs_in_use = 0, \ - }, \ + .hw_cpu_id = 0xffff, #ifdef CONFIG_PPC_ISERIES #define PACA_INIT_ISERIES(number) \ - .lppaca_ptr = &paca[number].lppaca, \ .reg_save_ptr = &iseries_reg_save[number], #define PACA_INIT(number) \ diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 56f50e91bddb..c4a294d657b9 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -431,7 +431,7 @@ void timer_interrupt(struct pt_regs * regs) profile_tick(CPU_PROFILING, regs); #ifdef CONFIG_PPC_ISERIES - get_paca()->lppaca.int_dword.fields.decr_int = 0; + get_lppaca()->int_dword.fields.decr_int = 0; #endif while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu))) diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 35bd03c41dd1..8362fa272ca5 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -28,15 +28,13 @@ void __spin_yield(raw_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; - struct paca_struct *holder_paca; lock_value = lock->slock; if (lock_value == 0) return; holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); - holder_paca = &paca[holder_cpu]; - yield_count = holder_paca->lppaca.yield_count; + yield_count = lppaca[holder_cpu].yield_count; if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); @@ -60,15 +58,13 @@ void __rw_yield(raw_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; - struct paca_struct *holder_paca; lock_value = rw->lock; if (lock_value >= 0) return; /* no write lock at present */ holder_cpu = lock_value & 0xffff; BUG_ON(holder_cpu >= NR_CPUS); - holder_paca = &paca[holder_cpu]; - yield_count = holder_paca->lppaca.yield_count; + yield_count = lppaca[holder_cpu].yield_count; if ((yield_count & 1) == 0) return; /* virtual cpu is currently running */ rmb(); diff --git 
a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 83442ea77476..be3fbfc24e6c 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -334,14 +334,12 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus, */ int iSeries_get_irq(struct pt_regs *regs) { - struct paca_struct *lpaca; /* -2 means ignore this interrupt */ int irq = -2; - lpaca = get_paca(); #ifdef CONFIG_SMP - if (lpaca->lppaca.int_dword.fields.ipi_cnt) { - lpaca->lppaca.int_dword.fields.ipi_cnt = 0; + if (get_lppaca()->int_dword.fields.ipi_cnt) { + get_lppaca()->int_dword.fields.ipi_cnt = 0; iSeries_smp_message_recv(regs); } #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S index dfe7aa1ba098..7641fc7e550a 100644 --- a/arch/powerpc/platforms/iseries/misc.S +++ b/arch/powerpc/platforms/iseries/misc.S @@ -44,7 +44,8 @@ _GLOBAL(local_irq_restore) /* Check pending interrupts */ /* A decrementer, IPI or PMC interrupt may have occurred * while we were in the hypervisor (which enables) */ - ld r4,PACALPPACA+LPPACAANYINT(r13) + ld r4,PACALPPACAPTR(r13) + ld r4,LPPACAANYINT(r4) cmpdi r4,0 beqlr diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index c6bbe5c25107..3f8790146b00 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c @@ -538,7 +538,7 @@ static unsigned long __init build_iSeries_Memory_Map(void) */ static void __init iSeries_setup_arch(void) { - if (get_paca()->lppaca.shared_proc) { + if (get_lppaca()->shared_proc) { ppc_md.idle_loop = iseries_shared_idle; printk(KERN_INFO "Using shared processor idle loop\n"); } else { @@ -647,7 +647,7 @@ static void yield_shared_processor(void) * The decrementer stops during the yield. Force a fake decrementer * here and let the timer_interrupt code sort out the actual time. 
*/ - get_paca()->lppaca.int_dword.fields.decr_int = 1; + get_lppaca()->int_dword.fields.decr_int = 1; process_iSeries_events(); } @@ -883,7 +883,7 @@ void dt_cpus(struct iseries_flat_dt *dt) pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); for (i = 0; i < NR_CPUS; i++) { - if (paca[i].lppaca.dyn_proc_status >= 2) + if (lppaca[i].dyn_proc_status >= 2) continue; snprintf(p, 32 - (p - buf), "@%d", i); @@ -891,7 +891,7 @@ void dt_cpus(struct iseries_flat_dt *dt) dt_prop_str(dt, "device_type", "cpu"); - index = paca[i].lppaca.dyn_hv_phys_proc_index; + index = lppaca[i].dyn_hv_phys_proc_index; d = &xIoHriProcessorVpd[index]; dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c index fcb094ec6aec..6f9d407a709f 100644 --- a/arch/powerpc/platforms/iseries/smp.c +++ b/arch/powerpc/platforms/iseries/smp.c @@ -91,7 +91,7 @@ static void smp_iSeries_kick_cpu(int nr) BUG_ON((nr < 0) || (nr >= NR_CPUS)); /* Verify that our partition has a processor nr */ - if (paca[nr].lppaca.dyn_proc_status >= 2) + if (lppaca[nr].dyn_proc_status >= 2) return; /* The processor is currently spinning, waiting diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 1fe445ab78a6..8952528d31ac 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -254,11 +254,11 @@ out: void vpa_init(int cpu) { int hwcpu = get_hard_smp_processor_id(cpu); - unsigned long vpa = __pa(&paca[cpu].lppaca); + unsigned long vpa = __pa(&lppaca[cpu]); long ret; if (cpu_has_feature(CPU_FTR_ALTIVEC)) - paca[cpu].lppaca.vmxregs_in_use = 1; + lppaca[cpu].vmxregs_in_use = 1; ret = register_vpa(hwcpu, vpa); diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 68b7f086d63d..da6cebaf72cd 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c @@ -190,7 +190,7 @@ static void pseries_lpar_enable_pmcs(void) /* instruct hypervisor to maintain PMCs */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) - get_paca()->lppaca.pmcregs_in_use = 1; + get_lppaca()->pmcregs_in_use = 1; } static void __init pSeries_setup_arch(void) @@ -234,7 +234,7 @@ static void __init pSeries_setup_arch(void) /* Choose an idle loop */ if (firmware_has_feature(FW_FEATURE_SPLPAR)) { vpa_init(boot_cpuid); - if (get_paca()->lppaca.shared_proc) { + if (get_lppaca()->shared_proc) { printk(KERN_INFO "Using shared processor idle loop\n"); ppc_md.idle_loop = pseries_shared_idle; } else { @@ -444,10 +444,10 @@ DECLARE_PER_CPU(unsigned long, smt_snooze_delay); static inline void dedicated_idle_sleep(unsigned int cpu) { - struct paca_struct *ppaca = &paca[cpu ^ 1]; + struct lppaca *plppaca = &lppaca[cpu ^ 1]; /* Only sleep if the other thread is not idle */ - if (!(ppaca->lppaca.idle)) { + if (!(plppaca->idle)) { local_irq_disable(); /* @@ -480,7 +480,6 @@ static inline void dedicated_idle_sleep(unsigned int cpu) static void pseries_dedicated_idle(void) { - struct paca_struct *lpaca = get_paca(); unsigned int cpu = smp_processor_id(); unsigned long start_snooze; unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay); @@ -491,7 +490,7 @@ static void pseries_dedicated_idle(void) * Indicate to the HV that we are idle. Now would be * a good time to find other work to dispatch. 
*/ - lpaca->lppaca.idle = 1; + get_lppaca()->idle = 1; if (!need_resched()) { start_snooze = get_tb() + @@ -518,7 +517,7 @@ static void pseries_dedicated_idle(void) HMT_medium(); } - lpaca->lppaca.idle = 0; + get_lppaca()->idle = 0; ppc64_runlatch_on(); preempt_enable_no_resched(); @@ -532,7 +531,6 @@ static void pseries_dedicated_idle(void) static void pseries_shared_idle(void) { - struct paca_struct *lpaca = get_paca(); unsigned int cpu = smp_processor_id(); while (1) { @@ -540,7 +538,7 @@ static void pseries_shared_idle(void) * Indicate to the HV that we are idle. Now would be * a good time to find other work to dispatch. */ - lpaca->lppaca.idle = 1; + get_lppaca()->idle = 1; while (!need_resched() && !cpu_is_offline(cpu)) { local_irq_disable(); @@ -564,7 +562,7 @@ static void pseries_shared_idle(void) HMT_medium(); } - lpaca->lppaca.idle = 0; + get_lppaca()->idle = 0; ppc64_runlatch_on(); preempt_enable_no_resched(); @@ -588,7 +586,7 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) { /* Don't risk a hypervisor call if we're crashing */ if (!crash_shutdown) { - unsigned long vpa = __pa(&get_paca()->lppaca); + unsigned long vpa = __pa(get_lppaca()); if (unregister_vpa(hard_smp_processor_id(), vpa)) { printk("VPA deregistration of cpu %u (hw_cpu_id %d) " diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h index ff82ea7c4829..cd9f11f1ef14 100644 --- a/include/asm-powerpc/lppaca.h +++ b/include/asm-powerpc/lppaca.h @@ -29,7 +29,9 @@ //---------------------------------------------------------------------------- #include -struct lppaca { +/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k + * alignment is sufficient to prevent this */ +struct __attribute__((__aligned__(0x400))) lppaca { //============================================================================= // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data // NOTE: The xDynXyz fields are fields that will be dynamically changed by @@ -129,5 +131,7 @@ struct lppaca { u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF }; +extern struct lppaca lppaca[]; + #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_LPPACA_H */ diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h index a64b4d425dab..c9add8f1ad94 100644 --- a/include/asm-powerpc/paca.h +++ b/include/asm-powerpc/paca.h @@ -23,6 +23,7 @@ register struct paca_struct *local_paca asm("r13"); #define get_paca() local_paca +#define get_lppaca() (get_paca()->lppaca_ptr) struct task_struct; @@ -95,19 +96,6 @@ struct paca_struct { u64 saved_r1; /* r1 save for RTAS calls */ u64 saved_msr; /* MSR saved here by enter_rtas */ u8 proc_enabled; /* irq soft-enable flag */ - - /* - * iSeries structure which the hypervisor knows about - - * this structure should not cross a page boundary. - * The vpa_init/register_vpa call is now known to fail if the - * lppaca structure crosses a page boundary. - * The lppaca is also used on POWER5 pSeries boxes. - * The lppaca is 640 bytes long, and cannot readily change - * since the hypervisor knows its layout, so a 1kB - * alignment will suffice to ensure that it doesn't - * cross a page boundary. 
- */
-	struct lppaca lppaca __attribute__((__aligned__(0x400)));
 };
 
 extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h
index 754900901cd8..26b8744ed529 100644
--- a/include/asm-powerpc/spinlock.h
+++ b/include/asm-powerpc/spinlock.h
@@ -80,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(raw_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
diff --git a/include/asm-powerpc/time.h b/include/asm-powerpc/time.h
index d9b86a17271b..baddc9ab57ad 100644
--- a/include/asm-powerpc/time.h
+++ b/include/asm-powerpc/time.h
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
 	set_dec_cpu6(val);
 #else
 #ifdef CONFIG_PPC_ISERIES
-	struct paca_struct *lpaca = get_paca();
 	int cur_dec;
 
-	if (lpaca->lppaca.shared_proc) {
-		lpaca->lppaca.virtual_decr = val;
+	if (get_lppaca()->shared_proc) {
+		get_lppaca()->virtual_decr = val;
 		cur_dec = get_dec();
 		if (cur_dec > val)
 			HvCall_setVirtualDecr();
-- cgit v1.2.3-58-ga151

From 144b9c135b963bcb7f242c7b83bff930620d3161 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Fri, 13 Jan 2006 15:37:17 +1100
Subject: [PATCH] powerpc: use lwsync in atomics, bitops, lock functions

eieio provides only store-store ordering.  When used to order an unlock
operation, loads may leak out of the critical region.  This is potentially
buggy; one example is if a user wants to atomically read a couple of
values.

We can solve this with an lwsync, which orders everything except
store-load.

I removed the (now unused) EIEIO_ON_SMP macros and the C versions
isync_on_smp and eieio_on_smp, now that we don't use them.  I also removed
some old comments that were used to identify inline spinlocks in assembly;
they don't make sense now that our locks are out of line.

Another interesting thing was that read_unlock was using an eieio even
though the rest of the spinlock code had already been converted to use
lwsync.
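To make the hazard concrete, here is a sketch (the lock and data names are
invented for illustration; this is not code from the patch):

	spin_lock(&stats_lock);
	a = stats->first;		/* load */
	b = stats->second;		/* load */
	spin_unlock(&stats_lock);	/* releasing store clears the lock word */

	/*
	 * If the unlock is ordered only by eieio (store-store), the two loads
	 * are not ordered against the releasing store, so another CPU can see
	 * the lock free before the loads complete and the pair is no longer
	 * read atomically.  lwsync orders everything except store->load,
	 * which is sufficient here.
	 */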
Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- include/asm-powerpc/atomic.h | 20 ++++++++++---------- include/asm-powerpc/bitops.h | 6 +++--- include/asm-powerpc/futex.h | 2 +- include/asm-powerpc/spinlock.h | 19 ++++++++++--------- include/asm-powerpc/synch.h | 23 ++++------------------- include/asm-powerpc/system.h | 8 ++++---- 6 files changed, 32 insertions(+), 46 deletions(-) (limited to 'include') diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 248f9aec959c..9ce51ba54c13 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v) int t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%2 # atomic_add_return\n\ add %0,%1,%0\n" PPC405_ERR77(0,%2) @@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v) int t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%2 # atomic_sub_return\n\ subf %0,%1,%0\n" PPC405_ERR77(0,%2) @@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v) int t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%1 # atomic_inc_return\n\ addic %0,%0,1\n" PPC405_ERR77(0,%1) @@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v) int t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%1 # atomic_dec_return\n\ addic %0,%0,-1\n" PPC405_ERR77(0,%1) @@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v) int t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%1 # atomic_dec_if_positive\n\ addic. %0,%0,-1\n\ blt- 2f\n" @@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v) long t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%2 # atomic64_add_return\n\ add %0,%1,%0\n\ stdcx. %0,0,%2 \n\ @@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v) long t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%2 # atomic64_sub_return\n\ subf %0,%1,%0\n\ stdcx. %0,0,%2 \n\ @@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v) long t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%1 # atomic64_inc_return\n\ addic %0,%0,1\n\ stdcx. %0,0,%1 \n\ @@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v) long t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%1 # atomic64_dec_return\n\ addic %0,%0,-1\n\ stdcx. %0,0,%1\n\ @@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v) long t; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\ addic. 
%0,%0,-1\n\ blt- 2f\n\ diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index 1996eaa8aeae..bf6941a810b8 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h @@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr, unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n" "or %1,%0,%2 \n" PPC405_ERR77(0,%3) @@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr, unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n" "andc %1,%0,%2 \n" PPC405_ERR77(0,%3) @@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr, unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n" "xor %1,%0,%2 \n" PPC405_ERR77(0,%3) diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h index f0319d50b129..39e85f320a76 100644 --- a/include/asm-powerpc/futex.h +++ b/include/asm-powerpc/futex.h @@ -11,7 +11,7 @@ #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ __asm__ __volatile ( \ - SYNC_ON_SMP \ + LWSYNC_ON_SMP \ "1: lwarx %0,0,%2\n" \ insn \ PPC405_ERR77(0, %2) \ diff --git a/include/asm-powerpc/spinlock.h b/include/asm-powerpc/spinlock.h index 26b8744ed529..895cb6d3a42a 100644 --- a/include/asm-powerpc/spinlock.h +++ b/include/asm-powerpc/spinlock.h @@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock) token = LOCK_TOKEN; __asm__ __volatile__( -"1: lwarx %0,0,%2 # __spin_trylock\n\ +"1: lwarx %0,0,%2\n\ cmpwi 0,%0,0\n\ bne- 2f\n\ stwcx. %1,0,%2\n\ @@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock) { - __asm__ __volatile__(SYNC_ON_SMP" # __raw_spin_unlock" - : : :"memory"); + __asm__ __volatile__("# __raw_spin_unlock\n\t" + LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; } @@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw) long tmp; __asm__ __volatile__( -"1: lwarx %0,0,%1 # read_trylock\n" +"1: lwarx %0,0,%1\n" __DO_SIGN_EXTEND " addic. %0,%0,1\n\ ble- 2f\n" @@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw) token = WRLOCK_TOKEN; __asm__ __volatile__( -"1: lwarx %0,0,%2 # write_trylock\n\ +"1: lwarx %0,0,%2\n\ cmpwi 0,%0,0\n\ bne- 2f\n" PPC405_ERR77(0,%1) @@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) long tmp; __asm__ __volatile__( - "eieio # read_unlock\n\ -1: lwarx %0,0,%1\n\ + "# read_unlock\n\t" + LWSYNC_ON_SMP +"1: lwarx %0,0,%1\n\ addic %0,%0,-1\n" PPC405_ERR77(0,%1) " stwcx. 
%0,0,%1\n\ @@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw) static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) { - __asm__ __volatile__(SYNC_ON_SMP" # write_unlock" - : : :"memory"); + __asm__ __volatile__("# write_unlock\n\t" + LWSYNC_ON_SMP: : :"memory"); rw->lock = 0; } diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h index 794870ab8fd3..c90d9d9aae72 100644 --- a/include/asm-powerpc/synch.h +++ b/include/asm-powerpc/synch.h @@ -2,6 +2,8 @@ #define _ASM_POWERPC_SYNCH_H #ifdef __KERNEL__ +#include + #ifdef __powerpc64__ #define __SUBARCH_HAS_LWSYNC #endif @@ -12,20 +14,12 @@ # define LWSYNC sync #endif - -/* - * Arguably the bitops and *xchg operations don't imply any memory barrier - * or SMP ordering, but in fact a lot of drivers expect them to imply - * both, since they do on x86 cpus. - */ #ifdef CONFIG_SMP -#define EIEIO_ON_SMP "eieio\n" #define ISYNC_ON_SMP "\n\tisync" -#define SYNC_ON_SMP __stringify(LWSYNC) "\n" +#define LWSYNC_ON_SMP __stringify(LWSYNC) "\n" #else -#define EIEIO_ON_SMP #define ISYNC_ON_SMP -#define SYNC_ON_SMP +#define LWSYNC_ON_SMP #endif static inline void eieio(void) @@ -38,14 +32,5 @@ static inline void isync(void) __asm__ __volatile__ ("isync" : : : "memory"); } -#ifdef CONFIG_SMP -#define eieio_on_smp() eieio() -#define isync_on_smp() isync() -#else -#define eieio_on_smp() __asm__ __volatile__("": : :"memory") -#define isync_on_smp() __asm__ __volatile__("": : :"memory") -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_SYNCH_H */ - diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h index 9b822afa7d0e..d9bf53653b10 100644 --- a/include/asm-powerpc/system.h +++ b/include/asm-powerpc/system.h @@ -212,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stwcx. %3,0,%2 \n\ @@ -232,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val) unsigned long prev; __asm__ __volatile__( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%2 \n" PPC405_ERR77(0,%2) " stdcx. %3,0,%2 \n\ @@ -287,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) unsigned int prev; __asm__ __volatile__ ( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ cmpw 0,%0,%3\n\ bne- 2f\n" @@ -311,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) unsigned long prev; __asm__ __volatile__ ( - EIEIO_ON_SMP + LWSYNC_ON_SMP "1: ldarx %0,0,%2 # __cmpxchg_u64\n\ cmpd 0,%0,%3\n\ bne- 2f\n\ -- cgit v1.2.3-58-ga151 From b11fa580ac06b34944a2b46a44ebce2c284e1a76 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 13 Jan 2006 16:51:52 +1100 Subject: [PATCH] powerpc: reformat atomic_add_unless It makes my eyes hurt. Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras --- include/asm-powerpc/atomic.h | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h index 9ce51ba54c13..147a38dcc766 100644 --- a/include/asm-powerpc/atomic.h +++ b/include/asm-powerpc/atomic.h @@ -176,19 +176,19 @@ static __inline__ int atomic_dec_return(atomic_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if @v was not @u, and zero otherwise. 
*/ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ +#define atomic_add_unless(v, a, u) \ +({ \ + int c, old; \ + c = atomic_read(v); \ + for (;;) { \ + if (unlikely(c == (u))) \ + break; \ + old = atomic_cmpxchg((v), c, c + (a)); \ + if (likely(old == c)) \ + break; \ + c = old; \ + } \ + c != (u); \ }) #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) -- cgit v1.2.3-58-ga151 From 80f15dc703b3677d0b025bafd215f1f3664c8978 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sat, 14 Jan 2006 10:11:39 +1100 Subject: powerpc: Provide a suitable AT_PLATFORM value The glibc folks want to use AT_PLATFORM to select between possible alternative versions of shared libraries. This commit makes the kernel supply an AT_PLATFORM string that indicates what class of processor we are running on. Processors with the same set of user-level instructions and roughly the same instruction scheduling characteristics are given the same AT_PLATFORM value; for example, 821, 823 and 860 are all reported as "ppc823", and 7447, 7447A, 7448, 7450, 7451, 7455 are all called "ppc7450". The intention is that the AT_PLATFORM values match the values that gcc accepts for the -mcpu= option. For values which are numeric (e.g. -mcpu=750), "ppc" has been prepended. This also adds a PPC_FEATURE_BOOKE bit to the AT_HWCAP value and sets it for the 440 family and the Freescale 85xx family. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cputable.c | 172 ++++++++++++++++++++++++++++++----------- include/asm-powerpc/cputable.h | 4 + include/asm-powerpc/elf.h | 16 ++-- 3 files changed, 139 insertions(+), 53 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 43c74a6b07b1..cf1eea1a2299 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -55,7 +55,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); #define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4) #define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5) #define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS) - +#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) /* We only set the spe features if the kernel was compiled with * spe support @@ -80,6 +81,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/power3", .oprofile_type = RS64, + .platform = "power3", }, { /* Power3+ */ .pvr_mask = 0xffff0000, @@ -93,6 +95,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/power3", .oprofile_type = RS64, + .platform = "power3", }, { /* Northstar */ .pvr_mask = 0xffff0000, @@ -106,6 +109,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = RS64, + .platform = "rs64", }, { /* Pulsar */ .pvr_mask = 0xffff0000, @@ -119,6 +123,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = RS64, + .platform = "rs64", }, { /* I-star */ .pvr_mask = 0xffff0000, @@ -132,6 +137,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = RS64, + .platform = "rs64", }, { /* S-star */ .pvr_mask = 0xffff0000, 
@@ -145,6 +151,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", .oprofile_type = RS64, + .platform = "rs64", }, { /* Power4 */ .pvr_mask = 0xffff0000, @@ -158,6 +165,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = POWER4, + .platform = "power4", }, { /* Power4+ */ .pvr_mask = 0xffff0000, @@ -171,6 +179,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power4", .oprofile_type = POWER4, + .platform = "power4", }, { /* PPC970 */ .pvr_mask = 0xffff0000, @@ -185,6 +194,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = POWER4, + .platform = "ppc970", }, #endif /* CONFIG_PPC64 */ #if defined(CONFIG_PPC64) || defined(CONFIG_POWER4) @@ -205,6 +215,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = POWER4, + .platform = "ppc970", }, #endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */ #ifdef CONFIG_PPC64 @@ -220,6 +231,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", .oprofile_type = POWER4, + .platform = "ppc970", }, { /* Power5 GR */ .pvr_mask = 0xffff0000, @@ -233,6 +245,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power5", .oprofile_type = POWER4, + .platform = "power5", }, { /* Power5 GS */ .pvr_mask = 0xffff0000, @@ -246,6 +259,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power5+", .oprofile_type = POWER4, + .platform = "power5+", }, { /* Cell Broadband Engine */ .pvr_mask = 0xffff0000, @@ -257,6 +271,7 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 128, .dcache_bsize = 128, .cpu_setup = __setup_cpu_be, + .platform = "ppc-cell-be", }, { /* default match */ .pvr_mask = 0x00000000, @@ -268,6 +283,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .num_pmcs = 6, .cpu_setup = __setup_cpu_power4, + .platform = "power4", } #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 @@ -281,6 +297,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc601", }, { /* 603 */ .pvr_mask = 0xffff0000, @@ -290,7 +307,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* 603e */ .pvr_mask = 0xffff0000, @@ -300,7 +318,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* 603ev */ .pvr_mask = 0xffff0000, @@ -310,7 +329,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* 604 */ .pvr_mask = 0xffff0000, @@ -321,7 +341,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 2, - .cpu_setup = __setup_cpu_604 + .cpu_setup = __setup_cpu_604, + .platform = "ppc604", }, { /* 604e */ .pvr_mask = 0xfffff000, @@ -332,7 +353,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_604 + .cpu_setup = __setup_cpu_604, + 
.platform = "ppc604", }, { /* 604r */ .pvr_mask = 0xffff0000, @@ -343,7 +365,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_604 + .cpu_setup = __setup_cpu_604, + .platform = "ppc604", }, { /* 604ev */ .pvr_mask = 0xffff0000, @@ -354,7 +377,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_604 + .cpu_setup = __setup_cpu_604, + .platform = "ppc604", }, { /* 740/750 (0x4202, don't support TAU ?) */ .pvr_mask = 0xffffffff, @@ -365,7 +389,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750 + .cpu_setup = __setup_cpu_750, + .platform = "ppc750", }, { /* 750CX (80100 and 8010x?) */ .pvr_mask = 0xfffffff0, @@ -376,7 +401,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750cx + .cpu_setup = __setup_cpu_750cx, + .platform = "ppc750", }, { /* 750CX (82201 and 82202) */ .pvr_mask = 0xfffffff0, @@ -387,7 +413,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750cx + .cpu_setup = __setup_cpu_750cx, + .platform = "ppc750", }, { /* 750CXe (82214) */ .pvr_mask = 0xfffffff0, @@ -398,7 +425,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750cx + .cpu_setup = __setup_cpu_750cx, + .platform = "ppc750", }, { /* 750CXe "Gekko" (83214) */ .pvr_mask = 0xffffffff, @@ -409,7 +437,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750cx + .cpu_setup = __setup_cpu_750cx, + .platform = "ppc750", }, { /* 745/755 */ .pvr_mask = 0xfffff000, @@ -420,7 +449,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750 + .cpu_setup = __setup_cpu_750, + .platform = "ppc750", }, { /* 750FX rev 1.x */ .pvr_mask = 0xffffff00, @@ -431,7 +461,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750 + .cpu_setup = __setup_cpu_750, + .platform = "ppc750", }, { /* 750FX rev 2.0 must disable HID0[DPM] */ .pvr_mask = 0xffffffff, @@ -442,7 +473,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750 + .cpu_setup = __setup_cpu_750, + .platform = "ppc750", }, { /* 750FX (All revs except 2.0) */ .pvr_mask = 0xffff0000, @@ -453,7 +485,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750fx + .cpu_setup = __setup_cpu_750fx, + .platform = "ppc750", }, { /* 750GX */ .pvr_mask = 0xffff0000, @@ -464,7 +497,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750fx + .cpu_setup = __setup_cpu_750fx, + .platform = "ppc750", }, { /* 740/750 (L2CR bit need fixup for 740) */ .pvr_mask = 0xffff0000, @@ -475,7 +509,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_750 + .cpu_setup = __setup_cpu_750, + .platform = "ppc750", }, { /* 7400 rev 1.1 ? 
(no TAU) */ .pvr_mask = 0xffffffff, @@ -486,7 +521,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_7400 + .cpu_setup = __setup_cpu_7400, + .platform = "ppc7400", }, { /* 7400 */ .pvr_mask = 0xffff0000, @@ -497,7 +533,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_7400 + .cpu_setup = __setup_cpu_7400, + .platform = "ppc7400", }, { /* 7410 */ .pvr_mask = 0xffff0000, @@ -508,7 +545,8 @@ struct cpu_spec cpu_specs[] = { .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, - .cpu_setup = __setup_cpu_7410 + .cpu_setup = __setup_cpu_7410, + .platform = "ppc7400", }, { /* 7450 2.0 - no doze/nap */ .pvr_mask = 0xffffffff, @@ -522,6 +560,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7450 2.1 */ .pvr_mask = 0xffffffff, @@ -535,6 +574,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7450 2.3 and newer */ .pvr_mask = 0xffff0000, @@ -548,6 +588,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7455 rev 1.x */ .pvr_mask = 0xffffff00, @@ -561,6 +602,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7455 rev 2.0 */ .pvr_mask = 0xffffffff, @@ -574,6 +616,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7455 others */ .pvr_mask = 0xffff0000, @@ -587,6 +630,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7447/7457 Rev 1.0 */ .pvr_mask = 0xffffffff, @@ -600,6 +644,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7447/7457 Rev 1.1 */ .pvr_mask = 0xffffffff, @@ -613,6 +658,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7447/7457 Rev 1.2 and later */ .pvr_mask = 0xffff0000, @@ -626,6 +672,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7447A */ .pvr_mask = 0xffff0000, @@ -639,6 +686,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 7448 */ .pvr_mask = 0xffff0000, @@ -652,6 +700,7 @@ struct cpu_spec cpu_specs[] = { .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", .oprofile_type = G4, + .platform = "ppc7450", }, { /* 82xx (8240, 8245, 8260 are all 603e cores) */ .pvr_mask = 0x7fff0000, @@ -661,7 +710,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* All G2_LE (603e core, plus some) have the same pvr */ .pvr_mask = 0x7fff0000, @@ -671,7 +721,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup 
= __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* e300 (a 603e core, plus some) on 83xx */ .pvr_mask = 0x7fff0000, @@ -681,7 +732,8 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, - .cpu_setup = __setup_cpu_603 + .cpu_setup = __setup_cpu_603, + .platform = "ppc603", }, { /* default match, we assume split I/D cache & TB (non-601)... */ .pvr_mask = 0x00000000, @@ -691,6 +743,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = COMMON_USER, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc603", }, #endif /* CLASSIC_PPC */ #ifdef CONFIG_8xx @@ -704,6 +757,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .icache_bsize = 16, .dcache_bsize = 16, + .platform = "ppc823", }, #endif /* CONFIG_8xx */ #ifdef CONFIG_40x @@ -715,6 +769,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .icache_bsize = 16, .dcache_bsize = 16, + .platform = "ppc403", }, { /* 403GCX */ .pvr_mask = 0xffffff00, @@ -725,6 +780,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB, .icache_bsize = 16, .dcache_bsize = 16, + .platform = "ppc403", }, { /* 403G ?? */ .pvr_mask = 0xffff0000, @@ -734,6 +790,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .icache_bsize = 16, .dcache_bsize = 16, + .platform = "ppc403", }, { /* 405GP */ .pvr_mask = 0xffff0000, @@ -744,6 +801,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* STB 03xxx */ .pvr_mask = 0xffff0000, @@ -754,6 +812,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* STB 04xxx */ .pvr_mask = 0xffff0000, @@ -764,6 +823,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* NP405L */ .pvr_mask = 0xffff0000, @@ -774,6 +834,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* NP4GS3 */ .pvr_mask = 0xffff0000, @@ -784,6 +845,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* NP405H */ .pvr_mask = 0xffff0000, @@ -794,6 +856,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* 405GPr */ .pvr_mask = 0xffff0000, @@ -804,6 +867,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* STBx25xx */ .pvr_mask = 0xffff0000, @@ -814,6 +878,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* 405LP */ .pvr_mask = 0xffff0000, @@ -823,6 +888,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* Xilinx Virtex-II Pro */ .pvr_mask = 0xffff0000, @@ -833,6 +899,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, { /* 405EP */ 
.pvr_mask = 0xffff0000, @@ -843,6 +910,7 @@ struct cpu_spec cpu_specs[] = { PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc405", }, #endif /* CONFIG_40x */ @@ -852,81 +920,90 @@ struct cpu_spec cpu_specs[] = { .pvr_value = 0x40000850, .cpu_name = "440EP Rev. A", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER, /* 440EP has an FPU */ + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { .pvr_mask = 0xf0000fff, .pvr_value = 0x400008d3, .cpu_name = "440EP Rev. B", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = COMMON_USER, /* 440EP has an FPU */ + .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440GP Rev. B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000440, .cpu_name = "440GP Rev. B", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440gp", }, { /* 440GP Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x40000481, .cpu_name = "440GP Rev. C", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440gp", }, { /* 440GX Rev. A */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000850, .cpu_name = "440GX Rev. A", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440GX Rev. B */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000851, .cpu_name = "440GX Rev. B", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440GX Rev. C */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000892, .cpu_name = "440GX Rev. C", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440GX Rev. F */ .pvr_mask = 0xf0000fff, .pvr_value = 0x50000894, .cpu_name = "440GX Rev. F", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440SP Rev. A */ .pvr_mask = 0xff000fff, .pvr_value = 0x53000891, .cpu_name = "440SP Rev. A", .cpu_features = CPU_FTRS_44X, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, { /* 440SPe Rev. A */ .pvr_mask = 0xff000fff, @@ -934,9 +1011,10 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "440SPe Rev. 
A", .cpu_features = CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB, - .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU, + .cpu_user_features = COMMON_USER_BOOKE, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "ppc440", }, #endif /* CONFIG_44x */ #ifdef CONFIG_FSL_BOOKE @@ -946,10 +1024,11 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "e200z5", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE | + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .dcache_bsize = 32, + .platform = "ppc5554", }, { /* e200z6 */ .pvr_mask = 0xfff00000, @@ -957,11 +1036,12 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "e200z6", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E200, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_UNIFIED_CACHE, .dcache_bsize = 32, + .platform = "ppc5554", }, { /* e500 */ .pvr_mask = 0xffff0000, @@ -969,14 +1049,15 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "e500", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E500, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_SPE_COMP | PPC_FEATURE_HAS_EFP_SINGLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = BOOKE, + .platform = "ppc8540", }, { /* e500v2 */ .pvr_mask = 0xffff0000, @@ -984,14 +1065,16 @@ struct cpu_spec cpu_specs[] = { .cpu_name = "e500v2", /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */ .cpu_features = CPU_FTRS_E500_2, - .cpu_user_features = PPC_FEATURE_32 | - PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP | - PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE, + .cpu_user_features = COMMON_USER_BOOKE | + PPC_FEATURE_SPE_COMP | + PPC_FEATURE_HAS_EFP_SINGLE | + PPC_FEATURE_HAS_EFP_DOUBLE, .icache_bsize = 32, .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", .oprofile_type = BOOKE, + .platform = "ppc8548", }, #endif #if !CLASSIC_PPC @@ -1003,6 +1086,7 @@ struct cpu_spec cpu_specs[] = { .cpu_user_features = PPC_FEATURE_32, .icache_bsize = 32, .dcache_bsize = 32, + .platform = "powerpc", } #endif /* !CLASSIC_PPC */ #endif /* CONFIG_PPC32 */ diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index ef6ead34a773..03017d905704 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -19,6 +19,7 @@ #define PPC_FEATURE_POWER5 0x00040000 #define PPC_FEATURE_POWER5_PLUS 0x00020000 #define PPC_FEATURE_CELL 0x00010000 +#define PPC_FEATURE_BOOKE 0x00008000 #ifdef __KERNEL__ #ifndef __ASSEMBLY__ @@ -64,6 +65,9 @@ struct cpu_spec { /* Processor specific oprofile operations */ enum powerpc_oprofile_type oprofile_type; + + /* Name of processor class, for the ELF AT_PLATFORM entry */ + char *platform; }; extern struct cpu_spec *cur_cpu_spec; diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h index 45f2af6f89c4..94d228f9c6ac 100644 --- a/include/asm-powerpc/elf.h +++ b/include/asm-powerpc/elf.h @@ -221,20 +221,18 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *); instruction set this cpu supports. This could be done in userspace, but it's not easy, and we've already done it here. 
*/ # define ELF_HWCAP (cur_cpu_spec->cpu_user_features) -#ifdef __powerpc64__ -# define ELF_PLAT_INIT(_r, load_addr) do { \ - _r->gpr[2] = load_addr; \ -} while (0) -#endif /* __powerpc64__ */ /* This yields a string that ld.so will use to load implementation specific libraries for optimization. This is more specific in - intent than poking at uname or /proc/cpuinfo. + intent than poking at uname or /proc/cpuinfo. */ - For the moment, we have only optimizations for the Intel generations, - but that could change... */ +#define ELF_PLATFORM (cur_cpu_spec->platform) -#define ELF_PLATFORM (NULL) +#ifdef __powerpc64__ +# define ELF_PLAT_INIT(_r, load_addr) do { \ + _r->gpr[2] = load_addr; \ +} while (0) +#endif /* __powerpc64__ */ #ifdef __KERNEL__ -- cgit v1.2.3-58-ga151 From 7a45fb19cef93574230827e6e2c97ad5760ddecd Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Fri, 13 Jan 2006 12:35:49 +0000 Subject: [PATCH] powerpc: oprofile cpu type names clash with other code In 2.6.15-git6 a change was committed in the oprofile support in the powerpc architecture. It introduced the powerpc_oprofile_type which contains the define G4. This causes a name clash with the existing wacom usb tablet driver. CC [M] drivers/usb/input/wacom.o drivers/usb/input/wacom.c:98: error: conflicting types for `G4' include/asm/cputable.h:37: error: previous declaration of `G4' CC [M] drivers/usb/mon/mon_text.o make[3]: *** [drivers/usb/input/wacom.o] Error 1 make[2]: *** [drivers/usb/input] Error 2 The elements of an enum declared in global scope are effectively global identifiers themselves. As such we need to ensure the names are unique. This patch updates the later oprofile support to use unique names. Signed-off-by: Andy Whitcroft Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/cputable.c | 52 +++++++++++++++++++++--------------------- arch/powerpc/oprofile/common.c | 8 +++---- include/asm-powerpc/cputable.h | 10 ++++---- 3 files changed, 35 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index cf1eea1a2299..10696456a4c6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -80,7 +80,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/power3", - .oprofile_type = RS64, + .oprofile_type = PPC_OPROFILE_RS64, .platform = "power3", }, { /* Power3+ */ @@ -94,7 +94,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/power3", - .oprofile_type = RS64, + .oprofile_type = PPC_OPROFILE_RS64, .platform = "power3", }, { /* Northstar */ @@ -108,7 +108,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", - .oprofile_type = RS64, + .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* Pulsar */ @@ -122,7 +122,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", - .oprofile_type = RS64, + .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* I-star */ @@ -136,7 +136,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", - .oprofile_type = RS64, + .oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* S-star */ @@ -150,7 +150,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power3, .oprofile_cpu_type = "ppc64/rs64", - .oprofile_type = RS64, +
.oprofile_type = PPC_OPROFILE_RS64, .platform = "rs64", }, { /* Power4 */ @@ -164,7 +164,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power4", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power4", }, { /* Power4+ */ @@ -178,7 +178,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power4", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power4", }, { /* PPC970 */ @@ -193,7 +193,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, #endif /* CONFIG_PPC64 */ @@ -214,7 +214,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 8, .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, #endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */ @@ -230,7 +230,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 128, .cpu_setup = __setup_cpu_ppc970, .oprofile_cpu_type = "ppc64/970", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "ppc970", }, { /* Power5 GR */ @@ -244,7 +244,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power5", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power5", }, { /* Power5 GS */ @@ -258,7 +258,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_power4, .oprofile_cpu_type = "ppc64/power5+", - .oprofile_type = POWER4, + .oprofile_type = PPC_OPROFILE_POWER4, .platform = "power5+", }, { /* Cell Broadband Engine */ @@ -559,7 +559,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7450 2.1 */ @@ -573,7 +573,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7450 2.3 and newer */ @@ -587,7 +587,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7455 rev 1.x */ @@ -601,7 +601,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7455 rev 2.0 */ @@ -615,7 +615,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7455 others */ @@ -629,7 +629,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.0 */ @@ -643,7 +643,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.1 */ @@ -657,7 +657,7 @@ 
struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7447/7457 Rev 1.2 and later */ @@ -671,7 +671,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7447A */ @@ -685,7 +685,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 7448 */ @@ -699,7 +699,7 @@ struct cpu_spec cpu_specs[] = { .num_pmcs = 6, .cpu_setup = __setup_cpu_745x, .oprofile_cpu_type = "ppc/7450", - .oprofile_type = G4, + .oprofile_type = PPC_OPROFILE_G4, .platform = "ppc7450", }, { /* 82xx (8240, 8245, 8260 are all 603e cores) */ @@ -1056,7 +1056,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", - .oprofile_type = BOOKE, + .oprofile_type = PPC_OPROFILE_BOOKE, .platform = "ppc8540", }, { /* e500v2 */ @@ -1073,7 +1073,7 @@ struct cpu_spec cpu_specs[] = { .dcache_bsize = 32, .num_pmcs = 4, .oprofile_cpu_type = "ppc/e500", - .oprofile_type = BOOKE, + .oprofile_type = PPC_OPROFILE_BOOKE, .platform = "ppc8548", }, #endif diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 71615eb70b2b..cc2535be3a73 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c @@ -140,19 +140,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) switch (cur_cpu_spec->oprofile_type) { #ifdef CONFIG_PPC64 - case RS64: + case PPC_OPROFILE_RS64: model = &op_model_rs64; break; - case POWER4: + case PPC_OPROFILE_POWER4: model = &op_model_power4; break; #else - case G4: + case PPC_OPROFILE_G4: model = &op_model_7450; break; #endif #ifdef CONFIG_FSL_BOOKE - case BOOKE: + case PPC_OPROFILE_BOOKE: model = &op_model_fsl_booke; break; #endif diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h index 03017d905704..64210549f56b 100644 --- a/include/asm-powerpc/cputable.h +++ b/include/asm-powerpc/cputable.h @@ -32,11 +32,11 @@ struct cpu_spec; typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec); enum powerpc_oprofile_type { - INVALID = 0, - RS64 = 1, - POWER4 = 2, - G4 = 3, - BOOKE = 4, + PPC_OPROFILE_INVALID = 0, + PPC_OPROFILE_RS64 = 1, + PPC_OPROFILE_POWER4 = 2, + PPC_OPROFILE_G4 = 3, + PPC_OPROFILE_BOOKE = 4, }; struct cpu_spec { -- cgit v1.2.3-58-ga151
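A side note on the C rule the last commit message relies on: enumerators declared at file scope live in the ordinary identifier namespace, exactly like global variables and functions, so two unrelated headers pulled into the same translation unit cannot both introduce a name such as G4. The standalone sketch below uses hypothetical names (it is not taken from the oprofile or wacom sources) to illustrate the clash and the prefixing fix the patch applies; uncommenting the marked enumerator reproduces the kind of build error quoted in the commit message.

/* enum-clash.c: minimal sketch of the global enumerator namespace problem */

/* "header A": an oprofile-style enum at file scope */
enum demo_oprofile_type {
	DEMO_OPROFILE_INVALID = 0,
	G4 = 3,			/* claims the name "G4" for the whole translation unit */
};

/* "header B": an unrelated driver that also wants the name G4 */
enum demo_tablet_type {
	DEMO_PENPARTNER = 0,
	/* G4 = 1, */		/* uncommenting this line makes the build fail */
};

/* Prefixed enumerators, as the patch introduces, cannot collide. */
enum demo_oprofile_type_fixed {
	PPC_OPROFILE_G4 = 3,
};

int main(void)
{
	return (G4 == PPC_OPROFILE_G4) ? 0 : 1;	/* both names now coexist */
}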
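Relatedly, for the AT_PLATFORM patch earlier in this series: once the kernel exports cur_cpu_spec->platform through the ELF auxiliary vector, userspace can read the string back without parsing /proc/cpuinfo. The sketch below shows only the consumer side and is not part of the patch; it uses glibc's getauxval(), which postdates this kernel change (glibc 2.16), so contemporary code would instead have walked the auxv handed to the process or read /proc/self/auxv.

/* at-platform.c: print the platform string supplied by the kernel (illustrative) */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_PLATFORM carries a pointer to a NUL-terminated string such as
	 * "power5+" or "ppc7450"; getauxval() returns 0 if the entry is absent. */
	const char *platform = (const char *)getauxval(AT_PLATFORM);

	if (platform)
		printf("AT_PLATFORM = %s\n", platform);
	else
		printf("AT_PLATFORM not supplied\n");
	return 0;
}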