author | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-30 08:10:12 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-04-30 08:10:12 -0700
commit | 24a77daf3d80bddcece044e6dc3675e427eef3f3 (patch)
tree | 2c5e0b0bea394d6fe62c5d5857c252e83e48ac48 /include
parent | e389f9aec689209724105ae80a6c91fd2e747bc9 (diff)
parent | f900e9777fc9b65140cb9570438597bc8fae56ab (diff)
Merge branch 'for-2.6.22' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'for-2.6.22' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (255 commits)
[POWERPC] Remove dev_dbg redefinition in drivers/ps3/vuart.c
[POWERPC] remove kernel module option for booke wdt
[POWERPC] Avoid putting cpu node twice
[POWERPC] Spinlock initializer cleanup
[POWERPC] ppc4xx_sgdma needs dma-mapping.h
[POWERPC] arch/powerpc/sysdev/timer.c build fix
[POWERPC] get_property cleanups
[POWERPC] Remove the unused HTDMSOUND driver
[POWERPC] cell: cbe_cpufreq cleanup and crash fix
[POWERPC] Declare enable_kernel_spe in a header
[POWERPC] Add dt_xlate_addr() to bootwrapper
[POWERPC] bootwrapper: CONFIG_ -> CONFIG_DEVICE_TREE
[POWERPC] Don't define a custom bd_t for Xilinx Virtex based boards.
[POWERPC] Add sane defaults for Xilinx EDK generated xparameters files
[POWERPC] Add uartlite boot console driver for the zImage wrapper
[POWERPC] Stop using ppc_sys for Xilinx Virtex boards
[POWERPC] New registration for common Xilinx Virtex ppc405 platform devices
[POWERPC] Merge common virtex header files
[POWERPC] Rework Kconfig dependencies for Xilinx Virtex ppc405 platform
[POWERPC] Clean up cpufreq Kconfig dependencies
...
Diffstat (limited to 'include')
38 files changed, 812 insertions(+), 682 deletions(-)
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
index c89bd58ee283..c19e7367fce6 100644
--- a/include/asm-powerpc/asm-compat.h
+++ b/include/asm-powerpc/asm-compat.h
@@ -78,6 +78,15 @@
 #define PPC_STLCX	stringify_in_c(stdcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzd)

+/* Move to CR, single-entry optimized version. Only available
+ * on POWER4 and later.
+ */
+#ifdef CONFIG_POWER4_ONLY
+#define PPC_MTOCRF	stringify_in_c(mtocrf)
+#else
+#define PPC_MTOCRF	stringify_in_c(mtcrf)
+#endif
+
 #else /* 32-bit */

 /* operations for longs and pointers */
@@ -89,6 +98,7 @@
 #define PPC_LLARX	stringify_in_c(lwarx)
 #define PPC_STLCX	stringify_in_c(stwcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzw)
+#define PPC_MTOCRF	stringify_in_c(mtcrf)

 #endif
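For reference, the stringify_in_c() idiom that the PPC_* mnemonic macros above rely on turns a bare assembler mnemonic into a string usable in both .S files and inline asm. A minimal userspace sketch of the idiom (the kernel's own definition lives elsewhere in asm-compat.h and may differ in detail):

```c
/* Sketch of the stringify_in_c() trick; compile and run as plain C.
 * With CONFIG_POWER4_ONLY undefined this picks the classic mtcrf form,
 * mirroring the #ifdef added in the hunk above.
 */
#include <stdio.h>

#define __stringify_in_c(...) #__VA_ARGS__
#define stringify_in_c(...)   __stringify_in_c(__VA_ARGS__) " "

#ifdef CONFIG_POWER4_ONLY
#define PPC_MTOCRF stringify_in_c(mtocrf)  /* single-field, POWER4+ */
#else
#define PPC_MTOCRF stringify_in_c(mtcrf)   /* classic form */
#endif

int main(void)
{
	/* Expands to a string literal, e.g. usable inside asm("..."). */
	printf("mnemonic: %s\n", PPC_MTOCRF);
	return 0;
}
```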
diff --git a/include/asm-powerpc/cacheflush.h b/include/asm-powerpc/cacheflush.h
index 08e93e789219..ba667a383b8c 100644
--- a/include/asm-powerpc/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -64,6 +64,12 @@ extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
 	memcpy(dst, src, len)

+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_CACHEFLUSH_H */
diff --git a/include/asm-powerpc/cell-pmu.h b/include/asm-powerpc/cell-pmu.h
index 35b95773746c..8066eede3a0c 100644
--- a/include/asm-powerpc/cell-pmu.h
+++ b/include/asm-powerpc/cell-pmu.h
@@ -97,11 +97,6 @@ extern void cbe_disable_pm_interrupts(u32 cpu);
 extern u32 cbe_get_and_clear_pm_interrupts(u32 cpu);
 extern void cbe_sync_irq(int node);

-/* Utility functions, macros */
-extern u32 cbe_get_hw_thread_id(int cpu);
-
-#define cbe_cpu_to_node(cpu) ((cpu) >> 1)
-
 #define CBE_COUNT_SUPERVISOR_MODE 0
 #define CBE_COUNT_HYPERVISOR_MODE 1
 #define CBE_COUNT_PROBLEM_MODE 2
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index e870b5393175..434524931ef3 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -48,6 +48,7 @@ enum powerpc_oprofile_type {
 	PPC_OPROFILE_G4 = 3,
 	PPC_OPROFILE_BOOKE = 4,
 	PPC_OPROFILE_CELL = 5,
+	PPC_OPROFILE_PA6T = 6,
 };

 enum powerpc_pmc_type {
@@ -223,6 +224,10 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_PPC_LE)
+#define CPU_FTRS_750CL	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
 #define CPU_FTRS_750FX1	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
@@ -235,9 +240,9 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
 	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
-#define CPU_FTRS_750GX	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_MAYBE_CAN_DOZE | \
-	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_TAU | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+#define CPU_FTRS_750GX	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
+	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
+	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_DUAL_PLL_750FX | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | CPU_FTR_SPLIT_ID_CACHE | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
index b8708aedf925..e2c7f06931e7 100644
--- a/include/asm-powerpc/current.h
+++ b/include/asm-powerpc/current.h
@@ -12,6 +12,7 @@ struct task_struct;

 #ifdef __powerpc64__

+#include <linux/stddef.h>
 #include <asm/paca.h>

 static inline struct task_struct *get_current(void)
diff --git a/include/asm-powerpc/edac.h b/include/asm-powerpc/edac.h
new file mode 100644
index 000000000000..6ead88bbfbb8
--- /dev/null
+++ b/include/asm-powerpc/edac.h
@@ -0,0 +1,40 @@
+/*
+ * PPC EDAC common defs
+ *
+ * Author: Dave Jiang <djiang@mvista.com>
+ *
+ * 2007 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per arch atomic_scrub() that EDAC use for software
+ * ECC scrubbing. It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static __inline__ void atomic_scrub(void *va, u32 size)
+{
+	unsigned int *virt_addr = va;
+	unsigned int temp;
+	unsigned int i;
+
+	for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+		/* Very carefully read and write to memory atomically
+		 * so we are interrupt, DMA and SMP safe.
+		 */
+		__asm__ __volatile__ ("\n\
+			1: lwarx %0,0,%1\n\
+			stwcx. %0,0,%1\n\
+			bne- 1b\n\
+			isync"
+			: "=&r"(temp)
+			: "r"(virt_addr)
+			: "cr0", "memory");
+	}
+}
+
+#endif
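The lwarx/stwcx. pair in atomic_scrub() above is PowerPC's load-reserve/store-conditional: each word is re-written with its own value so the ECC hardware gets a chance to correct single-bit errors on the writeback. A rough portable analogue using GCC/Clang __atomic builtins (my construction for illustration, not part of this patch):

```c
/* Hedged sketch of atomic_scrub(): atomically read each word and write
 * the same value back. The compare-exchange loop stands in for the
 * PowerPC lwarx/stwcx. reservation loop.
 */
#include <stdint.h>
#include <stddef.h>

static void atomic_scrub_sketch(void *va, uint32_t size)
{
	uint32_t *p = va;
	size_t i;

	for (i = 0; i < size / sizeof(*p); i++, p++) {
		uint32_t v = __atomic_load_n(p, __ATOMIC_SEQ_CST);

		/* On failure v is refreshed with the current contents,
		 * so we always write back whatever is really there. */
		while (!__atomic_compare_exchange_n(p, &v, v, 0,
						    __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST))
			;
	}
}
```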
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
index dc6bf0ffb796..cc3cb04539ac 100644
--- a/include/asm-powerpc/eeh_event.h
+++ b/include/asm-powerpc/eeh_event.h
@@ -30,8 +30,6 @@ struct eeh_event {
 	struct list_head list;
 	struct device_node *dn;   /* struct device node */
 	struct pci_dev *dev;      /* affected device */
-	enum pci_channel_state state; /* PCI bus state for the affected device */
-	int time_unavail;    /* milliseconds until device might be available */
 };

 /**
@@ -46,9 +44,7 @@ struct eeh_event {
  * (from a workqueue).
  */
 int eeh_send_failure_event (struct device_node *dn,
-			    struct pci_dev *dev,
-			    enum pci_channel_state state,
-			    int time_unavail);
+			    struct pci_dev *dev);

 /* Main recovery function */
 struct pci_dn * handle_eeh_events (struct eeh_event *);
diff --git a/include/asm-powerpc/ibmebus.h b/include/asm-powerpc/ibmebus.h
index 66112114b8c5..87d396e28db2 100644
--- a/include/asm-powerpc/ibmebus.h
+++ b/include/asm-powerpc/ibmebus.h
@@ -2,36 +2,37 @@
 * IBM PowerPC eBus Infrastructure Support.
 *
 * Copyright (c) 2005 IBM Corporation
+ *  Joachim Fenkes <fenkes@de.ibm.com>
 *  Heiko J Schick <schickhj@de.ibm.com>
- *
+ *
 * All rights reserved.
 *
- * This source code is distributed under a dual license of GPL v2.0 and OpenIB
- * BSD.
+ * This source code is distributed under a dual license of GPL v2.0 and OpenIB
+ * BSD.
 *
 * OpenIB BSD License
 *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
 *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
 *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
- * provided with the distribution.
+ * provided with the distribution.
 *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
@@ -46,12 +47,11 @@

 extern struct bus_type ibmebus_bus_type;

-struct ibmebus_dev {
-	const char *name;
+struct ibmebus_dev {
 	struct of_device ofdev;
 };

-struct ibmebus_driver {
+struct ibmebus_driver {
 	char *name;
 	struct of_device_id *id_table;
 	int (*probe) (struct ibmebus_dev *dev, const struct of_device_id *id);
@@ -63,7 +63,7 @@ int ibmebus_register_driver(struct ibmebus_driver *drv);
 void ibmebus_unregister_driver(struct ibmebus_driver *drv);

 int ibmebus_request_irq(struct ibmebus_dev *dev,
-			u32 ist,
+			u32 ist,
 			irq_handler_t handler,
 			unsigned long irq_flags, const char * devname,
 			void *dev_id);
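A minimal sketch of how a driver might hook into this bus, assuming the 2.6.22-era interface declared above. The "example" names are invented here, and the .remove field is an assumption (the hunk above cuts off after .probe):

```c
/* Hypothetical ibmebus driver skeleton; only ibmebus_register_driver(),
 * struct ibmebus_driver and struct of_device_id come from the header
 * above, everything named "example" is illustrative.
 */
#include <linux/module.h>
#include <asm/ibmebus.h>

static int example_probe(struct ibmebus_dev *dev,
			 const struct of_device_id *id)
{
	/* Set up the device here; returning 0 claims it. */
	return 0;
}

static int example_remove(struct ibmebus_dev *dev)
{
	return 0;
}

static struct of_device_id example_ids[] = {
	{ .compatible = "IBM,example" },
	{ }
};

static struct ibmebus_driver example_driver = {
	.name     = "example",
	.id_table = example_ids,
	.probe    = example_probe,
	.remove   = example_remove,	/* assumed field, see note above */
};

static int __init example_init(void)
{
	return ibmebus_register_driver(&example_driver);
}
module_init(example_init);
```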
diff --git a/include/asm-powerpc/immap_86xx.h b/include/asm-powerpc/immap_86xx.h
index d905b6622268..59b9e07b8e99 100644
--- a/include/asm-powerpc/immap_86xx.h
+++ b/include/asm-powerpc/immap_86xx.h
@@ -85,81 +85,6 @@ typedef struct ccsr_pci {
 	char res19[472];
 } ccsr_pci_t;

-/* PCI Express Registers */
-typedef struct ccsr_pex {
-	uint pex_config_addr;	/* 0x.000 - PCI Express Configuration Address Register */
-	uint pex_config_data;	/* 0x.004 - PCI Express Configuration Data Register */
-	char res1[4];
-	uint pex_otb_cpl_tor;	/* 0x.00c - PCI Express Outbound completion timeout register */
-	uint pex_conf_tor;	/* 0x.010 - PCI Express configuration timeout register */
-	char res2[12];
-	uint pex_pme_mes_dr;	/* 0x.020 - PCI Express PME and message detect register */
-	uint pex_pme_mes_disr;	/* 0x.024 - PCI Express PME and message disable register */
-	uint pex_pme_mes_ier;	/* 0x.028 - PCI Express PME and message interrupt enable register */
-	uint pex_pmcr;		/* 0x.02c - PCI Express power management command register */
-	char res3[3024];
-	uint pexotar0;		/* 0x.c00 - PCI Express outbound translation address register 0 */
-	uint pexotear0;		/* 0x.c04 - PCI Express outbound translation extended address register 0*/
-	char res4[8];
-	uint pexowar0;		/* 0x.c10 - PCI Express outbound window attributes register 0*/
-	char res5[12];
-	uint pexotar1;		/* 0x.c20 - PCI Express outbound translation address register 1 */
-	uint pexotear1;		/* 0x.c24 - PCI Express outbound translation extended address register 1*/
-	uint pexowbar1;		/* 0x.c28 - PCI Express outbound window base address register 1*/
-	char res6[4];
-	uint pexowar1;		/* 0x.c30 - PCI Express outbound window attributes register 1*/
-	char res7[12];
-	uint pexotar2;		/* 0x.c40 - PCI Express outbound translation address register 2 */
-	uint pexotear2;		/* 0x.c44 - PCI Express outbound translation extended address register 2*/
-	uint pexowbar2;		/* 0x.c48 - PCI Express outbound window base address register 2*/
-	char res8[4];
-	uint pexowar2;		/* 0x.c50 - PCI Express outbound window attributes register 2*/
-	char res9[12];
-	uint pexotar3;		/* 0x.c60 - PCI Express outbound translation address register 3 */
-	uint pexotear3;		/* 0x.c64 - PCI Express outbound translation extended address register 3*/
-	uint pexowbar3;		/* 0x.c68 - PCI Express outbound window base address register 3*/
-	char res10[4];
-	uint pexowar3;		/* 0x.c70 - PCI Express outbound window attributes register 3*/
-	char res11[12];
-	uint pexotar4;		/* 0x.c80 - PCI Express outbound translation address register 4 */
-	uint pexotear4;		/* 0x.c84 - PCI Express outbound translation extended address register 4*/
-	uint pexowbar4;		/* 0x.c88 - PCI Express outbound window base address register 4*/
-	char res12[4];
-	uint pexowar4;		/* 0x.c90 - PCI Express outbound window attributes register 4*/
-	char res13[12];
-	char res14[256];
-	uint pexitar3;		/* 0x.da0 - PCI Express inbound translation address register 3 */
-	char res15[4];
-	uint pexiwbar3;		/* 0x.da8 - PCI Express inbound window base address register 3 */
-	uint pexiwbear3;	/* 0x.dac - PCI Express inbound window base extended address register 3 */
-	uint pexiwar3;		/* 0x.db0 - PCI Express inbound window attributes register 3 */
-	char res16[12];
-	uint pexitar2;		/* 0x.dc0 - PCI Express inbound translation address register 2 */
-	char res17[4];
-	uint pexiwbar2;		/* 0x.dc8 - PCI Express inbound window base address register 2 */
-	uint pexiwbear2;	/* 0x.dcc - PCI Express inbound window base extended address register 2 */
-	uint pexiwar2;		/* 0x.dd0 - PCI Express inbound window attributes register 2 */
-	char res18[12];
-	uint pexitar1;		/* 0x.de0 - PCI Express inbound translation address register 2 */
-	char res19[4];
-	uint pexiwbar1;		/* 0x.de8 - PCI Express inbound window base address register 2 */
-	uint pexiwbear1;	/* 0x.dec - PCI Express inbound window base extended address register 2 */
-	uint pexiwar1;		/* 0x.df0 - PCI Express inbound window attributes register 2 */
-	char res20[12];
-	uint pex_err_dr;	/* 0x.e00 - PCI Express error detect register */
-	char res21[4];
-	uint pex_err_en;	/* 0x.e08 - PCI Express error interrupt enable register */
-	char res22[4];
-	uint pex_err_disr;	/* 0x.e10 - PCI Express error disable register */
-	char res23[12];
-	uint pex_err_cap_stat;	/* 0x.e20 - PCI Express error capture status register */
-	char res24[4];
-	uint pex_err_cap_r0;	/* 0x.e28 - PCI Express error capture register 0 */
-	uint pex_err_cap_r1;	/* 0x.e2c - PCI Express error capture register 0 */
-	uint pex_err_cap_r2;	/* 0x.e30 - PCI Express error capture register 0 */
-	uint pex_err_cap_r3;	/* 0x.e34 - PCI Express error capture register 0 */
-} ccsr_pex_t;
-
 /* Global Utility Registers */
 typedef struct ccsr_guts {
 	uint porpllsr;		/* 0x.0000 - POR PLL Ratio Status Register */
diff --git a/include/asm-powerpc/io.h b/include/asm-powerpc/io.h
index 301c9bb308b1..350c9bdb31dc 100644
--- a/include/asm-powerpc/io.h
+++ b/include/asm-powerpc/io.h
@@ -11,7 +11,12 @@
 /* Check of existence of legacy devices */
 extern int check_legacy_ioport(unsigned long base_port);
-#define PNPBIOS_BASE	0xf000	/* only relevant for PReP */
+#define I8042_DATA_REG	0x60
+#define FDC_BASE	0x3f0
+/* only relevant for PReP */
+#define _PIDXR		0x279
+#define _PNPWRP		0xa79
+#define PNPBIOS_BASE	0xf000

 #include <linux/compiler.h>
 #include <asm/page.h>
diff --git a/include/asm-powerpc/kprobes.h b/include/asm-powerpc/kprobes.h
index 3a5dd492588f..f850ca7020ed 100644
--- a/include/asm-powerpc/kprobes.h
+++ b/include/asm-powerpc/kprobes.h
@@ -87,6 +87,11 @@ extern void arch_remove_kprobe(struct kprobe *p);
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
+	/*
+	 * Set in kprobes code, initially to 0. If the instruction can be
+	 * emulated, this is set to 1, if not, to -1.
+	 */
+	int boostable;
 };

 struct prev_kprobe {
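The io.h hunk gives legacy port probing a platform-independent entry point. A hedged sketch of the usage pattern (the probe function is invented; check_legacy_ioport() and I8042_DATA_REG are from the header above, and a nonzero return is taken to mean "port absent"):

```c
/* Illustrative only: ask the platform whether the legacy i8042 port
 * exists before touching it; many PowerPC machines have none at all.
 */
#include <linux/errno.h>
#include <asm/io.h>

static int example_i8042_probe(void)
{
	if (check_legacy_ioport(I8042_DATA_REG))
		return -ENODEV;

	/* ... safe to reserve and program the port from here on ... */
	return 0;
}
```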
diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h
index 1b04e5723548..b204926ce913 100644
--- a/include/asm-powerpc/machdep.h
+++ b/include/asm-powerpc/machdep.h
@@ -153,9 +153,6 @@ struct machdep_calls {
 	 */
 	long (*feature_call)(unsigned int feature, ...);

-	/* Check availability of legacy devices like i8042 */
-	int (*check_legacy_ioport)(unsigned int baseport);
-
 	/* Get legacy PCI/IDE interrupt mapping */
 	int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
new file mode 100644
index 000000000000..6739457d8bc0
--- /dev/null
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -0,0 +1,400 @@
+#ifndef _ASM_POWERPC_MMU_HASH64_H_
+#define _ASM_POWERPC_MMU_HASH64_H_
+/*
+ * PowerPC64 memory management structures
+ *
+ * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
+ *   PPC64 rework.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/asm-compat.h>
+#include <asm/page.h>
+
+/*
+ * Segment table
+ */
+
+#define STE_ESID_V	0x80
+#define STE_ESID_KS	0x20
+#define STE_ESID_KP	0x10
+#define STE_ESID_N	0x08
+
+#define STE_VSID_SHIFT	12
+
+/* Location of cpu0's segment table */
+#define STAB0_PAGE	0x6
+#define STAB0_OFFSET	(STAB0_PAGE << 12)
+#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
+
+#ifndef __ASSEMBLY__
+extern char initial_stab[];
+#endif /* ! __ASSEMBLY */
+
+/*
+ * SLB
+ */
+
+#define SLB_NUM_BOLTED		3
+#define SLB_CACHE_ENTRIES	8
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V	ASM_CONST(0x0000000008000000) /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT	12
+#define SLB_VSID_B	ASM_CONST(0xc000000000000000)
+#define SLB_VSID_B_256M	ASM_CONST(0x0000000000000000)
+#define SLB_VSID_B_1T	ASM_CONST(0x4000000000000000)
+#define SLB_VSID_KS	ASM_CONST(0x0000000000000800)
+#define SLB_VSID_KP	ASM_CONST(0x0000000000000400)
+#define SLB_VSID_N	ASM_CONST(0x0000000000000200) /* no-execute */
+#define SLB_VSID_L	ASM_CONST(0x0000000000000100)
+#define SLB_VSID_C	ASM_CONST(0x0000000000000080) /* class */
+#define SLB_VSID_LP	ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LP_00	ASM_CONST(0x0000000000000000)
+#define SLB_VSID_LP_01	ASM_CONST(0x0000000000000010)
+#define SLB_VSID_LP_10	ASM_CONST(0x0000000000000020)
+#define SLB_VSID_LP_11	ASM_CONST(0x0000000000000030)
+#define SLB_VSID_LLP	(SLB_VSID_L|SLB_VSID_LP)
+
+#define SLB_VSID_KERNEL	(SLB_VSID_KP)
+#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
+
+#define SLBIE_C		(0x08000000)
+
+/*
+ * Hash table
+ */
+
+#define HPTES_PER_GROUP 8
+
+#define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
+#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
+#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
+
+#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_RPN_SHIFT	12
+#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
+#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
+#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+#define HPTE_R_N		ASM_CONST(0x0000000000000004)
+#define HPTE_R_C		ASM_CONST(0x0000000000000080)
+#define HPTE_R_R		ASM_CONST(0x0000000000000100)
+
+/* Values for PP (assumes Ks=0, Kp=1) */
+/* pp0 will always be 0 for linux */
+#define PP_RWXX	0	/* Supervisor read/write, User none */
+#define PP_RWRX	1	/* Supervisor read/write, User read */
+#define PP_RWRW	2	/* Supervisor read/write, User read/write */
+#define PP_RXRX	3	/* Supervisor read, User read */
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+	unsigned long v;
+	unsigned long r;
+} hpte_t;
+
+extern hpte_t *htab_address;
+extern unsigned long htab_size_bytes;
+extern unsigned long htab_hash_mask;
+
+/*
+ * Page size definition
+ *
+ *    shift : is the "PAGE_SHIFT" value for that page size
+ *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
+ *            directly to a slbmte "vsid" value
+ *    penc  : is the HPTE encoding mask for the "LP" field:
+ *
+ */
+struct mmu_psize_def
+{
+	unsigned int	shift;	/* number of bits */
+	unsigned int	penc;	/* HPTE encoding */
+	unsigned int	tlbiel;	/* tlbiel supported for that page size */
+	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
+	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The kernel uses the constants below to index in the page sizes array.
+ * The use of fixed constants for this purpose is better for the
+ * performance of the low level hash refill handlers.
+ *
+ * A non-supported page size has a "shift" field set to 0
+ *
+ * Any new page size being implemented can get a new entry in here. Whether
+ * the kernel will use it or not is a different matter though. The actual page
+ * size used by hugetlbfs is not defined here and may be made variable
+ */
+
+#define MMU_PAGE_4K		0	/* 4K */
+#define MMU_PAGE_64K		1	/* 64K */
+#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
+#define MMU_PAGE_1M		3	/* 1M */
+#define MMU_PAGE_16M		4	/* 16M */
+#define MMU_PAGE_16G		5	/* 16G */
+#define MMU_PAGE_COUNT		6
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The current system page sizes
+ */
+extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+extern int mmu_linear_psize;
+extern int mmu_virtual_psize;
+extern int mmu_vmalloc_psize;
+extern int mmu_io_psize;
+
+/*
+ * If the processor supports 64k normal pages but not 64k cache
+ * inhibited pages, we have to be prepared to switch processes
+ * to use 4k pages when they create cache-inhibited mappings.
+ * If this is the case, mmu_ci_restrictions will be set to 1.
+ */
+extern int mmu_ci_restrictions;
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * The page size index of the huge pages for use by hugetlbfs
+ */
+extern int mmu_huge_psize;
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+/*
+ * This function sets the AVPN and L fields of the HPTE appropriately
+ * for the page size
+ */
+static inline unsigned long hpte_encode_v(unsigned long va, int psize)
+{
+	unsigned long v =
+	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
+	v <<= HPTE_V_AVPN_SHIFT;
+	if (psize != MMU_PAGE_4K)
+		v |= HPTE_V_LARGE;
+	return v;
+}
+
+/*
+ * This function sets the ARPN, and LP fields of the HPTE appropriately
+ * for the page size. We assume the pa is already "clean" that is properly
+ * aligned for the requested page size
+ */
+static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
+{
+	unsigned long r;
+
+	/* A 4K page needs no special encoding */
+	if (psize == MMU_PAGE_4K)
+		return pa & HPTE_R_RPN;
+	else {
+		unsigned int penc = mmu_psize_defs[psize].penc;
+		unsigned int shift = mmu_psize_defs[psize].shift;
+		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
+	}
+	return r;
+}
+
+/*
+ * This hashes a virtual address for a 256Mb segment only for now
+ */
+
+static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
+{
+	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
+}
+
+extern int __hash_page_4K(unsigned long ea, unsigned long access,
+			  unsigned long vsid, pte_t *ptep, unsigned long trap,
+			  unsigned int local);
+extern int __hash_page_64K(unsigned long ea, unsigned long access,
+			   unsigned long vsid, pte_t *ptep, unsigned long trap,
+			   unsigned int local);
+struct mm_struct;
+extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
+extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
+
+extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
+			     unsigned long pstart, unsigned long mode,
+			     int psize);
+
+extern void htab_initialize(void);
+extern void htab_initialize_secondary(void);
+extern void hpte_init_native(void);
+extern void hpte_init_lpar(void);
+extern void hpte_init_iSeries(void);
+extern void hpte_init_beat(void);
+
+extern void stabs_alloc(void);
+extern void slb_initialize(void);
+extern void slb_flush_and_rebolt(void);
+extern void stab_initialize(unsigned long stab);
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * VSID allocation
+ *
+ * We first generate a 36-bit "proto-VSID". For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ *	(context << 15) | (esid & 0x7fff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
+ *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved. VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ *	- We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ *	- We allow for 15 significant bits of ESID and 20 bits of
+ * context for user addresses. i.e. 8T (43 bits) of address space for
+ * up to 1M contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ *	- The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results). The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
+ */
+/*
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate (slb_low.S), do_stab_bolted
+ * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
+ */
+
+#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
+#define VSID_BITS	36
+#define VSID_MODULUS	((1UL<<VSID_BITS)-1)
+
+#define CONTEXT_BITS	19
+#define USER_ESID_BITS	16
+
+#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
+
+/*
+ * This macro generates asm code to compute the VSID scramble
+ * function.  Used in slb_allocate() and do_stab_bolted.  The function
+ * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
+ *
+ *	rt = register containing the proto-VSID and into which the
+ *		VSID will be stored
+ *	rx = scratch register (clobbered)
+ *
+ *	- rt and rx must be different registers
+ *	- The answer will end up in the low 36 bits of rt.  The higher
+ *		bits may contain other garbage, so you may need to mask the
+ *		result.
+ */
+#define ASM_VSID_SCRAMBLE(rt, rx)	\
+	lis	rx,VSID_MULTIPLIER@h;	\
+	ori	rx,rx,VSID_MULTIPLIER@l;	\
+	mulld	rt,rt,rx;	/* rt = rt * MULTIPLIER */	\
+	\
+	srdi	rx,rt,VSID_BITS;	\
+	clrldi	rt,rt,(64-VSID_BITS);	\
+	add	rt,rt,rx;	/* add high and low bits */	\
+	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and	\
+	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
+	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
+	 * the bit clear, r3 already has the answer we want, if it	\
+	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
+	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
+	addi	rx,rt,1;	\
+	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */	\
+	add	rt,rt,rx
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t id;
+	u16 user_psize;		/* page size index */
+	u16 sllp;		/* SLB entry page size encoding */
+#ifdef CONFIG_HUGETLB_PAGE
+	u16 low_htlb_areas, high_htlb_areas;
+#endif
+	unsigned long vdso_base;
+} mm_context_t;
+
+
+static inline unsigned long vsid_scramble(unsigned long protovsid)
+{
+#if 0
+	/* The code below is equivalent to this function for arguments
+	 * < 2^VSID_BITS, which is all this should ever be called
+	 * with.  However gcc is not clever enough to compute the
+	 * modulus (2^n-1) without a second multiply. */
+	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
+#else /* 1 */
+	unsigned long x;
+
+	x = protovsid * VSID_MULTIPLIER;
+	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
+	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
+#endif /* 1 */
+}
+
+/* This is only valid for addresses >= KERNELBASE */
+static inline unsigned long get_kernel_vsid(unsigned long ea)
+{
+	return vsid_scramble(ea >> SID_SHIFT);
+}
+
+/* This is only valid for user addresses (which are below 2^41) */
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
+{
+	return vsid_scramble((context << USER_ESID_BITS)
+			     | (ea >> SID_SHIFT));
+}
+
+#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
+#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
+
+/* Physical address used by some IO functions */
+typedef unsigned long phys_addr_t;
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
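The vsid_scramble() trick above reduces modulo 2^36-1 without a divide: since 2^36 is congruent to 1 mod 2^36-1, the high bits of the product can simply be folded back into the low bits. A self-contained demo checking it against the direct % operator (the test harness is mine; the constants and the function body mirror the header):

```c
/* Demo of the fold-based reduction mod 2^36-1 used by vsid_scramble().
 * Build with any C99 compiler on a 64-bit target and run.
 */
#include <stdio.h>

#define VSID_MULTIPLIER 200730139UL	/* 28-bit prime, as in the header */
#define VSID_BITS       36
#define VSID_MODULUS    ((1UL << VSID_BITS) - 1)

static unsigned long vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	/* Fold: x = hi*2^36 + lo == hi + lo (mod 2^36-1); the final
	 * (x+1) >> 36 carry handles the one remaining wrap case. */
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
	unsigned long samples[] = { 0, 1, 0x7fff, 0xC0000000UL, 123456789UL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long p = samples[i];
		printf("%#lx -> %#lx (direct %% gives %#lx)\n", p,
		       vsid_scramble(p),
		       (p * VSID_MULTIPLIER) % VSID_MODULUS);
	}
	return 0;
}
```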
diff --git a/include/asm-powerpc/mmu.h b/include/asm-powerpc/mmu.h
index 200055a4b82b..06b3e6d336cb 100644
--- a/include/asm-powerpc/mmu.h
+++ b/include/asm-powerpc/mmu.h
@@ -2,407 +2,14 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__

-#ifndef CONFIG_PPC64
-#include <asm-ppc/mmu.h>
+#ifdef CONFIG_PPC64
+/* 64-bit classic hash table MMU */
+# include <asm/mmu-hash64.h>
 #else
-
-/*
- * PowerPC memory management structures
- *
- * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
- *   PPC64 rework.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/asm-compat.h>
-#include <asm/page.h>
-
-/*
- * Segment table
- */
-
-#define STE_ESID_V	0x80
-#define STE_ESID_KS	0x20
-#define STE_ESID_KP	0x10
-#define STE_ESID_N	0x08
-
-#define STE_VSID_SHIFT	12
-
-/* Location of cpu0's segment table */
-#define STAB0_PAGE	0x6
-#define STAB0_OFFSET	(STAB0_PAGE << 12)
-#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
-
-#ifndef __ASSEMBLY__
-extern char initial_stab[];
-#endif /* ! __ASSEMBLY */
-
-/*
- * SLB
- */
-
-#define SLB_NUM_BOLTED		3
-#define SLB_CACHE_ENTRIES	8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V	ASM_CONST(0x0000000008000000) /* valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT	12
-#define SLB_VSID_B	ASM_CONST(0xc000000000000000)
-#define SLB_VSID_B_256M	ASM_CONST(0x0000000000000000)
-#define SLB_VSID_B_1T	ASM_CONST(0x4000000000000000)
-#define SLB_VSID_KS	ASM_CONST(0x0000000000000800)
-#define SLB_VSID_KP	ASM_CONST(0x0000000000000400)
-#define SLB_VSID_N	ASM_CONST(0x0000000000000200) /* no-execute */
-#define SLB_VSID_L	ASM_CONST(0x0000000000000100)
-#define SLB_VSID_C	ASM_CONST(0x0000000000000080) /* class */
-#define SLB_VSID_LP	ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LP_00	ASM_CONST(0x0000000000000000)
-#define SLB_VSID_LP_01	ASM_CONST(0x0000000000000010)
-#define SLB_VSID_LP_10	ASM_CONST(0x0000000000000020)
-#define SLB_VSID_LP_11	ASM_CONST(0x0000000000000030)
-#define SLB_VSID_LLP	(SLB_VSID_L|SLB_VSID_LP)
-
-#define SLB_VSID_KERNEL	(SLB_VSID_KP)
-#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
-
-#define SLBIE_C		(0x08000000)
-
-/*
- * Hash table
- */
-
-#define HPTES_PER_GROUP 8
-
-#define HPTE_V_AVPN_SHIFT	7
-#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
-#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
-#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
-#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
-#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
-#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
-#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
-#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
-
-#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
-#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
-#define HPTE_R_RPN_SHIFT	12
-#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
-#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
-#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
-#define HPTE_R_N		ASM_CONST(0x0000000000000004)
-#define HPTE_R_C		ASM_CONST(0x0000000000000080)
-#define HPTE_R_R		ASM_CONST(0x0000000000000100)
-
-/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
-#define PP_RWXX	0	/* Supervisor read/write, User none */
-#define PP_RWRX	1	/* Supervisor read/write, User read */
-#define PP_RWRW	2	/* Supervisor read/write, User read/write */
-#define PP_RXRX	3	/* Supervisor read, User read */
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-	unsigned long v;
-	unsigned long r;
-} hpte_t;
-
-extern hpte_t *htab_address;
-extern unsigned long htab_size_bytes;
-extern unsigned long htab_hash_mask;
-
-/*
- * Page size definition
- *
- *    shift : is the "PAGE_SHIFT" value for that page size
- *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
- *            directly to a slbmte "vsid" value
- *    penc  : is the HPTE encoding mask for the "LP" field:
- *
- */
-struct mmu_psize_def
-{
-	unsigned int	shift;	/* number of bits */
-	unsigned int	penc;	/* HPTE encoding */
-	unsigned int	tlbiel;	/* tlbiel supported for that page size */
-	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
-	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
-};
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * The kernel use the constants below to index in the page sizes array.
- * The use of fixed constants for this purpose is better for performances
- * of the low level hash refill handlers.
- *
- * A non supported page size has a "shift" field set to 0
- *
- * Any new page size being implemented can get a new entry in here. Whether
- * the kernel will use it or not is a different matter though. The actual page
- * size used by hugetlbfs is not defined here and may be made variable
- */
-
-#define MMU_PAGE_4K		0	/* 4K */
-#define MMU_PAGE_64K		1	/* 64K */
-#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
-#define MMU_PAGE_1M		3	/* 1M */
-#define MMU_PAGE_16M		4	/* 16M */
-#define MMU_PAGE_16G		5	/* 16G */
-#define MMU_PAGE_COUNT		6
-
-#ifndef __ASSEMBLY__
-
-/*
- * The current system page sizes
- */
-extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-extern int mmu_linear_psize;
-extern int mmu_virtual_psize;
-extern int mmu_vmalloc_psize;
-extern int mmu_io_psize;
-
-/*
- * If the processor supports 64k normal pages but not 64k cache
- * inhibited pages, we have to be prepared to switch processes
- * to use 4k pages when they create cache-inhibited mappings.
- * If this is the case, mmu_ci_restrictions will be set to 1.
- */
-extern int mmu_ci_restrictions;
-
-#ifdef CONFIG_HUGETLB_PAGE
-/*
- * The page size index of the huge pages for use by hugetlbfs
- */
-extern int mmu_huge_psize;
-
-#endif /* CONFIG_HUGETLB_PAGE */
-
-/*
- * This function sets the AVPN and L fields of the HPTE appropriately
- * for the page size
- */
-static inline unsigned long hpte_encode_v(unsigned long va, int psize)
-{
-	unsigned long v =
-	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
-	v <<= HPTE_V_AVPN_SHIFT;
-	if (psize != MMU_PAGE_4K)
-		v |= HPTE_V_LARGE;
-	return v;
-}
-
-/*
- * This function sets the ARPN, and LP fields of the HPTE appropriately
- * for the page size. We assume the pa is already "clean" that is properly
- * aligned for the requested page size
- */
-static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
-{
-	unsigned long r;
-
-	/* A 4K page needs no special encoding */
-	if (psize == MMU_PAGE_4K)
-		return pa & HPTE_R_RPN;
-	else {
-		unsigned int penc = mmu_psize_defs[psize].penc;
-		unsigned int shift = mmu_psize_defs[psize].shift;
-		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
-	}
-	return r;
-}
-
-/*
- * This hashes a virtual address for a 256Mb segment only for now
- */
-
-static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
-{
-	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
-}
-
-extern int __hash_page_4K(unsigned long ea, unsigned long access,
-			  unsigned long vsid, pte_t *ptep, unsigned long trap,
-			  unsigned int local);
-extern int __hash_page_64K(unsigned long ea, unsigned long access,
-			   unsigned long vsid, pte_t *ptep, unsigned long trap,
-			   unsigned int local);
-struct mm_struct;
-extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local,
-			  unsigned long trap);
-
-extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
-			     unsigned long pstart, unsigned long mode,
-			     int psize);
-
-extern void htab_initialize(void);
-extern void htab_initialize_secondary(void);
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-extern void hpte_init_beat(void);
-
-extern void stabs_alloc(void);
-extern void slb_initialize(void);
-extern void slb_flush_and_rebolt(void);
-extern void stab_initialize(unsigned long stab);
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * VSID allocation
- *
- * We first generate a 36-bit "proto-VSID". For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- *	(context << 15) | (esid & 0x7fff)
- *
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
- *
- * The proto-VSIDs are then scrambled into real VSIDs with the
- * multiplicative hash:
- *
- *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
- *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
- *
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved. VSID_MULTIPLIER is prime, so in particular it is
- * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
- * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- *	- We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses. i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- *	- The scramble function gives robust scattering in the hash
- * table (at least based on some initial results). The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
- */
-/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate (slb_low.S), do_stab_bolted
- * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
- */
-
-#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
-#define VSID_BITS	36
-#define VSID_MODULUS	((1UL<<VSID_BITS)-1)
-
-#define CONTEXT_BITS	19
-#define USER_ESID_BITS	16
-
-#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
-
-/*
- * This macro generates asm code to compute the VSID scramble
- * function.  Used in slb_allocate() and do_stab_bolted.  The function
- * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
- *
- *	rt = register continaing the proto-VSID and into which the
- *		VSID will be stored
- *	rx = scratch register (clobbered)
- *
- *	- rt and rx must be different registers
- *	- The answer will end up in the low 36 bits of rt.  The higher
- *		bits may contain other garbage, so you may need to mask the
- *		result.
- */
-#define ASM_VSID_SCRAMBLE(rt, rx)	\
-	lis	rx,VSID_MULTIPLIER@h;	\
-	ori	rx,rx,VSID_MULTIPLIER@l;	\
-	mulld	rt,rt,rx;	/* rt = rt * MULTIPLIER */	\
-	\
-	srdi	rx,rt,VSID_BITS;	\
-	clrldi	rt,rt,(64-VSID_BITS);	\
-	add	rt,rt,rx;	/* add high and low bits */	\
-	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and	\
-	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
-	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
-	 * the bit clear, r3 already has the answer we want, if it	\
-	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
-	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
-	addi	rx,rt,1;	\
-	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */	\
-	add	rt,rt,rx
-
-
-#ifndef __ASSEMBLY__
-
-typedef unsigned long mm_context_id_t;
-
-typedef struct {
-	mm_context_id_t id;
-	u16 user_psize;		/* page size index */
-	u16 sllp;		/* SLB entry page size encoding */
-#ifdef CONFIG_HUGETLB_PAGE
-	u16 low_htlb_areas, high_htlb_areas;
+/* 32-bit.  FIXME: split up the 32-bit MMU types, and revise for
+ * arch/powerpc */
+# include <asm-ppc/mmu.h>
 #endif
-	unsigned long vdso_base;
-} mm_context_t;
-
-
-static inline unsigned long vsid_scramble(unsigned long protovsid)
-{
-#if 0
-	/* The code below is equivalent to this function for arguments
-	 * < 2^VSID_BITS, which is all this should ever be called
-	 * with.  However gcc is not clever enough to compute the
-	 * modulus (2^n-1) without a second multiply. */
-	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
-#else /* 1 */
-	unsigned long x;
-
-	x = protovsid * VSID_MULTIPLIER;
-	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
-	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
-#endif /* 1 */
-}
-
-/* This is only valid for addresses >= KERNELBASE */
-static inline unsigned long get_kernel_vsid(unsigned long ea)
-{
-	return vsid_scramble(ea >> SID_SHIFT);
-}
-
-/* This is only valid for user addresses (which are below 2^41) */
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
-{
-	return vsid_scramble((context << USER_ESID_BITS)
-			     | (ea >> SID_SHIFT));
-}
-
-#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
-#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
-
-/* Physical address used by some IO functions */
-typedef unsigned long phys_addr_t;
-
-
-#endif /* __ASSEMBLY */
-#endif /* CONFIG_PPC64 */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MMU_H_ */
diff --git a/include/asm-powerpc/mpic.h b/include/asm-powerpc/mpic.h
index cb204a71e912..e4d5fc5362a0 100644
--- a/include/asm-powerpc/mpic.h
+++ b/include/asm-powerpc/mpic.h
@@ -199,7 +199,7 @@ enum {
 };

-#ifdef CONFIG_MPIC_BROKEN_U3
+#ifdef CONFIG_MPIC_U3_HT_IRQS
 /* Fixup table entry */
 struct mpic_irq_fixup
 {
@@ -208,7 +208,7 @@ struct mpic_irq_fixup
 	u32 data;
 	unsigned int index;
 };
-#endif /* CONFIG_MPIC_BROKEN_U3 */
+#endif /* CONFIG_MPIC_U3_HT_IRQS */

 enum mpic_reg_type {
@@ -239,7 +239,7 @@ struct mpic

 	/* The "linux" controller struct */
 	struct irq_chip hc_irq;
-#ifdef CONFIG_MPIC_BROKEN_U3
+#ifdef CONFIG_MPIC_U3_HT_IRQS
 	struct irq_chip hc_ht_irq;
 #endif
 #ifdef CONFIG_SMP
@@ -268,7 +268,7 @@ struct mpic
 	/* Spurious vector to program into unused sources */
 	unsigned int spurious_vec;

-#ifdef CONFIG_MPIC_BROKEN_U3
+#ifdef CONFIG_MPIC_U3_HT_IRQS
 	/* The fixup table */
 	struct mpic_irq_fixup *fixups;
 	spinlock_t fixup_lock;
@@ -313,7 +313,7 @@ struct mpic
 /* Set this for a big-endian MPIC */
 #define MPIC_BIG_ENDIAN	0x00000002
 /* Broken U3 MPIC */
-#define MPIC_BROKEN_U3	0x00000004
+#define MPIC_U3_HT_IRQS	0x00000004
 /* Broken IPI registers (autodetected) */
 #define MPIC_BROKEN_IPI	0x00000008
 /* MPIC wants a reset */
@@ -352,7 +352,7 @@ struct mpic
 * @senses_num: number of entries in the array
 *
 * Note about the sense array. If none is passed, all interrupts are
- * setup to be level negative unless MPIC_BROKEN_U3 is set in which
+ * setup to be level negative unless MPIC_U3_HT_IRQS is set in which
 * case they are edge positive (and the array is ignored anyway).
 * The values in the array start at the first source of the MPIC,
 * that is senses[0] correspond to linux irq "irq_offset".
diff --git a/include/asm-powerpc/of_device.h b/include/asm-powerpc/of_device.h
index a889b2005bf5..4f1aabe0ce73 100644
--- a/include/asm-powerpc/of_device.h
+++ b/include/asm-powerpc/of_device.h
@@ -32,5 +32,8 @@ extern int of_device_register(struct of_device *ofdev);
 extern void of_device_unregister(struct of_device *ofdev);
 extern void of_release_dev(struct device *dev);

+extern int of_device_uevent(struct device *dev,
+	char **envp, int num_envp, char *buffer, int buffer_size);
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_OF_DEVICE_H */
diff --git a/include/asm-powerpc/oprofile_impl.h b/include/asm-powerpc/oprofile_impl.h
index 94c0ad2bff96..8d6b47f7b300 100644
--- a/include/asm-powerpc/oprofile_impl.h
+++ b/include/asm-powerpc/oprofile_impl.h
@@ -57,6 +57,8 @@ extern struct op_powerpc_model op_model_rs64;
 extern struct op_powerpc_model op_model_power4;
 extern struct op_powerpc_model op_model_7450;
 extern struct op_powerpc_model op_model_cell;
+extern struct op_powerpc_model op_model_pa6t;
+

 /* All the classic PPC parts use these */
 static inline unsigned int classic_ctr_read(unsigned int i)
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 0d3adc09c847..cf95274f735e 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -70,6 +70,7 @@ struct paca_struct {
 	s16 hw_cpu_id;		/* Physical processor number */
 	u8 cpu_start;		/* At startup, processor spins until */
 				/* this becomes non-zero. */
+	struct slb_shadow *slb_shadow_ptr;

 	/*
 	 * Now, starting in cacheline 2, the exception save areas
@@ -93,6 +94,7 @@ struct paca_struct {
 	u64 stab_rr;		/* stab/slb round-robin counter */
 	u64 saved_r1;		/* r1 save for RTAS calls */
 	u64 saved_msr;		/* MSR saved here by enter_rtas */
+	u16 trap_save;		/* Used when bad stack is encountered */
 	u8 soft_enabled;	/* irq soft-enable flag */
 	u8 hard_enabled;	/* set if irqs are enabled in MSR */
 	u8 io_sync;		/* writel() needs spin_unlock sync */
@@ -101,8 +103,6 @@ struct paca_struct {
 	u64 user_time;		/* accumulated usermode TB ticks */
 	u64 system_time;	/* accumulated system TB ticks */
 	u64 startpurr;		/* PURR/TB value snapshot */
-
-	struct slb_shadow *slb_shadow_ptr;
 };

 extern struct paca_struct paca[];
diff --git a/include/asm-powerpc/parport.h b/include/asm-powerpc/parport.h
index 3fca21ddf546..b37b81e37278 100644
--- a/include/asm-powerpc/parport.h
+++ b/include/asm-powerpc/parport.h
@@ -20,18 +20,18 @@ extern struct parport *parport_pc_probe_port (unsigned long int base,
 static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
 {
 	struct device_node *np;
-	u32 *prop;
+	const u32 *prop;
 	u32 io1, io2;
 	int propsize;
 	int count = 0;
 	for (np = NULL; (np = of_find_compatible_node(np,
 						      "parallel",
 						      "pnpPNP,400")) != NULL;) {
-		prop = (u32 *)get_property(np, "reg", &propsize);
+		prop = of_get_property(np, "reg", &propsize);
 		if (!prop || propsize > 6*sizeof(u32))
 			continue;
 		io1 = prop[1]; io2 = prop[2];
-		prop = (u32 *)get_property(np, "interrupts", NULL);
+		prop = of_get_property(np, "interrupts", NULL);
 		if (!prop)
 			continue;
 		if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL) != NULL)
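The parport hunk shows the tree-wide rename from get_property() to of_get_property(), which also makes the returned pointer const. A hedged sketch of the resulting pattern (device and property names are illustrative; the "reg" layout follows the parport code above):

```c
/* Illustrative use of of_get_property(): the returned pointer aims
 * into the device tree itself, hence const -- copy, never modify.
 */
#include <asm/prom.h>

static int example_read_reg(struct device_node *np)
{
	int len;
	const u32 *prop;

	prop = of_get_property(np, "reg", &len);
	if (!prop || len < (int)(3 * sizeof(u32)))
		return -1;

	/* parport uses prop[1]/prop[2] as the two I/O addresses */
	return prop[1];
}
```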
diff --git a/include/asm-powerpc/pci.h b/include/asm-powerpc/pci.h
index ac656ee6bb19..ce0f13e8eb14 100644
--- a/include/asm-powerpc/pci.h
+++ b/include/asm-powerpc/pci.h
@@ -70,19 +70,22 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 */
 #define PCI_DISABLE_MWI

-extern struct dma_mapping_ops *pci_dma_ops;
+#ifdef CONFIG_PCI
+extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
+extern struct dma_mapping_ops *get_pci_dma_ops(void);

 /* For DAC DMA, we currently don't support it by default, but
 * we let 64-bit platforms override this.
 */
 static inline int pci_dac_dma_supported(struct pci_dev *hwdev,u64 mask)
 {
-	if (pci_dma_ops && pci_dma_ops->dac_dma_supported)
-		return pci_dma_ops->dac_dma_supported(&hwdev->dev, mask);
+	struct dma_mapping_ops *d = get_pci_dma_ops();
+
+	if (d && d->dac_dma_supported)
+		return d->dac_dma_supported(&hwdev->dev, mask);
 	return 0;
 }

-#ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
@@ -99,6 +102,9 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
		*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
 }
+#else	/* CONFIG_PCI */
+#define set_pci_dma_ops(d)
+#define get_pci_dma_ops()	NULL
+#endif

 extern int pci_domain_nr(struct pci_bus *bus);
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 345d9b07b3e2..a28fa8bc01da 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -97,3 +97,6 @@
 #define pud_ERROR(e) \
	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#define remap_4k_pfn(vma, addr, pfn, prot)	\
+	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 4b7126c53f37..5e84f070eaf7 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -35,6 +35,7 @@
 #define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
 #define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
 #define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */
 #define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
 #define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */
@@ -93,6 +94,10 @@
 #define pte_pagesize_index(pte)	\
	(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+#define remap_4k_pfn(vma, addr, pfn, prot)	\
+	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
+			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
+
 #endif	/* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGTABLE_64K_H */
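The pci.h hunk replaces a directly exported pci_dma_ops pointer with get/set accessors, so non-PCI builds can stub them out and callers need no #ifdefs. A sketch of the obvious implementation (a guess at the one-liners, not copied from the patch):

```c
/* Sketch of the accessor pattern from the pci.h hunk: the ops pointer
 * becomes file-local and is reached only through these functions.
 */
struct dma_mapping_ops;

static struct dma_mapping_ops *pci_dma_ops;	/* now private */

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
```

With CONFIG_PCI disabled, the header above maps set_pci_dma_ops() to a no-op and get_pci_dma_ops() to NULL, which is why pci_dac_dma_supported() can check the returned pointer unconditionally.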
diff --git a/include/asm-powerpc/pgtable.h b/include/asm-powerpc/pgtable.h
index 10f52743f4ff..19edb6982b81 100644
--- a/include/asm-powerpc/pgtable.h
+++ b/include/asm-powerpc/pgtable.h
@@ -272,7 +272,10 @@ static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }

 /* Atomic PTE updates */
-static inline unsigned long pte_update(pte_t *p, unsigned long clr)
+static inline unsigned long pte_update(struct mm_struct *mm,
+				       unsigned long addr,
+				       pte_t *ptep, unsigned long clr,
+				       int huge)
 {
	unsigned long old, tmp;

@@ -283,20 +286,15 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr)
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
-	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
+	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );
+
+	if (old & _PAGE_HASHPTE)
+		hpte_need_flush(mm, addr, ptep, old, huge);
	return old;
 }

-/* PTE updating functions, this function puts the PTE in the
- * batch, doesn't actually triggers the hash flush immediately,
- * you need to call flush_tlb_pending() to do that.
- * Pass -1 for "normal" size (4K or 64K)
- */
-extern void hpte_update(struct mm_struct *mm, unsigned long addr,
-			pte_t *ptep, unsigned long pte, int huge);
-
 static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
 {
@@ -304,11 +302,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
-	old = pte_update(ptep, _PAGE_ACCESSED);
-	if (old & _PAGE_HASHPTE) {
-		hpte_update(mm, addr, ptep, old, 0);
-		flush_tlb_pending();
-	}
+	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -331,9 +325,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
-	old = pte_update(ptep, _PAGE_DIRTY);
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
	return (old & _PAGE_DIRTY) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
@@ -352,9 +344,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
-	old = pte_update(ptep, _PAGE_RW);
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
 }

 /*
@@ -378,7 +368,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 ({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
-	flush_tlb_page(__vma, __address);				\
	__dirty;							\
 })

@@ -386,20 +375,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
 {
-	unsigned long old = pte_update(ptep, ~0UL);
-
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
 }

 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t * ptep)
 {
-	unsigned long old = pte_update(ptep, ~0UL);
-
-	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, ptep, old, 0);
+	pte_update(mm, addr, ptep, ~0UL, 0);
 }

 /*
@@ -408,10 +391,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
 {
-	if (pte_present(*ptep)) {
+	if (pte_present(*ptep))
		pte_clear(mm, addr, ptep);
-		flush_tlb_pending();
-	}
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	*ptep = pte;
 }
@@ -467,16 +448,6 @@ extern pgd_t swapper_pg_dir[];

 extern void paging_init(void);

-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)	((entry).val >> 8)
@@ -522,6 +493,7 @@ void pgtable_cache_init(void);
	return pt;
 }

+
 #include <asm-generic/pgtable.h>

 #endif /* __ASSEMBLY__ */
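The pgtable.h rework folds the "did the old PTE have a hash entry?" check into pte_update() itself, so every caller shrinks from a three-step dance (update, check _PAGE_HASHPTE, flush) to a single call. A toy model of the pattern, with all types and the flush hook invented for illustration:

```c
/* Toy model of the pte_update() rework: the flush decision moves
 * inside the update primitive so callers cannot forget it.
 */
#include <stdio.h>

#define _PAGE_HASHPTE  0x400UL
#define _PAGE_ACCESSED 0x100UL

static void hpte_need_flush(unsigned long addr, unsigned long old)
{
	printf("queue hash flush for %#lx (old pte %#lx)\n", addr, old);
}

/* Clear 'clr' bits, return the old value, and schedule the hash flush
 * when the old PTE was hashed -- callers no longer check themselves. */
static unsigned long pte_update(unsigned long *ptep, unsigned long addr,
				unsigned long clr)
{
	unsigned long old = *ptep;

	*ptep = old & ~clr;
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(addr, old);
	return old;
}

int main(void)
{
	unsigned long pte = _PAGE_HASHPTE | _PAGE_ACCESSED;

	pte_update(&pte, 0x1000, _PAGE_ACCESSED);
	printf("pte now %#lx\n", pte);
	return 0;
}
```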
@@ -126,5 +129,10 @@ struct device_node * find_device_pe(struct device_node *dn);

 #endif

+#else /* CONFIG_PCI */
+static inline void find_and_init_phbs(void) { }
+static inline void init_pci_config_tokens(void) { }
+#endif /* !CONFIG_PCI */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PPC_PCI_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index a26c32ee5527..d947b1609491 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -133,7 +133,6 @@ struct thread_struct {
	mm_segment_t	fs;	/* for get_fs() validation */
 #ifdef CONFIG_PPC32
	void		*pgdir;	/* root of page-table tree */
-	signed long	last_syscall;
 #endif
 #if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
	unsigned long	dbcr0;	/* debug control register values */
diff --git a/include/asm-powerpc/prom.h b/include/asm-powerpc/prom.h
index 020ed015a94b..ec400f608e16 100644
--- a/include/asm-powerpc/prom.h
+++ b/include/asm-powerpc/prom.h
@@ -18,7 +18,9 @@
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/platform_device.h>
+#include <asm/irq.h>
 #include <asm/atomic.h>
+#include <asm/io.h>

 /* Definitions used by the flattened device tree */
 #define OF_DT_HEADER		0xd00dfeed	/* marker */
@@ -58,6 +60,8 @@ struct boot_param_header
	u32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
	/* version 3 fields below */
	u32	dt_strings_size;	/* size of the DT strings block */
+	/* version 17 fields below */
+	u32	dt_struct_size;		/* size of the DT structure block */
 };

@@ -68,7 +72,7 @@ typedef u32 ihandle;

 struct property {
	char	*name;
	int	length;
-	unsigned char *value;
+	void	*value;
	struct property *next;
 };

@@ -108,14 +112,6 @@ static inline void set_node_proc_entry(struct device_node *dn, struct proc_dir_e
 }

-/* OBSOLETE: Old style node lookup */
-extern struct device_node *find_devices(const char *name);
-extern struct device_node *find_type_devices(const char *type);
-extern struct device_node *find_path_device(const char *path);
-extern struct device_node *find_compatible_devices(const char *type,
-						   const char *compat);
-extern struct device_node *find_all_nodes(void);
-
 /* New style node lookup */
 extern struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name);
@@ -159,15 +155,17 @@ extern void of_detach_node(const struct device_node *);
 extern void finish_device_tree(void);
 extern void unflatten_device_tree(void);
 extern void early_init_devtree(void *);
-extern int device_is_compatible(const struct device_node *device,
+extern int of_device_is_compatible(const struct device_node *device,
				   const char *);
+#define device_is_compatible(d, c)	of_device_is_compatible((d), (c))
 extern int machine_is_compatible(const char *compat);
-extern const void *get_property(const struct device_node *node,
+extern const void *of_get_property(const struct device_node *node,
				   const char *name,
				   int *lenp);
+#define get_property(a, b, c)	of_get_property((a), (b), (c))
 extern void print_properties(struct device_node *node);
-extern int prom_n_addr_cells(struct device_node* np);
-extern int prom_n_size_cells(struct device_node* np);
+extern int of_n_addr_cells(struct device_node* np);
+extern int of_n_size_cells(struct device_node* np);
 extern int prom_n_intr_cells(struct device_node* np);
 extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
 extern int prom_add_property(struct device_node* np, struct property* prop);
@@ -350,6 +348,16 @@ static inline int of_irq_to_resource(struct device_node *dev, int index, struct
	return irq;
 }

+static inline void __iomem *of_iomap(struct device_node *np, int index)
+{
+	struct resource res;
+
+	if (of_address_to_resource(np, index, &res))
+		return NULL;
+
+	return ioremap(res.start, 1 + res.end - res.start);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _POWERPC_PROM_H */
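The new of_iomap() helper wraps the common of_address_to_resource() + ioremap() pair. A hypothetical caller might look like this (my_init_one() and MY_REG are invented for illustration; out_be32()/iounmap() are the usual accessors):

    /* Hypothetical usage: map "reg" entry 0 of a probed device node. */
    static int my_init_one(struct device_node *np)
    {
            void __iomem *regs = of_iomap(np, 0);

            if (regs == NULL)
                    return -ENOMEM;         /* no usable "reg" for index 0 */

            out_be32(regs + MY_REG, 0);     /* MY_REG: illustrative offset */
            iounmap(regs);
            return 0;
    }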
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 0d7f0164ed81..749c7f953b58 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -469,12 +469,68 @@
 #define SPRN_SIAR	780
 #define SPRN_SDAR	781

-#define PA6T_SPRN_PMC0	787
-#define PA6T_SPRN_PMC1	788
-#define PA6T_SPRN_PMC2	789
-#define PA6T_SPRN_PMC3	790
-#define PA6T_SPRN_PMC4	791
-#define PA6T_SPRN_PMC5	792
+#define SPRN_PA6T_MMCR0	795
+#define   PA6T_MMCR0_EN0	0x0000000000000001UL
+#define   PA6T_MMCR0_EN1	0x0000000000000002UL
+#define   PA6T_MMCR0_EN2	0x0000000000000004UL
+#define   PA6T_MMCR0_EN3	0x0000000000000008UL
+#define   PA6T_MMCR0_EN4	0x0000000000000010UL
+#define   PA6T_MMCR0_EN5	0x0000000000000020UL
+#define   PA6T_MMCR0_SUPEN	0x0000000000000040UL
+#define   PA6T_MMCR0_PREN	0x0000000000000080UL
+#define   PA6T_MMCR0_HYPEN	0x0000000000000100UL
+#define   PA6T_MMCR0_FCM0	0x0000000000000200UL
+#define   PA6T_MMCR0_FCM1	0x0000000000000400UL
+#define   PA6T_MMCR0_INTGEN	0x0000000000000800UL
+#define   PA6T_MMCR0_INTEN0	0x0000000000001000UL
+#define   PA6T_MMCR0_INTEN1	0x0000000000002000UL
+#define   PA6T_MMCR0_INTEN2	0x0000000000004000UL
+#define   PA6T_MMCR0_INTEN3	0x0000000000008000UL
+#define   PA6T_MMCR0_INTEN4	0x0000000000010000UL
+#define   PA6T_MMCR0_INTEN5	0x0000000000020000UL
+#define   PA6T_MMCR0_DISCNT	0x0000000000040000UL
+#define   PA6T_MMCR0_UOP	0x0000000000080000UL
+#define   PA6T_MMCR0_TRG	0x0000000000100000UL
+#define   PA6T_MMCR0_TRGEN	0x0000000000200000UL
+#define   PA6T_MMCR0_TRGREG	0x0000000001600000UL
+#define   PA6T_MMCR0_SIARLOG	0x0000000002000000UL
+#define   PA6T_MMCR0_SDARLOG	0x0000000004000000UL
+#define   PA6T_MMCR0_PROEN	0x0000000008000000UL
+#define   PA6T_MMCR0_PROLOG	0x0000000010000000UL
+#define   PA6T_MMCR0_DAMEN2	0x0000000020000000UL
+#define   PA6T_MMCR0_DAMEN3	0x0000000040000000UL
+#define   PA6T_MMCR0_DAMEN4	0x0000000080000000UL
+#define   PA6T_MMCR0_DAMEN5	0x0000000100000000UL
+#define   PA6T_MMCR0_DAMSEL2	0x0000000200000000UL
+#define   PA6T_MMCR0_DAMSEL3	0x0000000400000000UL
+#define   PA6T_MMCR0_DAMSEL4	0x0000000800000000UL
+#define   PA6T_MMCR0_DAMSEL5	0x0000001000000000UL
+#define   PA6T_MMCR0_HANDDIS	0x0000002000000000UL
+#define   PA6T_MMCR0_PCTEN	0x0000004000000000UL
+#define   PA6T_MMCR0_SOCEN	0x0000008000000000UL
+#define   PA6T_MMCR0_SOCMOD	0x0000010000000000UL
+
+#define SPRN_PA6T_MMCR1	798
+#define   PA6T_MMCR1_ES2	0x00000000000000ffUL
+#define   PA6T_MMCR1_ES3	0x000000000000ff00UL
+#define   PA6T_MMCR1_ES4	0x0000000000ff0000UL
+#define   PA6T_MMCR1_ES5	0x00000000ff000000UL
+
+#define SPRN_PA6T_SIAR	780
+#define SPRN_PA6T_UPMC0	771
+#define SPRN_PA6T_UPMC1	772
+#define SPRN_PA6T_UPMC2	773
+#define SPRN_PA6T_UPMC3	774
+#define SPRN_PA6T_UPMC4	775
+#define SPRN_PA6T_UPMC5	776
+#define SPRN_PA6T_UMMCR0	779
+#define SPRN_PA6T_UMMCR1	782
+#define SPRN_PA6T_PMC0	787
+#define SPRN_PA6T_PMC1	788
+#define SPRN_PA6T_PMC2	789
+#define SPRN_PA6T_PMC3	790
+#define SPRN_PA6T_PMC4	791
+#define SPRN_PA6T_PMC5	792

 #else /* 32-bit */
 #define SPRN_MMCR0	952	/* Monitor Mode Control Register 0 */
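The PA6T performance-monitor SPRs move to the usual SPRN_* naming and MMCR0/MMCR1 gain bit masks. Purely as a rough illustration (the enable policy below is invented), a perfmon driver could program counter 0 with the ordinary mfspr()/mtspr() accessors:

    /* Illustrative: zero PMC0, then count in supervisor + problem state. */
    static void pa6t_start_pmc0(void)
    {
            u64 mmcr0 = mfspr(SPRN_PA6T_MMCR0);

            mtspr(SPRN_PA6T_PMC0, 0);
            mmcr0 |= PA6T_MMCR0_EN0 | PA6T_MMCR0_SUPEN | PA6T_MMCR0_PREN;
            mtspr(SPRN_PA6T_MMCR0, mmcr0);
    }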
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 8aad0619eb8e..02e56a6685a2 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -242,6 +242,7 @@ struct spu_state {
	u64 spu_chnldata_RW[32];
	u32 spu_mailbox_data[4];
	u32 pu_mailbox_data[1];
+	u64 dar, dsisr;
	unsigned long suspend_time;
	spinlock_t register_lock;
 };
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index f7b1227d6454..d3e0906ff2bc 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -131,6 +131,7 @@ extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
+extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
diff --git a/include/asm-powerpc/tlb.h b/include/asm-powerpc/tlb.h
index 4e2a834683fb..0a17682663d8 100644
--- a/include/asm-powerpc/tlb.h
+++ b/include/asm-powerpc/tlb.h
@@ -38,7 +38,6 @@ extern void pte_free_finish(void);

 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	flush_tlb_pending();
	pte_free_finish();
 }

diff --git a/include/asm-powerpc/tlbflush.h b/include/asm-powerpc/tlbflush.h
index 93c7d0c7230f..86e6266a028b 100644
--- a/include/asm-powerpc/tlbflush.h
+++ b/include/asm-powerpc/tlbflush.h
@@ -17,10 +17,73 @@
 */
 #ifdef __KERNEL__

-
 struct mm_struct;
+struct vm_area_struct;
+
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+/*
+ * TLB flushing for software loaded TLB chips
+ *
+ * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * flush_tlb_kernel_range are best implemented as tlbia vs
+ * specific tlbie's
+ */
+
+extern void _tlbie(unsigned long address);
+
+#if defined(CONFIG_40x) || defined(CONFIG_8xx)
+#define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
+#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
+extern void _tlbia(void);
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
+					 unsigned long vmaddr)
+{
+	_tlbie(vmaddr);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	_tlbia();
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	_tlbia();
+}

-#ifdef CONFIG_PPC64
+#elif defined(CONFIG_PPC32)
+/*
+ * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
+ */
+extern void _tlbie(unsigned long address);
+extern void _tlbia(void);
+
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+
+#else
+/*
+ * TLB flushing for 64-bit hash-MMU CPUs
+ */

 #include <linux/percpu.h>
 #include <asm/page.h>
@@ -28,117 +91,90 @@ struct mm_struct;

 #define PPC64_TLB_BATCH_NR 192

 struct ppc64_tlb_batch {
-	unsigned long index;
-	struct mm_struct *mm;
-	real_pte_t pte[PPC64_TLB_BATCH_NR];
-	unsigned long vaddr[PPC64_TLB_BATCH_NR];
-	unsigned int psize;
+	int			active;
+	unsigned long		index;
+	struct mm_struct	*mm;
+	real_pte_t		pte[PPC64_TLB_BATCH_NR];
+	unsigned long		vaddr[PPC64_TLB_BATCH_NR];
+	unsigned int		psize;
 };
 DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
-static inline void flush_tlb_pending(void)
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+			    pte_t *ptep, unsigned long pte, int huge);
+
+#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+
+static inline void arch_enter_lazy_mmu_mode(void)
+{
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+
+	batch->active = 1;
+}
+
+static inline void arch_leave_lazy_mmu_mode(void)
 {
-	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->index)
		__flush_tlb_pending(batch);
-	put_cpu_var(ppc64_tlb_batch);
+	batch->active = 0;
 }

+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+
+
 extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
			    int local);
 extern void flush_hash_range(unsigned long number, int local);

-#else /* CONFIG_PPC64 */
-
-#include <linux/mm.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-/*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
- * flush_tlb_kernel_range are best implemented as tlbia vs
- * specific tlbie's
- */
-
-#if (defined(CONFIG_4xx) && !defined(CONFIG_44x)) || defined(CONFIG_8xx)
-#define flush_tlb_pending()	asm volatile ("tlbia; sync" : : : "memory")
-#elif defined(CONFIG_4xx) || defined(CONFIG_FSL_BOOKE)
-#define flush_tlb_pending()	_tlbia()
-#endif
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to ensure coherency between the i-cache and d-cache
- * for the page which has just been mapped in.
- * On machines which use an MMU hash table, we use this to put a
- * corresponding HPTE into the hash table ahead of time, instead of
- * waiting for the inevitable extra hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-
-#endif /* CONFIG_PPC64 */
-
-#if defined(CONFIG_PPC64) || defined(CONFIG_4xx) || \
-	defined(CONFIG_FSL_BOOKE) || defined(CONFIG_8xx)
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	flush_tlb_pending();
 }

 static inline void flush_tlb_page(struct vm_area_struct *vma,
-				unsigned long vmaddr)
+				  unsigned long vmaddr)
 {
-#ifdef CONFIG_PPC64
-	flush_tlb_pending();
-#else
-	_tlbie(vmaddr);
-#endif
 }

 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					  unsigned long vmaddr)
 {
-#ifndef CONFIG_PPC64
-	_tlbie(vmaddr);
-#endif
 }

 static inline void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
+				   unsigned long start, unsigned long end)
 {
-	flush_tlb_pending();
 }

 static inline void flush_tlb_kernel_range(unsigned long start,
-		unsigned long end)
+					  unsigned long end)
 {
-	flush_tlb_pending();
 }

-#else /* 6xx, 7xx, 7xxx cpus */
-
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			    unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
-
 #endif

 /*
+ * This gets called at the end of handling a page fault, when
+ * the kernel has put a new PTE into the page table for the process.
+ * We use it to ensure coherency between the i-cache and d-cache
+ * for the page which has just been mapped in.
+ * On machines which use an MMU hash table, we use this to put a
+ * corresponding HPTE into the hash table ahead of time, instead of
+ * waiting for the inevitable extra hash-table miss exception.
+ */
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+
+/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
 static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				unsigned long start, unsigned long end)
+				      unsigned long start, unsigned long end)
 {
 }
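The per-cpu batch now carries an `active` flag and is driven through the generic lazy-MMU hooks instead of an explicit flush_tlb_pending(). In effect the generic mm code brackets a run of PTE updates like the following sketch (illustrative only; zap_range_sketch() is not a real function, and it assumes contiguous PTEs):

    /* Hash flushes queued by pte_update()/hpte_need_flush() inside the
     * window are issued once, when the lazy-MMU window is closed. */
    static void zap_range_sketch(struct mm_struct *mm, pte_t *ptep,
                                 unsigned long start, unsigned long end)
    {
            unsigned long addr;

            arch_enter_lazy_mmu_mode();         /* batch->active = 1 */
            for (addr = start; addr < end; addr += PAGE_SIZE)
                    ptep_get_and_clear(mm, addr, ptep++);
            arch_leave_lazy_mmu_mode();         /* one __flush_tlb_pending() */
    }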
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index adbf16b8cfbb..8e798e3758bc 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -110,12 +110,18 @@ struct exception_table_entry {
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 #define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
 #ifndef __powerpc64__
 #define __get_user64(x, ptr) \
	__get_user64_nocheck((x), (ptr), sizeof(*(ptr)))
 #define __put_user64(x, ptr) __put_user(x, ptr)
 #endif

+#define __get_user_inatomic(x, ptr) \
+	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
+#define __put_user_inatomic(x, ptr) \
+	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user

@@ -198,6 +204,16 @@ do {								\
	__pu_err;						\
 })

+#define __put_user_nosleep(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
+	__chk_user_ptr(ptr);					\
+	__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;						\
+})
+
+
 extern long __get_user_bad(void);

 #define __get_user_asm(x, addr, err, op)		\
@@ -297,6 +313,18 @@ do {								\
	__gu_err;						\
 })

+#define __get_user_nosleep(x, ptr, size)			\
+({								\
+	long __gu_err;						\
+	unsigned long __gu_val;					\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
+	__chk_user_ptr(ptr);					\
+	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	(x) = (__typeof__(*(ptr)))__gu_val;			\
+	__gu_err;						\
+})
+
+
 /* more complex routines */

 extern unsigned long __copy_tofrom_user(void __user *to,
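The new __get_user_inatomic()/__put_user_inatomic() pair is __get_user()/__put_user() without the sleeping checks, for callers that may fault but must not schedule. A hypothetical fragment (read_user_insn() is invented for illustration):

    /* Hypothetical: fetch an instruction word from user space in a
     * context where sleeping is forbidden; caller handles the fallback. */
    static int read_user_insn(unsigned int __user *nip, unsigned int *insn)
    {
            if (__get_user_inatomic(*insn, nip))
                    return -EFAULT;
            return 0;
    }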
diff --git a/include/asm-powerpc/uic.h b/include/asm-powerpc/uic.h
new file mode 100644
index 000000000000..970eb7e2186a
--- /dev/null
+++ b/include/asm-powerpc/uic.h
@@ -0,0 +1,23 @@
+/*
+ * include/asm-powerpc/uic.h
+ *
+ * IBM PPC4xx UIC external definitions and structure.
+ *
+ * Maintainer: David Gibson <dwg@au1.ibm.com>
+ * Copyright 2007 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_POWERPC_UIC_H
+#define _ASM_POWERPC_UIC_H
+
+#ifdef __KERNEL__
+
+extern void __init uic_init_tree(void);
+extern unsigned int uic_get_irq(void);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_UIC_H */
diff --git a/include/asm-ppc/ibm4xx.h b/include/asm-ppc/ibm4xx.h
index 92fd02d7b177..ed6891af05d3 100644
--- a/include/asm-ppc/ibm4xx.h
+++ b/include/asm-ppc/ibm4xx.h
@@ -47,12 +47,8 @@
 #include <platforms/4xx/walnut.h>
 #endif

-#if defined(CONFIG_XILINX_ML300)
-#include <platforms/4xx/xilinx_ml300.h>
-#endif
-
-#if defined(CONFIG_XILINX_ML403)
-#include <platforms/4xx/xilinx_ml403.h>
+#if defined(CONFIG_XILINX_VIRTEX)
+#include <platforms/4xx/virtex.h>
 #endif

 #ifndef __ASSEMBLY__
diff --git a/include/asm-ppc/ppc_sys.h b/include/asm-ppc/ppc_sys.h
index 40f197af6508..de99e92d627b 100644
--- a/include/asm-ppc/ppc_sys.h
+++ b/include/asm-ppc/ppc_sys.h
@@ -33,8 +33,6 @@
 #include <asm/mpc52xx.h>
 #elif defined(CONFIG_MPC10X_BRIDGE)
 #include <asm/mpc10x.h>
-#elif defined(CONFIG_XILINX_VIRTEX)
-#include <platforms/4xx/virtex.h>
 #else
 #error "need definition of ppc_sys_devices"
 #endif
diff --git a/include/asm-ppc/prom.h b/include/asm-ppc/prom.h
index adc5ae784924..901f7fa8b2d7 100644
--- a/include/asm-ppc/prom.h
+++ b/include/asm-ppc/prom.h
@@ -34,7 +34,8 @@ extern unsigned long sub_reloc_offset(unsigned long);
 */
 #define machine_is_compatible(x)		0
 #define of_find_compatible_node(f, t, c)	NULL
-#define get_property(p, n, l)			NULL
+#define of_get_property(p, n, l)		NULL
+#define get_property(a, b, c)	of_get_property((a), (b), (c))

 #endif /* _PPC_PROM_H */
 #endif /* __KERNEL__ */
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 783177387ac6..b0952e532ed5 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -168,24 +168,16 @@ extern int pmu_get_model(void);

 struct pmu_sleep_notifier
 {
-	int (*notifier_call)(struct pmu_sleep_notifier *self, int when);
+	void (*notifier_call)(struct pmu_sleep_notifier *self, int when);
	int priority;
	struct list_head list;
 };

 /* Code values for calling sleep/wakeup handlers
- *
- * Note: If a sleep request got cancelled, all drivers will get
- * the PBOOK_SLEEP_REJECT, even those who didn't get the PBOOK_SLEEP_REQUEST.
 */
 #define PBOOK_SLEEP_REQUEST	1
 #define PBOOK_SLEEP_NOW		2
-#define PBOOK_SLEEP_REJECT	3
-#define PBOOK_WAKE		4
-
-/* Result codes returned by the notifiers */
-#define PBOOK_SLEEP_OK		0
-#define PBOOK_SLEEP_REFUSE	-1
+#define PBOOK_WAKE		3

 /* priority levels in notifiers */
 #define SLEEP_LEVEL_VIDEO	100	/* Video driver (first wake) */
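With notifier_call now returning void and the reject/refuse codes gone, a sleep notifier can observe a sleep but no longer veto it. A hedged example of the new shape (my_sleep_notify and my_notifier are invented; pmu_register_sleep_notifier() is the existing registration entry point):

    /* Illustrative notifier using the reduced code set. */
    static void my_sleep_notify(struct pmu_sleep_notifier *self, int when)
    {
            switch (when) {
            case PBOOK_SLEEP_REQUEST:       /* sleep announced: quiesce */
                    break;
            case PBOOK_SLEEP_NOW:           /* last call before suspend */
                    break;
            case PBOOK_WAKE:                /* machine is back: restore */
                    break;
            }
    }

    static struct pmu_sleep_notifier my_notifier = {
            .notifier_call  = my_sleep_notify,
            .priority       = SLEEP_LEVEL_VIDEO,
    };

    /* registration is unchanged: pmu_register_sleep_notifier(&my_notifier); */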