Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig  84
-rw-r--r--  arch/powerpc/Kconfig.debug  3
-rw-r--r--  arch/powerpc/Makefile  1
-rw-r--r--  arch/powerpc/boot/.gitignore  1
-rw-r--r--  arch/powerpc/boot/Makefile  2
-rw-r--r--  arch/powerpc/boot/dts/canyonlands.dts  37
-rw-r--r--  arch/powerpc/boot/dts/glacier.dts  37
-rw-r--r--  arch/powerpc/boot/dts/mpc8610_hpcd.dts  12
-rw-r--r--  arch/powerpc/boot/ns16550.c  5
-rw-r--r--  arch/powerpc/kernel/Makefile  9
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c  39
-rw-r--r--  arch/powerpc/kernel/cpu_setup_44x.S  1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S  8
-rw-r--r--  arch/powerpc/kernel/cputable.c  4
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S  11
-rw-r--r--  arch/powerpc/kernel/misc_32.S  6
-rw-r--r--  arch/powerpc/kernel/misc_64.S  20
-rw-r--r--  arch/powerpc/kernel/of_platform.c  2
-rw-r--r--  arch/powerpc/kernel/paca.c  87
-rw-r--r--  arch/powerpc/kernel/ppc32.h  2
-rw-r--r--  arch/powerpc/kernel/process.c  31
-rw-r--r--  arch/powerpc/kernel/prom.c  4
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh  58
-rw-r--r--  arch/powerpc/kernel/ptrace32.c  27
-rw-r--r--  arch/powerpc/kernel/setup_64.c  5
-rw-r--r--  arch/powerpc/kernel/stacktrace.c  1
-rw-r--r--  arch/powerpc/kernel/udbg.c  4
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c  224
-rw-r--r--  arch/powerpc/kvm/44x_tlb.h  91
-rw-r--r--  arch/powerpc/kvm/Kconfig  42
-rw-r--r--  arch/powerpc/kvm/Makefile  15
-rw-r--r--  arch/powerpc/kvm/booke_guest.c  615
-rw-r--r--  arch/powerpc/kvm/booke_host.c  83
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S  436
-rw-r--r--  arch/powerpc/kvm/emulate.c  760
-rw-r--r--  arch/powerpc/kvm/powerpc.c  436
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c  2
-rw-r--r--  arch/powerpc/mm/hash_low_32.S  4
-rw-r--r--  arch/powerpc/mm/init_32.c  13
-rw-r--r--  arch/powerpc/mm/init_64.c  3
-rw-r--r--  arch/powerpc/mm/mem.c  46
-rw-r--r--  arch/powerpc/mm/numa.c  1
-rw-r--r--  arch/powerpc/mm/pgtable_32.c  23
-rw-r--r--  arch/powerpc/platforms/86xx/mpc8610_hpcd.c  190
-rw-r--r--  arch/powerpc/platforms/Kconfig  1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype  4
-rw-r--r--  arch/powerpc/platforms/Makefile  1
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig  13
-rw-r--r--  arch/powerpc/platforms/cell/Makefile  20
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c  6
-rw-r--r--  arch/powerpc/platforms/cell/beat.c (renamed from arch/powerpc/platforms/celleb/beat.c)  2
-rw-r--r--  arch/powerpc/platforms/cell/beat.h (renamed from arch/powerpc/platforms/celleb/beat.h)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c (renamed from arch/powerpc/platforms/celleb/htab.c)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_hvCall.S (renamed from arch/powerpc/platforms/celleb/hvCall.S)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c (renamed from arch/powerpc/platforms/celleb/interrupt.c)  2
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h (renamed from arch/powerpc/platforms/celleb/interrupt.h)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_iommu.c (renamed from arch/powerpc/platforms/celleb/iommu.c)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_smp.c (renamed from arch/powerpc/platforms/celleb/smp.c)  2
-rw-r--r--  arch/powerpc/platforms/cell/beat_spu_priv1.c (renamed from arch/powerpc/platforms/celleb/spu_priv1.c)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_syscall.h (renamed from arch/powerpc/platforms/celleb/beat_syscall.h)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_udbg.c (renamed from arch/powerpc/platforms/celleb/udbg_beat.c)  0
-rw-r--r--  arch/powerpc/platforms/cell/beat_wrapper.h (renamed from arch/powerpc/platforms/celleb/beat_wrapper.h)  0
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c (renamed from arch/powerpc/platforms/celleb/pci.c)  50
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.h (renamed from arch/powerpc/platforms/celleb/pci.h)  19
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc.h (renamed from arch/powerpc/platforms/celleb/scc.h)  87
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_epci.c (renamed from arch/powerpc/platforms/celleb/scc_epci.c)  77
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c  547
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_sio.c (renamed from arch/powerpc/platforms/celleb/scc_sio.c)  0
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_uhc.c (renamed from arch/powerpc/platforms/celleb/scc_uhc.c)  2
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c (renamed from arch/powerpc/platforms/celleb/setup.c)  12
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.c  358
-rw-r--r--  arch/powerpc/platforms/cell/io-workarounds.h  49
-rw-r--r--  arch/powerpc/platforms/cell/setup.c  43
-rw-r--r--  arch/powerpc/platforms/cell/spider-pci.c  184
-rw-r--r--  arch/powerpc/platforms/celleb/Kconfig  12
-rw-r--r--  arch/powerpc/platforms/celleb/Makefile  9
-rw-r--r--  arch/powerpc/platforms/celleb/io-workarounds.c  280
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S  27
-rw-r--r--  arch/powerpc/platforms/ps3/os-area.c  1
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig  5
-rw-r--r--  arch/powerpc/platforms/pseries/Makefile  4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c  1
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_cache.c  1
-rw-r--r--  arch/powerpc/platforms/pseries/firmware.c  10
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c  39
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c  36
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c  4
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c  14
-rw-r--r--  arch/powerpc/platforms/pseries/scanlog.c  23
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c  17
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c  11
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c  1
-rw-r--r--  arch/powerpc/sysdev/axonram.c  5
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.c  41
-rw-r--r--  arch/powerpc/sysdev/fsl_soc.h  23
-rw-r--r--  arch/powerpc/sysdev/mv64x60_dev.c  52
-rw-r--r--  arch/powerpc/sysdev/mv64x60_udbg.c  2
97 files changed, 4604 insertions, 978 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bb2e9310a56..4e40c122bf26 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -626,20 +626,6 @@ config ADVANCED_OPTIONS
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
-config HIGHMEM_START_BOOL
- bool "Set high memory pool address"
- depends on ADVANCED_OPTIONS && HIGHMEM
- help
- This option allows you to set the base address of the kernel virtual
- area used to map high memory pages. This can be useful in
- optimizing the layout of kernel virtual memory.
-
- Say N here unless you know what you are doing.
-
-config HIGHMEM_START
- hex "Virtual start address of high memory pool" if HIGHMEM_START_BOOL
- default "0xfe000000"
-
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
depends on ADVANCED_OPTIONS
@@ -656,21 +642,76 @@ config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
default "0x30000000"
+config RELOCATABLE
+ bool "Build a relocatable kernel (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+ help
+ This builds a kernel image that is capable of running at the
+ location the kernel is loaded at (some alignment restrictions may
+ exist).
+
+ One use is for the kexec on panic case where the recovery kernel
+ must live at a different physical address than the primary
+ kernel.
+
+ Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
+ it has been loaded at, and the compile-time physical address
+ CONFIG_PHYSICAL_START is ignored. However, the CONFIG_PHYSICAL_START
+ setting can still be useful to bootwrappers that need to know the
+ load location of the kernel (eg. u-boot/mkimage).
+
+config PAGE_OFFSET_BOOL
+ bool "Set custom page offset address"
+ depends on ADVANCED_OPTIONS
+ help
+ This option allows you to set the kernel virtual address at which
+ the kernel will map low memory. This can be useful in optimizing
+ the virtual memory layout of the system.
+
+ Say N here unless you know what you are doing.
+
+config PAGE_OFFSET
+ hex "Virtual address of memory base" if PAGE_OFFSET_BOOL
+ default "0xc0000000"
+
config KERNEL_START_BOOL
bool "Set custom kernel base address"
depends on ADVANCED_OPTIONS
help
This option allows you to set the kernel virtual address at which
- the kernel will map low memory (the kernel image will be linked at
- this address). This can be useful in optimizing the virtual memory
- layout of the system.
+ the kernel will be loaded. Normally this should match PAGE_OFFSET;
+ however, there are times (like kdump) when one might not want them
+ to be the same.
Say N here unless you know what you are doing.
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
+ default PAGE_OFFSET if PAGE_OFFSET_BOOL
+ default "0xc2000000" if CRASH_DUMP
default "0xc0000000"
+config PHYSICAL_START_BOOL
+ bool "Set physical address where the kernel is loaded"
+ depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+ help
+ This gives the physical address where the kernel is loaded.
+
+ Say N here unless you know what you are doing.
+
+config PHYSICAL_START
+ hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
+ default "0x02000000" if PPC_STD_MMU && CRASH_DUMP
+ default "0x00000000"
+
+config PHYSICAL_ALIGN
+ hex
+ default "0x10000000" if FSL_BOOKE
+ help
+ This value sets the alignment restriction on the physical address
+ at which the kernel is loaded and run from. The kernel is compiled
+ for an address which meets this alignment restriction.
+
config TASK_SIZE_BOOL
bool "Set custom user task size"
depends on ADVANCED_OPTIONS
@@ -717,9 +758,17 @@ config PIN_TLB
endmenu
if PPC64
+config PAGE_OFFSET
+ hex
+ default "0xc000000000000000"
config KERNEL_START
hex
+ default "0xc000000002000000" if CRASH_DUMP
default "0xc000000000000000"
+config PHYSICAL_START
+ hex
+ default "0x02000000" if CRASH_DUMP
+ default "0x00000000"
endif
source "net/Kconfig"
@@ -754,3 +803,4 @@ config PPC_CLOCK
config PPC_LIB_RHEAP
bool
+source "arch/powerpc/kvm/Kconfig"
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index a86d8d853214..807a2dce6263 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -151,6 +151,9 @@ config BOOTX_TEXT
config PPC_EARLY_DEBUG
bool "Early debugging (dangerous)"
+ # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
+ # mark, which doesn't work with current 440 KVM.
+ depends on !KVM
help
Say Y to enable some early debugging facilities that may be available
for your processor/board combination. Those facilities are hacks
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e2ec4a91ccef..9dcdc036cdf7 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -145,6 +145,7 @@ core-y += arch/powerpc/kernel/ \
arch/powerpc/platforms/
core-$(CONFIG_MATH_EMULATION) += arch/powerpc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
+core-$(CONFIG_KVM) += arch/powerpc/kvm/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 5ef2bdf8d189..2347294ff35b 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -27,6 +27,7 @@ zImage.chrp
zImage.coff
zImage.coff.lds
zImage.ep*
+zImage.iseries
zImage.*lds
zImage.miboot
zImage.pmac
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 5ba50c673390..7822d25c9d31 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -40,7 +40,7 @@ $(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
-$(obj)/virtex405-head.o: BOOTCFLAGS += -mcpu=405
+$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
zlib := inffast.c inflate.c inftrees.c
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 6f3d38a1554f..39634124929b 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -142,8 +142,45 @@
#address-cells = <2>;
#size-cells = <1>;
clock-frequency = <0>; /* Filled in by U-Boot */
+ /* ranges property is supplied by U-Boot */
interrupts = <6 4>;
interrupt-parent = <&UIC1>;
+
+ nor_flash@0,0 {
+ compatible = "amd,s29gl512n", "cfi-flash";
+ bank-width = <2>;
+ reg = <0 000000 4000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "kernel";
+ reg = <0 1e0000>;
+ };
+ partition@1e0000 {
+ label = "dtb";
+ reg = <1e0000 20000>;
+ };
+ partition@200000 {
+ label = "ramdisk";
+ reg = <200000 1400000>;
+ };
+ partition@1600000 {
+ label = "jffs2";
+ reg = <1600000 400000>;
+ };
+ partition@1a00000 {
+ label = "user";
+ reg = <1a00000 2560000>;
+ };
+ partition@3f60000 {
+ label = "env";
+ reg = <3f60000 40000>;
+ };
+ partition@3fa0000 {
+ label = "u-boot";
+ reg = <3fa0000 60000>;
+ };
+ };
};
UART0: serial@ef600300 {
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index 958a5ca53d35..0f2fc077d8db 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -145,8 +145,45 @@
#address-cells = <2>;
#size-cells = <1>;
clock-frequency = <0>; /* Filled in by U-Boot */
+ /* ranges property is supplied by U-Boot */
interrupts = <6 4>;
interrupt-parent = <&UIC1>;
+
+ nor_flash@0,0 {
+ compatible = "amd,s29gl512n", "cfi-flash";
+ bank-width = <2>;
+ reg = <0 000000 4000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@0 {
+ label = "kernel";
+ reg = <0 1e0000>;
+ };
+ partition@1e0000 {
+ label = "dtb";
+ reg = <1e0000 20000>;
+ };
+ partition@200000 {
+ label = "ramdisk";
+ reg = <200000 1400000>;
+ };
+ partition@1600000 {
+ label = "jffs2";
+ reg = <1600000 400000>;
+ };
+ partition@1a00000 {
+ label = "user";
+ reg = <1a00000 2560000>;
+ };
+ partition@3f60000 {
+ label = "env";
+ reg = <3f60000 40000>;
+ };
+ partition@3fa0000 {
+ label = "u-boot";
+ reg = <3fa0000 60000>;
+ };
+ };
};
UART0: serial@ef600300 {
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
index 16c947b8a721..1f2f1e0a5571 100644
--- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts
+++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts
@@ -45,6 +45,11 @@
reg = <0x00000000 0x20000000>; // 512M at 0x0
};
+ board-control@e8000000 {
+ compatible = "fsl,fpga-pixis";
+ reg = <0xe8000000 32>; // pixis at 0xe8000000
+ };
+
soc@e0000000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -104,6 +109,13 @@
interrupt-parent = <&mpic>;
};
+ display@2c000 {
+ compatible = "fsl,diu";
+ reg = <0x2c000 100>;
+ interrupts = <72 2>;
+ interrupt-parent = <&mpic>;
+ };
+
mpic: interrupt-controller@40000 {
clock-frequency = <0>;
interrupt-controller;
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
index aef3bdc89160..8c9ead94be06 100644
--- a/arch/powerpc/boot/ns16550.c
+++ b/arch/powerpc/boot/ns16550.c
@@ -55,10 +55,15 @@ static u8 ns16550_tstc(void)
int ns16550_console_init(void *devp, struct serial_console_data *scdp)
{
int n;
+ u32 reg_offset;
if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
return -1;
+ n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
+ if (n == sizeof(reg_offset))
+ reg_base += reg_offset;
+
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
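For reference, the reg-offset value read above is folded into reg_base once at init time, so a 16550 register ends up at reg_base + reg-offset + (index << reg-shift). A minimal C sketch of that address computation follows; uart_reg is a hypothetical helper used only for illustration, not part of the bootwrapper.

#include <stdint.h>

/* Hypothetical helper: effective MMIO address of 16550 register 'index'
 * given the two optional device tree properties. */
static inline volatile unsigned char *uart_reg(unsigned char *reg_base,
                                               uint32_t reg_offset,
                                               uint32_t reg_shift,
                                               unsigned int index)
{
        return reg_base + reg_offset + (index << reg_shift);
}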
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ce1e8d24e747..9177b21b1a95 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -106,4 +106,13 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
+$(obj)/built-in.o: prom_init_check
+
+quiet_cmd_prom_init_check = CALL $<
+ cmd_prom_init_check = $(CONFIG_SHELL) $< "$(NM)" "$(obj)/prom_init.o"
+
+PHONY += prom_init_check
+prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
+ $(call cmd,prom_init_check)
+
clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 292c6d8db0e1..62134845af08 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,6 +23,9 @@
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
@@ -93,10 +96,7 @@ int main(void)
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
-#ifdef CONFIG_PPC32
- DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
@@ -165,13 +165,9 @@ int main(void)
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
-#ifndef CONFIG_PPC64
- DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
-#else /* CONFIG_PPC64 */
+ DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
+#ifdef CONFIG_PPC64
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
- /* 288 = # of volatile regs, int & fp, for leaf routines */
- /* which do not stack a frame. See the PPC64 ABI. */
- DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
@@ -331,5 +327,30 @@ int main(void)
DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+#ifdef CONFIG_KVM
+ DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+
+ DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
+ DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+ DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
+ DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+ DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
+ DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+ DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+ DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+ DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
+ DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
+ DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
+ DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+ DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+
+ DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+ DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+ DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S
index 5465e8de0e61..e3623e3e3451 100644
--- a/arch/powerpc/kernel/cpu_setup_44x.S
+++ b/arch/powerpc/kernel/cpu_setup_44x.S
@@ -33,7 +33,6 @@ _GLOBAL(__setup_cpu_440grx)
mtlr r4
blr
_GLOBAL(__setup_cpu_460ex)
-_GLOBAL(__setup_cpu_460gt)
b __init_fpu_44x
_GLOBAL(__setup_cpu_440gx)
_GLOBAL(__setup_cpu_440spe)
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f1ee0b3f78f2..72d1d7395254 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -17,7 +17,13 @@
#include <asm/cache.h>
_GLOBAL(__setup_cpu_603)
- b setup_common_caches
+ mflr r4
+BEGIN_FTR_SECTION
+ bl __init_fpu_registers
+END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
+ bl setup_common_caches
+ mtlr r4
+ blr
_GLOBAL(__setup_cpu_604)
mflr r4
bl setup_common_caches
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 26ffb44e2701..36080d4d1922 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -37,7 +37,6 @@ extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
@@ -1416,10 +1415,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pvr_value = 0x13020000,
.cpu_name = "460GT",
.cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features = COMMON_USER_BOOKE,
.icache_bsize = 32,
.dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460gt,
.machine_check = machine_check_440A,
.platform = "ppc440",
},
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 4ff744143566..e581524d85bc 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -371,6 +371,17 @@ skpinv: addi r6,r6,1 /* Increment */
bl early_init
+#ifdef CONFIG_RELOCATABLE
+ lis r3,kernstart_addr@ha
+ la r3,kernstart_addr@l(r3)
+#ifdef CONFIG_PHYS_64BIT
+ stw r23,0(r3)
+ stw r25,4(r3)
+#else
+ stw r25,0(r3)
+#endif
+#endif
+
mfspr r3,SPRN_TLB1CFG
andi. r3,r3,0xfff
lis r4,num_tlbcam_entries@ha
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 9d2c56621f1e..92ccc6fcc5b0 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -152,7 +152,7 @@ _GLOBAL(low_choose_750fx_pll)
mtspr SPRN_HID1,r4
/* Store new HID1 image */
- rlwinm r6,r1,0,0,18
+ rlwinm r6,r1,0,0,(31-THREAD_SHIFT)
lwz r6,TI_CPU(r6)
slwi r6,r6,2
addis r6,r6,nap_save_hid1@ha
@@ -281,7 +281,7 @@ _GLOBAL(_tlbia)
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,18
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
lwz r8,TI_CPU(r8)
oris r8,r8,10
mfmsr r10
@@ -377,7 +377,7 @@ _GLOBAL(_tlbie)
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,18
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
lwz r8,TI_CPU(r8)
oris r8,r8,11
mfmsr r10
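The rlwinm changes above derive the thread_info pointer from the stack pointer by clearing its low THREAD_SHIFT bits; the old hard-coded mask end of 18 silently assumed 8 KB stacks. A rough C equivalent, as a sketch only and assuming THREAD_SHIFT == 13 as on a stock 32-bit build:

#define THREAD_SHIFT 13                     /* assumed: 8 KB kernel stacks */
#define THREAD_SIZE  (1UL << THREAD_SHIFT)

/* Equivalent of "rlwinm rX,r1,0,0,(31-THREAD_SHIFT)": keep the upper bits
 * of the stack pointer, i.e. round down to the bottom of the stack where
 * the thread_info lives. With THREAD_SHIFT == 13, 31 - 13 gives the old
 * mask end of 18. */
static inline unsigned long thread_info_from_sp(unsigned long sp)
{
        return sp & ~(THREAD_SIZE - 1);
}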
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index a3c491e88a72..942951e76586 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,23 +27,11 @@
.text
-_GLOBAL(get_msr)
- mfmsr r3
- blr
-
-_GLOBAL(get_srr0)
- mfsrr0 r3
- blr
-
-_GLOBAL(get_srr1)
- mfsrr1 r3
- blr
-
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
- stdu r1,THREAD_SIZE-112(r3)
+ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
mr r1,r3
bl .__do_softirq
ld r1,0(r1)
@@ -56,7 +44,7 @@ _GLOBAL(call_handle_irq)
mflr r0
std r0,16(r1)
mtctr r8
- stdu r1,THREAD_SIZE-112(r5)
+ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
mr r1,r5
bctrl
ld r1,0(r1)
@@ -599,7 +587,7 @@ _GLOBAL(kexec_sequence)
std r0,16(r1)
/* switch stacks to newstack -- &kexec_stack.stack */
- stdu r1,THREAD_SIZE-112(r3)
+ stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
mr r1,r3
li r0,0
@@ -616,7 +604,7 @@ _GLOBAL(kexec_sequence)
std r26,-48(r1)
std r25,-56(r1)
- stdu r1,-112-64(r1)
+ stdu r1,-STACK_FRAME_OVERHEAD-64(r1)
/* save args into preserved regs */
mr r31,r3 /* newstack (both) */
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index fb698d47082d..e79ad8afda07 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -275,6 +275,8 @@ static int __devinit of_pci_phb_probe(struct of_device *dev,
/* Scan the bus */
scan_phb(phb);
+ if (phb->bus == NULL)
+ return -ENXIO;
/* Claim resources. This might need some rework as well depending
* whether we are doing probe-only or not, like assigning unassigned
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ac163bd46cfd..c9bf17eec31b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -7,17 +7,11 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/types.h>
#include <linux/threads.h>
#include <linux/module.h>
-#include <asm/processor.h>
-#include <asm/ptrace.h>
-#include <asm/page.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
-#include <asm/mmu.h>
-
/* This symbol is provided by the linker - let it fill in the paca
* field correctly */
@@ -65,60 +59,29 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
* processors. The processor VPD array needs one entry per physical
* processor (not thread).
*/
-#define PACA_INIT(number) \
-{ \
- .lppaca_ptr = &lppaca[number], \
- .lock_token = 0x8000, \
- .paca_index = (number), /* Paca Index */ \
- .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
- .hw_cpu_id = 0xffff, \
- .slb_shadow_ptr = &slb_shadow[number], \
- .__current = &init_task, \
-}
-
-struct paca_struct paca[] = {
- PACA_INIT(0),
-#if NR_CPUS > 1
- PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3),
-#if NR_CPUS > 4
- PACA_INIT( 4), PACA_INIT( 5), PACA_INIT( 6), PACA_INIT( 7),
-#if NR_CPUS > 8
- PACA_INIT( 8), PACA_INIT( 9), PACA_INIT( 10), PACA_INIT( 11),
- PACA_INIT( 12), PACA_INIT( 13), PACA_INIT( 14), PACA_INIT( 15),
- PACA_INIT( 16), PACA_INIT( 17), PACA_INIT( 18), PACA_INIT( 19),
- PACA_INIT( 20), PACA_INIT( 21), PACA_INIT( 22), PACA_INIT( 23),
- PACA_INIT( 24), PACA_INIT( 25), PACA_INIT( 26), PACA_INIT( 27),
- PACA_INIT( 28), PACA_INIT( 29), PACA_INIT( 30), PACA_INIT( 31),
-#if NR_CPUS > 32
- PACA_INIT( 32), PACA_INIT( 33), PACA_INIT( 34), PACA_INIT( 35),
- PACA_INIT( 36), PACA_INIT( 37), PACA_INIT( 38), PACA_INIT( 39),
- PACA_INIT( 40), PACA_INIT( 41), PACA_INIT( 42), PACA_INIT( 43),
- PACA_INIT( 44), PACA_INIT( 45), PACA_INIT( 46), PACA_INIT( 47),
- PACA_INIT( 48), PACA_INIT( 49), PACA_INIT( 50), PACA_INIT( 51),
- PACA_INIT( 52), PACA_INIT( 53), PACA_INIT( 54), PACA_INIT( 55),
- PACA_INIT( 56), PACA_INIT( 57), PACA_INIT( 58), PACA_INIT( 59),
- PACA_INIT( 60), PACA_INIT( 61), PACA_INIT( 62), PACA_INIT( 63),
-#if NR_CPUS > 64
- PACA_INIT( 64), PACA_INIT( 65), PACA_INIT( 66), PACA_INIT( 67),
- PACA_INIT( 68), PACA_INIT( 69), PACA_INIT( 70), PACA_INIT( 71),
- PACA_INIT( 72), PACA_INIT( 73), PACA_INIT( 74), PACA_INIT( 75),
- PACA_INIT( 76), PACA_INIT( 77), PACA_INIT( 78), PACA_INIT( 79),
- PACA_INIT( 80), PACA_INIT( 81), PACA_INIT( 82), PACA_INIT( 83),
- PACA_INIT( 84), PACA_INIT( 85), PACA_INIT( 86), PACA_INIT( 87),
- PACA_INIT( 88), PACA_INIT( 89), PACA_INIT( 90), PACA_INIT( 91),
- PACA_INIT( 92), PACA_INIT( 93), PACA_INIT( 94), PACA_INIT( 95),
- PACA_INIT( 96), PACA_INIT( 97), PACA_INIT( 98), PACA_INIT( 99),
- PACA_INIT(100), PACA_INIT(101), PACA_INIT(102), PACA_INIT(103),
- PACA_INIT(104), PACA_INIT(105), PACA_INIT(106), PACA_INIT(107),
- PACA_INIT(108), PACA_INIT(109), PACA_INIT(110), PACA_INIT(111),
- PACA_INIT(112), PACA_INIT(113), PACA_INIT(114), PACA_INIT(115),
- PACA_INIT(116), PACA_INIT(117), PACA_INIT(118), PACA_INIT(119),
- PACA_INIT(120), PACA_INIT(121), PACA_INIT(122), PACA_INIT(123),
- PACA_INIT(124), PACA_INIT(125), PACA_INIT(126), PACA_INIT(127),
-#endif
-#endif
-#endif
-#endif
-#endif
-};
+struct paca_struct paca[NR_CPUS];
EXPORT_SYMBOL(paca);
+
+void __init initialise_pacas(void)
+{
+ int cpu;
+
+ /* The TOC register (GPR2) points 32kB into the TOC, so that 64kB
+ * of the TOC can be addressed using a single machine instruction.
+ */
+ unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL;
+
+ /* Can't use for_each_*_cpu, as they aren't functional yet */
+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ struct paca_struct *new_paca = &paca[cpu];
+
+ new_paca->lppaca_ptr = &lppaca[cpu];
+ new_paca->lock_token = 0x8000;
+ new_paca->paca_index = cpu;
+ new_paca->kernel_toc = kernel_toc;
+ new_paca->hw_cpu_id = 0xffff;
+ new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+ new_paca->__current = &init_task;
+
+ }
+}
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/kernel/ppc32.h
index fda05e2211d6..90e562771791 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/kernel/ppc32.h
@@ -135,6 +135,4 @@ struct ucontext32 {
struct mcontext32 uc_mcontext;
};
-extern int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s);
-
#endif /* _PPC64_PPC32_H */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 703100d5e458..6caad17ea72e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1033,3 +1033,34 @@ void ppc64_runlatch_off(void)
}
}
#endif
+
+#if THREAD_SHIFT < PAGE_SHIFT
+
+static struct kmem_cache *thread_info_cache;
+
+struct thread_info *alloc_thread_info(struct task_struct *tsk)
+{
+ struct thread_info *ti;
+
+ ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
+ if (unlikely(ti == NULL))
+ return NULL;
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ memset(ti, 0, THREAD_SIZE);
+#endif
+ return ti;
+}
+
+void free_thread_info(struct thread_info *ti)
+{
+ kmem_cache_free(thread_info_cache, ti);
+}
+
+void thread_info_cache_init(void)
+{
+ thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ THREAD_SIZE, 0, NULL);
+ BUG_ON(thread_info_cache == NULL);
+}
+
+#endif /* THREAD_SHIFT < PAGE_SHIFT */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3bfe7837e820..2aefe2a4129a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -53,6 +53,7 @@
#include <asm/pci-bridge.h>
#include <asm/phyp_dump.h>
#include <asm/kexec.h>
+#include <mm/mmu_decl.h>
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -978,7 +979,10 @@ static int __init early_init_dt_scan_memory(unsigned long node,
}
#endif
lmb_add(base, size);
+
+ memstart_addr = min((u64)memstart_addr, base);
}
+
return 0;
}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
new file mode 100644
index 000000000000..8e24fc1821e8
--- /dev/null
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -0,0 +1,58 @@
+#!/bin/sh
+#
+# Copyright © 2008 IBM Corporation
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks prom_init.o to see what external symbols it
+# is using, if it finds symbols not in the whitelist it returns
+# an error. The point of this is to discourage people from
+# intentionally or accidentally adding new code to prom_init.c
+# which has side effects on other parts of the kernel.
+
+# If you really need to reference something from prom_init.o add
+# it to the list below:
+
+WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
+_end enter_prom memcpy memset reloc_offset __secondary_hold
+__secondary_hold_acknowledge __secondary_hold_spinloop __start
+strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
+reloc_got2"
+
+NM="$1"
+OBJ="$2"
+
+ERROR=0
+
+for UNDEF in $($NM -u $OBJ | awk '{print $2}')
+do
+ # On 64-bit nm gives us the function descriptors, which have
+ # a leading . on the name, so strip it off here.
+ UNDEF="${UNDEF#.}"
+
+ if [ $KBUILD_VERBOSE ]; then
+ if [ $KBUILD_VERBOSE -ne 0 ]; then
+ echo "Checking prom_init.o symbol '$UNDEF'"
+ fi
+ fi
+
+ OK=0
+ for WHITE in $WHITELIST
+ do
+ if [ "$UNDEF" = "$WHITE" ]; then
+ OK=1
+ break
+ fi
+ done
+
+ if [ $OK -eq 0 ]; then
+ ERROR=1
+ echo "Error: External symbol '$UNDEF' referenced" \
+ "from prom_init.c" >&2
+ fi
+done
+
+exit $ERROR
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 9d30e10970ac..4c1de6af4c09 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -29,15 +29,12 @@
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/compat.h>
-#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
-#include "ppc32.h"
-
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -67,27 +64,6 @@ static long compat_ptrace_old(struct task_struct *child, long request,
return -EPERM;
}
-static int compat_ptrace_getsiginfo(struct task_struct *child, compat_siginfo_t __user *data)
-{
- siginfo_t lastinfo;
- int error = -ESRCH;
-
- read_lock(&tasklist_lock);
- if (likely(child->sighand != NULL)) {
- error = -EINVAL;
- spin_lock_irq(&child->sighand->siglock);
- if (likely(child->last_siginfo != NULL)) {
- lastinfo = *child->last_siginfo;
- error = 0;
- }
- spin_unlock_irq(&child->sighand->siglock);
- }
- read_unlock(&tasklist_lock);
- if (!error)
- return copy_siginfo_to_user32(data, &lastinfo);
- return error;
-}
-
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
@@ -306,9 +282,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
0, PT_REGS_COUNT * sizeof(compat_long_t),
compat_ptr(data));
- case PTRACE_GETSIGINFO:
- return compat_ptrace_getsiginfo(child, compat_ptr(data));
-
case PTRACE_GETFPREGS:
case PTRACE_SETFPREGS:
case PTRACE_GETVRREGS:
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 31ada9fdfc5c..dff6308d1b5e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -170,6 +170,9 @@ void __init setup_paca(int cpu)
void __init early_setup(unsigned long dt_ptr)
{
+ /* Fill in any uninitialised pacas */
+ initialise_pacas();
+
/* Identify CPU type */
identify_cpu(0, mfspr(SPRN_PVR));
@@ -435,7 +438,7 @@ void __init setup_system(void)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#if PHYSICAL_START > 0
- printk("physical_start = 0x%x\n", PHYSICAL_START);
+ printk("physical_start = 0x%lx\n", PHYSICAL_START);
#endif
printk("-----------------------------------------------------\n");
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e3638eeaaae7..962944038430 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -13,7 +13,6 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
/*
* Save stack-backtrace addresses into a stack_trace buffer.
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 7aad6203e411..7d6c9bb8c77f 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -154,8 +154,8 @@ static void udbg_console_write(struct console *con, const char *s,
static struct console udbg_console = {
.name = "udbg",
.write = udbg_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT,
- .index = -1,
+ .flags = CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME,
+ .index = 0,
};
static int early_console_initialized;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644
index 000000000000..f5d7a5eab96e
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <asm/mmu-44x.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
+#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
+
+static unsigned int kvmppc_tlb_44x_pos;
+
+static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
+{
+ /* Mask off reserved bits. */
+ attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+
+ if (!usermode) {
+ /* Guest is in supervisor mode, so we need to translate guest
+ * supervisor permissions into user permissions. */
+ attrib &= ~PPC44x_TLB_USER_PERM_MASK;
+ attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
+ }
+
+ /* Make sure host can always access this memory. */
+ attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
+
+ return attrib;
+}
+
+/* Search the guest TLB for a matching entry. */
+int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
+ unsigned int as)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+ unsigned int tid;
+
+ if (eaddr < get_tlb_eaddr(tlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(tlbe))
+ continue;
+
+ tid = get_tlb_tid(tlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ if (!get_tlb_v(tlbe))
+ continue;
+
+ if (get_tlb_ts(tlbe) != as)
+ continue;
+
+ return i;
+ }
+
+ return -1;
+}
+
+struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+ unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+ unsigned int index;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+ if (index == -1)
+ return NULL;
+ return &vcpu->arch.guest_tlb[index];
+}
+
+static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+{
+ return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
+}
+
+/* Must be called with mmap_sem locked for writing. */
+static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
+ unsigned int index)
+{
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
+ struct page *page = vcpu->arch.shadow_pages[index];
+
+ kunmap(vcpu->arch.shadow_pages[index]);
+
+ if (get_tlb_v(stlbe)) {
+ if (kvmppc_44x_tlbe_is_writable(stlbe))
+ kvm_release_page_dirty(page);
+ else
+ kvm_release_page_clean(page);
+ }
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
+ u32 flags)
+{
+ struct page *new_page;
+ struct tlbe *stlbe;
+ hpa_t hpaddr;
+ unsigned int victim;
+
+ /* Future optimization: don't overwrite the TLB entry containing the
+ * current PC (or stack?). */
+ victim = kvmppc_tlb_44x_pos++;
+ if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
+ kvmppc_tlb_44x_pos = 0;
+ stlbe = &vcpu->arch.shadow_tlb[victim];
+
+ /* Get reference to new page. */
+ down_write(&current->mm->mmap_sem);
+ new_page = gfn_to_page(vcpu->kvm, gfn);
+ if (is_error_page(new_page)) {
+ printk(KERN_ERR "Couldn't get guest page!\n");
+ kvm_release_page_clean(new_page);
+ return;
+ }
+ hpaddr = page_to_phys(new_page);
+
+ /* Drop reference to old page. */
+ kvmppc_44x_shadow_release(vcpu, victim);
+ up_write(&current->mm->mmap_sem);
+
+ vcpu->arch.shadow_pages[victim] = new_page;
+
+ /* XXX Make sure (va, size) doesn't overlap any other
+ * entries. 440x6 user manual says the result would be
+ * "undefined." */
+
+ /* XXX what about AS? */
+
+ stlbe->tid = asid & 0xff;
+
+ /* Force TS=1 for all guest mappings. */
+ /* For now we hardcode 4KB mappings, but it will be important to
+ * use host large pages in the future. */
+ stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
+ | PPC44x_TLB_4K;
+
+ stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+ stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+ vcpu->arch.msr & MSR_PR);
+}
+
+void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
+{
+ unsigned int pid = asid & 0xff;
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+ unsigned int tid;
+
+ if (!get_tlb_v(stlbe))
+ continue;
+
+ if (eaddr < get_tlb_eaddr(stlbe))
+ continue;
+
+ if (eaddr > get_tlb_end(stlbe))
+ continue;
+
+ tid = get_tlb_tid(stlbe);
+ if (tid && (tid != pid))
+ continue;
+
+ kvmppc_44x_shadow_release(vcpu, i);
+ stlbe->word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
+
+/* Invalidate all mappings, so that when they fault back in they will get the
+ * proper permission bits. */
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+ int i;
+
+ /* XXX Replace loop with fancy data structures. */
+ down_write(&current->mm->mmap_sem);
+ for (i = 0; i <= tlb_44x_hwater; i++) {
+ kvmppc_44x_shadow_release(vcpu, i);
+ vcpu->arch.shadow_tlb[i].word0 = 0;
+ }
+ up_write(&current->mm->mmap_sem);
+}
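The word1 value written in kvmppc_mmu_map() above packs a 36-bit host physical address into the 440 TLB layout: the low word supplies the real page number, and the bits above 4 GB land in the low nibble as the extended RPN. A small standalone sketch with a worked example (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Same bit manipulation as the word1 assignment in kvmppc_mmu_map(). */
static uint32_t tlb_word1(uint64_t hpaddr)
{
        return (uint32_t)(hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
}

int main(void)
{
        uint64_t hpaddr = 0x123456000ULL;   /* 36-bit host physical address */

        /* RPN from the low word, ERPN (bits 32..35) in the low nibble:
         * prints word1 = 0x23456001. */
        printf("word1 = 0x%08x\n", tlb_word1(hpaddr));
        return 0;
}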
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644
index 000000000000..2ccd46b6f6b7
--- /dev/null
+++ b/arch/powerpc/kvm/44x_tlb.h
@@ -0,0 +1,91 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_POWERPC_TLB_H__
+#define __KVM_POWERPC_TLB_H__
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-44x.h>
+
+extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
+ unsigned int pid, unsigned int as);
+extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+
+/* TLB helper functions */
+static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 4) & 0xf;
+}
+
+static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+{
+ return tlbe->word0 & 0xfffffc00;
+}
+
+static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
+{
+ unsigned int pgsize = get_tlb_size(tlbe);
+ return 1 << 10 << (pgsize << 1);
+}
+
+static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+{
+ return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+{
+ u64 word1 = tlbe->word1;
+ return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
+}
+
+static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+{
+ return tlbe->tid & 0xff;
+}
+
+static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 8) & 0x1;
+}
+
+static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+{
+ return (tlbe->word0 >> 9) & 0x1;
+}
+
+static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.mmucr & 0xff;
+}
+
+static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
+{
+ return (vcpu->arch.mmucr >> 16) & 0x1;
+}
+
+static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
+{
+ unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
+
+ return get_tlb_raddr(tlbe) | (eaddr & pgmask);
+}
+
+#endif /* __KVM_POWERPC_TLB_H__ */
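get_tlb_bytes() above decodes the 440's 4-bit page-size field as 1 KB * 4^code. A quick standalone check of a few codes (illustrative only, not kernel code):

#include <stdio.h>

/* Same arithmetic as get_tlb_bytes(): 1 KB shifted left by twice the code. */
static unsigned long tlb_bytes(unsigned int pgsize_code)
{
        return 1UL << 10 << (pgsize_code << 1);
}

int main(void)
{
        printf("code 1 -> %lu KB\n", tlb_bytes(1) >> 10);   /* 4 KB   */
        printf("code 4 -> %lu KB\n", tlb_bytes(4) >> 10);   /* 256 KB */
        printf("code 7 -> %lu MB\n", tlb_bytes(7) >> 20);   /* 16 MB  */
        return 0;
}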
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644
index 000000000000..6b076010213b
--- /dev/null
+++ b/arch/powerpc/kvm/Kconfig
@@ -0,0 +1,42 @@
+#
+# KVM configuration
+#
+
+menuconfig VIRTUALIZATION
+ bool "Virtualization"
+ ---help---
+ Say Y here to get to see options for using your Linux host to run
+ other operating systems inside virtual machines (guests).
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and
+ disabled.
+
+if VIRTUALIZATION
+
+config KVM
+ bool "Kernel-based Virtual Machine (KVM) support"
+ depends on 44x && EXPERIMENTAL
+ select PREEMPT_NOTIFIERS
+ select ANON_INODES
+ # We can only run on Book E hosts so far
+ select KVM_BOOKE_HOST
+ ---help---
+ Support hosting virtualized guest machines. You will also
+ need to select one or more of the processor modules below.
+
+ This module provides access to the hardware capabilities through
+ a character device node named /dev/kvm.
+
+ If unsure, say N.
+
+config KVM_BOOKE_HOST
+ bool "KVM host support for Book E PowerPC processors"
+ depends on KVM && 44x
+ ---help---
+ Provides host support for KVM on Book E PowerPC processors. Currently
+ this works on 440 processors only.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
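The KVM help text refers to the /dev/kvm character device. As a rough illustration of that interface (the generic KVM userspace API, not anything specific to this PowerPC port), a userspace program would open the device and query the API version before creating a VM:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        if (kvm < 0) {
                perror("open /dev/kvm");
                return 1;
        }

        /* The stable KVM API is expected to report version 12. */
        int version = ioctl(kvm, KVM_GET_API_VERSION, 0);
        printf("KVM API version: %d\n", version);

        close(kvm);
        return 0;
}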
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644
index 000000000000..d0d358d367ec
--- /dev/null
+++ b/arch/powerpc/kvm/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+obj-$(CONFIG_KVM) += kvm.o
+
+AFLAGS_booke_interrupts.o := -I$(obj)
+
+kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
+obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644
index 000000000000..6d9884a6884a
--- /dev/null
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -0,0 +1,615 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+ { "exits", VCPU_STAT(sum_exits) },
+ { "mmio", VCPU_STAT(mmio_exits) },
+ { "dcr", VCPU_STAT(dcr_exits) },
+ { "sig", VCPU_STAT(signal_exits) },
+ { "light", VCPU_STAT(light_exits) },
+ { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
+ { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
+ { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
+ { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
+ { "sysc", VCPU_STAT(syscall_exits) },
+ { "isi", VCPU_STAT(isi_exits) },
+ { "dsi", VCPU_STAT(dsi_exits) },
+ { "inst_emu", VCPU_STAT(emulated_inst_exits) },
+ { "dec", VCPU_STAT(dec_exits) },
+ { "ext_intr", VCPU_STAT(ext_intr_exits) },
+ { NULL }
+};
+
+static const u32 interrupt_msr_mask[16] = {
+ [BOOKE_INTERRUPT_CRITICAL] = MSR_ME,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
+ [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME,
+ [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE,
+ [BOOKE_INTERRUPT_DEBUG] = MSR_ME,
+};
+
+const unsigned char exception_priority[] = {
+ [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
+ [BOOKE_INTERRUPT_INST_STORAGE] = 1,
+ [BOOKE_INTERRUPT_ALIGNMENT] = 2,
+ [BOOKE_INTERRUPT_PROGRAM] = 3,
+ [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
+ [BOOKE_INTERRUPT_SYSCALL] = 5,
+ [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
+ [BOOKE_INTERRUPT_DTLB_MISS] = 7,
+ [BOOKE_INTERRUPT_ITLB_MISS] = 8,
+ [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
+ [BOOKE_INTERRUPT_DEBUG] = 10,
+ [BOOKE_INTERRUPT_CRITICAL] = 11,
+ [BOOKE_INTERRUPT_WATCHDOG] = 12,
+ [BOOKE_INTERRUPT_EXTERNAL] = 13,
+ [BOOKE_INTERRUPT_FIT] = 14,
+ [BOOKE_INTERRUPT_DECREMENTER] = 15,
+};
+
+const unsigned char priority_exception[] = {
+ BOOKE_INTERRUPT_DATA_STORAGE,
+ BOOKE_INTERRUPT_INST_STORAGE,
+ BOOKE_INTERRUPT_ALIGNMENT,
+ BOOKE_INTERRUPT_PROGRAM,
+ BOOKE_INTERRUPT_FP_UNAVAIL,
+ BOOKE_INTERRUPT_SYSCALL,
+ BOOKE_INTERRUPT_AP_UNAVAIL,
+ BOOKE_INTERRUPT_DTLB_MISS,
+ BOOKE_INTERRUPT_ITLB_MISS,
+ BOOKE_INTERRUPT_MACHINE_CHECK,
+ BOOKE_INTERRUPT_DEBUG,
+ BOOKE_INTERRUPT_CRITICAL,
+ BOOKE_INTERRUPT_WATCHDOG,
+ BOOKE_INTERRUPT_EXTERNAL,
+ BOOKE_INTERRUPT_FIT,
+ BOOKE_INTERRUPT_DECREMENTER,
+};
+
+
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe;
+ int i;
+
+ printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+ printk("| %2s | %3s | %8s | %8s | %8s |\n",
+ "nr", "tid", "word0", "word1", "word2");
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.guest_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" G%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+
+ for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+ tlbe = &vcpu->arch.shadow_tlb[i];
+ if (tlbe->word0 & PPC44x_TLB_VALID)
+ printk(" S%2d | %02X | %08X | %08X | %08X |\n",
+ i, tlbe->tid, tlbe->word0, tlbe->word1,
+ tlbe->word2);
+ }
+}
+
+/* TODO: use vcpu_printf() */
+void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
+{
+ int i;
+
+ printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr);
+ printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
+ printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
+
+ printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+ for (i = 0; i < 32; i += 4) {
+ printk("gpr%02d: %08x %08x %08x %08x\n", i,
+ vcpu->arch.gpr[i],
+ vcpu->arch.gpr[i+1],
+ vcpu->arch.gpr[i+2],
+ vcpu->arch.gpr[i+3]);
+ }
+}
+
+/* Check if we are ready to deliver the interrupt */
+static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ int r;
+
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_CRITICAL:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ r = vcpu->arch.msr & MSR_ME;
+ break;
+ case BOOKE_INTERRUPT_EXTERNAL:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_DECREMENTER:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_FIT:
+ r = vcpu->arch.msr & MSR_EE;
+ break;
+ case BOOKE_INTERRUPT_WATCHDOG:
+ r = vcpu->arch.msr & MSR_CE;
+ break;
+ case BOOKE_INTERRUPT_DEBUG:
+ r = vcpu->arch.msr & MSR_DE;
+ break;
+ default:
+ r = 1;
+ }
+
+ return r;
+}
+
+static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+ switch (interrupt) {
+ case BOOKE_INTERRUPT_DECREMENTER:
+ vcpu->arch.tsr |= TSR_DIS;
+ break;
+ }
+
+ vcpu->arch.srr0 = vcpu->arch.pc;
+ vcpu->arch.srr1 = vcpu->arch.msr;
+ vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
+ kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
+}
+
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+{
+ unsigned long *pending = &vcpu->arch.pending_exceptions;
+ unsigned int exception;
+ unsigned int priority;
+
+ priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+ while (priority <= BOOKE_MAX_INTERRUPT) {
+ exception = priority_exception[priority];
+ if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
+ kvmppc_clear_exception(vcpu, exception);
+ kvmppc_deliver_interrupt(vcpu, exception);
+ break;
+ }
+
+ priority = find_next_bit(pending,
+ BITS_PER_BYTE * sizeof(*pending),
+ priority + 1);
+ }
+}
+
+static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+/**
+ * kvmppc_handle_exit
+ *
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int exit_nr)
+{
+ enum emulation_result er;
+ int r = RESUME_HOST;
+
+ local_irq_enable();
+
+ run->exit_reason = KVM_EXIT_UNKNOWN;
+ run->ready_for_interrupt_injection = 1;
+
+ switch (exit_nr) {
+ case BOOKE_INTERRUPT_MACHINE_CHECK:
+ printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
+ kvmppc_dump_vcpu(vcpu);
+ r = RESUME_HOST;
+ break;
+
+ case BOOKE_INTERRUPT_EXTERNAL:
+ case BOOKE_INTERRUPT_DECREMENTER:
+ /* Since we switched IVPR back to the host's value, the host
+ * handled this interrupt the moment we enabled interrupts.
+ * Now we just offer it a chance to reschedule the guest. */
+
+ /* XXX At this point the TLB still holds our shadow TLB, so if
+ * we do reschedule the host will fault over it. Perhaps we
+ * should politely restore the host's entries to minimize
+ * misses before ceding control. */
+ if (need_resched())
+ cond_resched();
+ if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
+ vcpu->stat.dec_exits++;
+ else
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_PROGRAM:
+ if (vcpu->arch.msr & MSR_PR) {
+ /* Program traps generated by user-level software must be handled
+ * by the guest kernel. */
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if
+ * they were actually modified by emulation. */
+ vcpu->stat.emulated_inst_exits++;
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_DCR:
+ run->exit_reason = KVM_EXIT_DCR;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
+ __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+ run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+ break;
+
+ case BOOKE_INTERRUPT_DATA_STORAGE:
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.dsi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_INST_STORAGE:
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.isi_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_SYSCALL:
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.syscall_exits++;
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_DTLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.fault_dear;
+ gfn_t gfn;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->arch.dear = vcpu->arch.fault_dear;
+ vcpu->arch.esr = vcpu->arch.fault_esr;
+ vcpu->stat.dtlb_real_miss_exits++;
+ r = RESUME_GUEST;
+ break;
+ }
+
+ vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
+ gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't, and it is RAM. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ vcpu->stat.dtlb_virt_miss_exits++;
+ r = RESUME_GUEST;
+ } else {
+ /* Guest has mapped and accessed a page which is not
+ * actually RAM. */
+ r = kvmppc_emulate_mmio(run, vcpu);
+ }
+
+ break;
+ }
+
+ case BOOKE_INTERRUPT_ITLB_MISS: {
+ struct tlbe *gtlbe;
+ unsigned long eaddr = vcpu->arch.pc;
+ gfn_t gfn;
+
+ r = RESUME_GUEST;
+
+ /* Check the guest TLB. */
+ gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
+ if (!gtlbe) {
+ /* The guest didn't have a mapping for it. */
+ kvmppc_queue_exception(vcpu, exit_nr);
+ vcpu->stat.itlb_real_miss_exits++;
+ break;
+ }
+
+ vcpu->stat.itlb_virt_miss_exits++;
+
+ gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+
+ if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+ /* The guest TLB had a mapping, but the shadow TLB
+ * didn't. This could be because:
+ * a) the entry is mapping the host kernel, or
+ * b) the guest used a large mapping which we're faking
+ * Either way, we need to satisfy the fault without
+ * invoking the guest. */
+ kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+ gtlbe->word2);
+ } else {
+ /* Guest mapped and leaped at non-RAM! */
+ kvmppc_queue_exception(vcpu,
+ BOOKE_INTERRUPT_MACHINE_CHECK);
+ }
+
+ break;
+ }
+
+ default:
+ printk(KERN_EMERG "exit_nr %d\n", exit_nr);
+ BUG();
+ }
+
+ local_irq_disable();
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ /* Do some exit accounting. */
+ vcpu->stat.sum_exits++;
+ if (!(r & RESUME_HOST)) {
+ /* To avoid clobbering exit_reason, only check for signals if
+ * we aren't already exiting to userspace for some other
+ * reason. */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+
+ vcpu->stat.signal_exits++;
+ } else {
+ vcpu->stat.light_exits++;
+ }
+ } else {
+ switch (run->exit_reason) {
+ case KVM_EXIT_MMIO:
+ vcpu->stat.mmio_exits++;
+ break;
+ case KVM_EXIT_DCR:
+ vcpu->stat.dcr_exits++;
+ break;
+ case KVM_EXIT_INTR:
+ vcpu->stat.signal_exits++;
+ break;
+ }
+ }
+
+ return r;
+}
+
+/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+ struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
+
+ tlbe->tid = 0;
+ tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
+ tlbe->word1 = 0;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
+
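+	/* The second entry below is a 4KB, cache-inhibited, guarded mapping of
+	 * the 0xef600000 on-chip peripheral area (home of the 44x UARTs), so an
+	 * unmodified guest can reach its early serial console. */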
+ tlbe++;
+ tlbe->tid = 0;
+ tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
+ tlbe->word1 = 0xef600000;
+ tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
+ | PPC44x_TLB_I | PPC44x_TLB_G;
+
+ vcpu->arch.pc = 0;
+ vcpu->arch.msr = 0;
+ vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+
+ /* Eye-catching number so we know if the guest takes an interrupt
+ * before it's programmed its own IVPR. */
+ vcpu->arch.ivpr = 0x55550000;
+
+ /* Since the guest can directly access the timebase, it must know the
+ * real timebase frequency. Accordingly, it must see the state of
+ * CCR1[TCS]. */
+ vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ regs->pc = vcpu->arch.pc;
+ regs->cr = vcpu->arch.cr;
+ regs->ctr = vcpu->arch.ctr;
+ regs->lr = vcpu->arch.lr;
+ regs->xer = vcpu->arch.xer;
+ regs->msr = vcpu->arch.msr;
+ regs->srr0 = vcpu->arch.srr0;
+ regs->srr1 = vcpu->arch.srr1;
+ regs->pid = vcpu->arch.pid;
+ regs->sprg0 = vcpu->arch.sprg0;
+ regs->sprg1 = vcpu->arch.sprg1;
+ regs->sprg2 = vcpu->arch.sprg2;
+ regs->sprg3 = vcpu->arch.sprg3;
+	regs->sprg4 = vcpu->arch.sprg4;
+	regs->sprg5 = vcpu->arch.sprg5;
+	regs->sprg6 = vcpu->arch.sprg6;
+	regs->sprg7 = vcpu->arch.sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ regs->gpr[i] = vcpu->arch.gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+ int i;
+
+ vcpu->arch.pc = regs->pc;
+ vcpu->arch.cr = regs->cr;
+ vcpu->arch.ctr = regs->ctr;
+ vcpu->arch.lr = regs->lr;
+ vcpu->arch.xer = regs->xer;
+ vcpu->arch.msr = regs->msr;
+ vcpu->arch.srr0 = regs->srr0;
+ vcpu->arch.srr1 = regs->srr1;
+ vcpu->arch.sprg0 = regs->sprg0;
+ vcpu->arch.sprg1 = regs->sprg1;
+ vcpu->arch.sprg2 = regs->sprg2;
+ vcpu->arch.sprg3 = regs->sprg3;
+	vcpu->arch.sprg4 = regs->sprg4;
+	vcpu->arch.sprg5 = regs->sprg5;
+	vcpu->arch.sprg6 = regs->sprg6;
+	vcpu->arch.sprg7 = regs->sprg7;
+
+ for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
+ vcpu->arch.gpr[i] = regs->gpr[i];
+
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+ return -ENOTSUPP;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
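+/* Illustrative only: userspace would pack the fields the same way this
+ * handler unpacks them below, e.g.
+ *	tr.linear_address = ((u64)as << 40) | ((u64)pid << 32) | eaddr;
+ *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
+ */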
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
+{
+ struct tlbe *gtlbe;
+ int index;
+ gva_t eaddr;
+ u8 pid;
+ u8 as;
+
+ eaddr = tr->linear_address;
+ pid = (tr->linear_address >> 32) & 0xff;
+ as = (tr->linear_address >> 40) & 0x1;
+
+ index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
+ if (index == -1) {
+ tr->valid = 0;
+ return 0;
+ }
+
+ gtlbe = &vcpu->arch.guest_tlb[index];
+
+ tr->physical_address = tlb_xlate(gtlbe, eaddr);
+ /* XXX what does "writeable" and "usermode" even mean? */
+ tr->valid = 1;
+
+ return 0;
+}
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644
index 000000000000..b480341bc31e
--- /dev/null
+++ b/arch/powerpc/kvm/booke_host.c
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_ppc.h>
+
+unsigned long kvmppc_booke_handlers;
+
+static int __init kvmppc_booke_init(void)
+{
+ unsigned long ivor[16];
+ unsigned long max_ivor = 0;
+ int i;
+
+	/* We install our own exception handlers by hijacking IVPR. IVPR holds
+	 * only the upper 16 bits of the handler address, so the handlers must
+	 * live in a 64KB-aligned region; allocate a full block here. */
+ kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ VCPU_SIZE_ORDER);
+ if (!kvmppc_booke_handlers)
+ return -ENOMEM;
+
+ /* XXX make sure our handlers are smaller than Linux's */
+
+ /* Copy our interrupt handlers to match host IVORs. That way we don't
+ * have to swap the IVORs on every guest/host transition. */
+ ivor[0] = mfspr(SPRN_IVOR0);
+ ivor[1] = mfspr(SPRN_IVOR1);
+ ivor[2] = mfspr(SPRN_IVOR2);
+ ivor[3] = mfspr(SPRN_IVOR3);
+ ivor[4] = mfspr(SPRN_IVOR4);
+ ivor[5] = mfspr(SPRN_IVOR5);
+ ivor[6] = mfspr(SPRN_IVOR6);
+ ivor[7] = mfspr(SPRN_IVOR7);
+ ivor[8] = mfspr(SPRN_IVOR8);
+ ivor[9] = mfspr(SPRN_IVOR9);
+ ivor[10] = mfspr(SPRN_IVOR10);
+ ivor[11] = mfspr(SPRN_IVOR11);
+ ivor[12] = mfspr(SPRN_IVOR12);
+ ivor[13] = mfspr(SPRN_IVOR13);
+ ivor[14] = mfspr(SPRN_IVOR14);
+ ivor[15] = mfspr(SPRN_IVOR15);
+
+ for (i = 0; i < 16; i++) {
+ if (ivor[i] > max_ivor)
+ max_ivor = ivor[i];
+
+ memcpy((void *)kvmppc_booke_handlers + ivor[i],
+ kvmppc_handlers_start + i * kvmppc_handler_len,
+ kvmppc_handler_len);
+ }
+ flush_icache_range(kvmppc_booke_handlers,
+ kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+
+ return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvmppc_booke_exit(void)
+{
+ free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
+ kvm_exit();
+}
+
+module_init(kvmppc_booke_init)
+module_exit(kvmppc_booke_exit)
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644
index 000000000000..3b653b5309b8
--- /dev/null
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+
+#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
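+/* These bits are forced on in SRR1 before the final rfi below, so the guest
+ * always runs in problem state, in translation space 1, and with host
+ * interrupts enabled, regardless of the MSR value it thinks it is using. */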
+
+#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
+
+/* The host stack layout: */
+#define HOST_R1 0 /* Implied by stwu. */
+#define HOST_CALLEE_LR 4
+#define HOST_RUN 8
+/* r2 is special: it holds 'current', and it is made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option. */
+#define HOST_R2 12
+#define HOST_NV_GPRS 16
+#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
+#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
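+/* Worked out, these definitions give: r14..r31 saved at offsets 16..84, a
+ * minimum frame of 88 bytes, HOST_STACK_SIZE rounded up to 96, and the
+ * caller's LR slot at offset 100. */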
+
+#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
+ (1<<BOOKE_INTERRUPT_PROGRAM) | \
+ (1<<BOOKE_INTERRUPT_DTLB_MISS))
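+/* A DTLB miss, for instance, sets its bit in all three masks: the handler
+ * below must capture the faulting instruction, DEAR and ESR before the host
+ * can clobber them. */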
+
+.macro KVM_HANDLER ivor_nr
+_GLOBAL(kvmppc_handler_\ivor_nr)
+ /* Get pointer to vcpu and record exit number. */
+ mtspr SPRN_SPRG0, r4
+ mfspr r4, SPRN_SPRG1
+ stw r5, VCPU_GPR(r5)(r4)
+ stw r6, VCPU_GPR(r6)(r4)
+ mfctr r5
+ lis r6, kvmppc_resume_host@h
+ stw r5, VCPU_CTR(r4)
+ li r5, \ivor_nr
+ ori r6, r6, kvmppc_resume_host@l
+ mtctr r6
+ bctr
+.endm
+
+_GLOBAL(kvmppc_handlers_start)
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
+KVM_HANDLER BOOKE_INTERRUPT_FIT
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG
+
+_GLOBAL(kvmppc_handler_len)
+ .long kvmppc_handler_1 - kvmppc_handler_0
+
+
+/* Registers:
+ * SPRG0: guest r4
+ * r4: vcpu pointer
+ * r5: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+ stw r3, VCPU_GPR(r3)(r4)
+ mfcr r3
+ stw r3, VCPU_CR(r4)
+ stw r7, VCPU_GPR(r7)(r4)
+ stw r8, VCPU_GPR(r8)(r4)
+ stw r9, VCPU_GPR(r9)(r4)
+
+ li r6, 1
+ slw r6, r6, r5
+
+ /* Save the faulting instruction and all GPRs for emulation. */
+ andi. r7, r6, NEED_INST_MASK
+ beq ..skip_inst_copy
+ mfspr r9, SPRN_SRR0
+ mfmsr r8
+ ori r7, r8, MSR_DS
+ mtmsr r7
+ isync
+ lwz r9, 0(r9)
+ mtmsr r8
+ isync
+ stw r9, VCPU_LAST_INST(r4)
+
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+..skip_inst_copy:
+
+ /* Also grab DEAR and ESR before the host can clobber them. */
+
+ andi. r7, r6, NEED_DEAR_MASK
+ beq ..skip_dear
+ mfspr r9, SPRN_DEAR
+ stw r9, VCPU_FAULT_DEAR(r4)
+..skip_dear:
+
+ andi. r7, r6, NEED_ESR_MASK
+ beq ..skip_esr
+ mfspr r9, SPRN_ESR
+ stw r9, VCPU_FAULT_ESR(r4)
+..skip_esr:
+
+ /* Save remaining volatile guest register state to vcpu. */
+ stw r0, VCPU_GPR(r0)(r4)
+ stw r1, VCPU_GPR(r1)(r4)
+ stw r2, VCPU_GPR(r2)(r4)
+ stw r10, VCPU_GPR(r10)(r4)
+ stw r11, VCPU_GPR(r11)(r4)
+ stw r12, VCPU_GPR(r12)(r4)
+ stw r13, VCPU_GPR(r13)(r4)
+ stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+ mflr r3
+ stw r3, VCPU_LR(r4)
+ mfxer r3
+ stw r3, VCPU_XER(r4)
+ mfspr r3, SPRN_SPRG0
+ stw r3, VCPU_GPR(r4)(r4)
+ mfspr r3, SPRN_SRR0
+ stw r3, VCPU_PC(r4)
+
+ /* Restore host stack pointer and PID before IVPR, since the host
+ * exception handlers use them. */
+ lwz r1, VCPU_HOST_STACK(r4)
+ lwz r3, VCPU_HOST_PID(r4)
+ mtspr SPRN_PID, r3
+
+ /* Restore host IVPR before re-enabling interrupts. We cheat and know
+ * that Linux IVPR is always 0xc0000000. */
+ lis r3, 0xc000
+ mtspr SPRN_IVPR, r3
+
+ /* Switch to kernel stack and jump to handler. */
+ LOAD_REG_ADDR(r3, kvmppc_handle_exit)
+ mtctr r3
+ lwz r3, HOST_RUN(r1)
+ lwz r2, HOST_R2(r1)
+ mr r14, r4 /* Save vcpu pointer. */
+
+ bctrl /* kvmppc_handle_exit() */
+
+ /* Restore vcpu pointer and the nonvolatiles we used. */
+ mr r4, r14
+ lwz r14, VCPU_GPR(r14)(r4)
+
+ /* Sometimes instruction emulation must restore complete GPR state. */
+ andi. r5, r3, RESUME_FLAG_NV
+ beq ..skip_nv_load
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+..skip_nv_load:
+
+ /* Should we return to the guest? */
+ andi. r5, r3, RESUME_FLAG_HOST
+ beq lightweight_exit
+
+ srawi r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+ /* Not returning to guest. */
+
+ /* We already saved guest volatile register state; now save the
+ * non-volatiles. */
+ stw r15, VCPU_GPR(r15)(r4)
+ stw r16, VCPU_GPR(r16)(r4)
+ stw r17, VCPU_GPR(r17)(r4)
+ stw r18, VCPU_GPR(r18)(r4)
+ stw r19, VCPU_GPR(r19)(r4)
+ stw r20, VCPU_GPR(r20)(r4)
+ stw r21, VCPU_GPR(r21)(r4)
+ stw r22, VCPU_GPR(r22)(r4)
+ stw r23, VCPU_GPR(r23)(r4)
+ stw r24, VCPU_GPR(r24)(r4)
+ stw r25, VCPU_GPR(r25)(r4)
+ stw r26, VCPU_GPR(r26)(r4)
+ stw r27, VCPU_GPR(r27)(r4)
+ stw r28, VCPU_GPR(r28)(r4)
+ stw r29, VCPU_GPR(r29)(r4)
+ stw r30, VCPU_GPR(r30)(r4)
+ stw r31, VCPU_GPR(r31)(r4)
+
+ /* Load host non-volatile register state from host stack. */
+ lwz r14, HOST_NV_GPR(r14)(r1)
+ lwz r15, HOST_NV_GPR(r15)(r1)
+ lwz r16, HOST_NV_GPR(r16)(r1)
+ lwz r17, HOST_NV_GPR(r17)(r1)
+ lwz r18, HOST_NV_GPR(r18)(r1)
+ lwz r19, HOST_NV_GPR(r19)(r1)
+ lwz r20, HOST_NV_GPR(r20)(r1)
+ lwz r21, HOST_NV_GPR(r21)(r1)
+ lwz r22, HOST_NV_GPR(r22)(r1)
+ lwz r23, HOST_NV_GPR(r23)(r1)
+ lwz r24, HOST_NV_GPR(r24)(r1)
+ lwz r25, HOST_NV_GPR(r25)(r1)
+ lwz r26, HOST_NV_GPR(r26)(r1)
+ lwz r27, HOST_NV_GPR(r27)(r1)
+ lwz r28, HOST_NV_GPR(r28)(r1)
+ lwz r29, HOST_NV_GPR(r29)(r1)
+ lwz r30, HOST_NV_GPR(r30)(r1)
+ lwz r31, HOST_NV_GPR(r31)(r1)
+
+ /* Return to kvm_vcpu_run(). */
+ lwz r4, HOST_STACK_LR(r1)
+ addi r1, r1, HOST_STACK_SIZE
+ mtlr r4
+ /* r3 still contains the return code from kvmppc_handle_exit(). */
+ blr
+
+
+/* Registers:
+ * r3: kvm_run pointer
+ * r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+ stwu r1, -HOST_STACK_SIZE(r1)
+ stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+ /* Save host state to stack. */
+ stw r3, HOST_RUN(r1)
+ mflr r3
+ stw r3, HOST_STACK_LR(r1)
+
+ /* Save host non-volatile register state to stack. */
+ stw r14, HOST_NV_GPR(r14)(r1)
+ stw r15, HOST_NV_GPR(r15)(r1)
+ stw r16, HOST_NV_GPR(r16)(r1)
+ stw r17, HOST_NV_GPR(r17)(r1)
+ stw r18, HOST_NV_GPR(r18)(r1)
+ stw r19, HOST_NV_GPR(r19)(r1)
+ stw r20, HOST_NV_GPR(r20)(r1)
+ stw r21, HOST_NV_GPR(r21)(r1)
+ stw r22, HOST_NV_GPR(r22)(r1)
+ stw r23, HOST_NV_GPR(r23)(r1)
+ stw r24, HOST_NV_GPR(r24)(r1)
+ stw r25, HOST_NV_GPR(r25)(r1)
+ stw r26, HOST_NV_GPR(r26)(r1)
+ stw r27, HOST_NV_GPR(r27)(r1)
+ stw r28, HOST_NV_GPR(r28)(r1)
+ stw r29, HOST_NV_GPR(r29)(r1)
+ stw r30, HOST_NV_GPR(r30)(r1)
+ stw r31, HOST_NV_GPR(r31)(r1)
+
+ /* Load guest non-volatiles. */
+ lwz r14, VCPU_GPR(r14)(r4)
+ lwz r15, VCPU_GPR(r15)(r4)
+ lwz r16, VCPU_GPR(r16)(r4)
+ lwz r17, VCPU_GPR(r17)(r4)
+ lwz r18, VCPU_GPR(r18)(r4)
+ lwz r19, VCPU_GPR(r19)(r4)
+ lwz r20, VCPU_GPR(r20)(r4)
+ lwz r21, VCPU_GPR(r21)(r4)
+ lwz r22, VCPU_GPR(r22)(r4)
+ lwz r23, VCPU_GPR(r23)(r4)
+ lwz r24, VCPU_GPR(r24)(r4)
+ lwz r25, VCPU_GPR(r25)(r4)
+ lwz r26, VCPU_GPR(r26)(r4)
+ lwz r27, VCPU_GPR(r27)(r4)
+ lwz r28, VCPU_GPR(r28)(r4)
+ lwz r29, VCPU_GPR(r29)(r4)
+ lwz r30, VCPU_GPR(r30)(r4)
+ lwz r31, VCPU_GPR(r31)(r4)
+
+lightweight_exit:
+ stw r2, HOST_R2(r1)
+
+ mfspr r3, SPRN_PID
+ stw r3, VCPU_HOST_PID(r4)
+ lwz r3, VCPU_PID(r4)
+ mtspr SPRN_PID, r3
+
+	/* Disable all asynchronous interrupts so nothing can touch the TLB
+	 * while we swap entries. */
+ mfmsr r5
+ lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
+ ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+ andc r6, r5, r6
+ mtmsr r6
+
+ /* Save the host's non-pinned TLB mappings, and load the guest mappings
+ * over them. Leave the host's "pinned" kernel mappings in place. */
+ /* XXX optimization: use generation count to avoid swapping unmodified
+ * entries. */
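+	/* Each saved entry occupies four words (MMUCR, then the PAGEID, XLAT
+	 * and ATTRIB words), which is why the base pointers start at
+	 * VCPU_*_TLB - 4 and are advanced with stwu/lwzu. */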
+ mfspr r10, SPRN_MMUCR /* Save host MMUCR. */
+ lis r8, tlb_44x_hwater@ha
+ lwz r8, tlb_44x_hwater@l(r8)
+ addi r3, r4, VCPU_HOST_TLB - 4
+ addi r9, r4, VCPU_SHADOW_TLB - 4
+ li r6, 0
+1:
+ /* Save host entry. */
+ tlbre r7, r6, PPC44x_TLB_PAGEID
+ mfspr r5, SPRN_MMUCR
+ stwu r5, 4(r3)
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_XLAT
+ stwu r7, 4(r3)
+ tlbre r7, r6, PPC44x_TLB_ATTRIB
+ stwu r7, 4(r3)
+ /* Load guest entry. */
+ lwzu r7, 4(r9)
+ mtspr SPRN_MMUCR, r7
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_PAGEID
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_XLAT
+ lwzu r7, 4(r9)
+ tlbwe r7, r6, PPC44x_TLB_ATTRIB
+ /* Increment index. */
+ addi r6, r6, 1
+ cmpw r6, r8
+ blt 1b
+ mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */
+
+ iccci 0, 0 /* XXX hack */
+
+ /* Load some guest volatiles. */
+ lwz r0, VCPU_GPR(r0)(r4)
+ lwz r2, VCPU_GPR(r2)(r4)
+ lwz r9, VCPU_GPR(r9)(r4)
+ lwz r10, VCPU_GPR(r10)(r4)
+ lwz r11, VCPU_GPR(r11)(r4)
+ lwz r12, VCPU_GPR(r12)(r4)
+ lwz r13, VCPU_GPR(r13)(r4)
+ lwz r3, VCPU_LR(r4)
+ mtlr r3
+ lwz r3, VCPU_XER(r4)
+ mtxer r3
+
+ /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
+ * so how do we make sure vcpu won't fault? */
+ lis r8, kvmppc_booke_handlers@ha
+ lwz r8, kvmppc_booke_handlers@l(r8)
+ mtspr SPRN_IVPR, r8
+
+ /* Save vcpu pointer for the exception handlers. */
+ mtspr SPRN_SPRG1, r4
+
+ /* Can't switch the stack pointer until after IVPR is switched,
+ * because host interrupt handlers would get confused. */
+ lwz r1, VCPU_GPR(r1)(r4)
+
+ /* XXX handle USPRG0 */
+ /* Host interrupt handlers may have clobbered these guest-readable
+ * SPRGs, so we need to reload them here with the guest's values. */
+ lwz r3, VCPU_SPRG4(r4)
+ mtspr SPRN_SPRG4, r3
+ lwz r3, VCPU_SPRG5(r4)
+ mtspr SPRN_SPRG5, r3
+ lwz r3, VCPU_SPRG6(r4)
+ mtspr SPRN_SPRG6, r3
+ lwz r3, VCPU_SPRG7(r4)
+ mtspr SPRN_SPRG7, r3
+
+ /* Finish loading guest volatiles and jump to guest. */
+ lwz r3, VCPU_CTR(r4)
+ mtctr r3
+ lwz r3, VCPU_CR(r4)
+ mtcr r3
+ lwz r5, VCPU_GPR(r5)(r4)
+ lwz r6, VCPU_GPR(r6)(r4)
+ lwz r7, VCPU_GPR(r7)(r4)
+ lwz r8, VCPU_GPR(r8)(r4)
+ lwz r3, VCPU_PC(r4)
+ mtsrr0 r3
+ lwz r3, VCPU_MSR(r4)
+ oris r3, r3, KVMPPC_MSR_MASK@h
+ ori r3, r3, KVMPPC_MSR_MASK@l
+ mtsrr1 r3
+ lwz r3, VCPU_GPR(r3)(r4)
+ lwz r4, VCPU_GPR(r4)(r4)
+ rfi
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
new file mode 100644
index 000000000000..a03fe0c80698
--- /dev/null
+++ b/arch/powerpc/kvm/emulate.c
@@ -0,0 +1,760 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/time.h>
+#include <asm/byteorder.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+/* Instruction decoding */
+static inline unsigned int get_op(u32 inst)
+{
+ return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+ return (inst >> 1) & 0x3ff;
+}
+
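+/* mfspr/mtspr and mfdcr/mtdcr split the register number into two swapped
+ * 5-bit fields in the instruction image; get_sprn() and get_dcrn() glue the
+ * conventional number back together. */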
+static inline unsigned int get_sprn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+ return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+ return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+ return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+ return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+ return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+ return inst & 0xffff;
+}
+
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+ const struct tlbe *tlbe)
+{
+ gpa_t gpa;
+
+ if (!get_tlb_v(tlbe))
+ return 0;
+
+ /* Does it match current guest AS? */
+ /* XXX what about IS != DS? */
+ if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+ return 0;
+
+ gpa = get_tlb_raddr(tlbe);
+ if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+ /* Mapping is not for RAM. */
+ return 0;
+
+ return 1;
+}
+
+static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
+{
+ u64 eaddr;
+ u64 raddr;
+ u64 asid;
+ u32 flags;
+ struct tlbe *tlbe;
+ unsigned int ra;
+ unsigned int rs;
+ unsigned int ws;
+ unsigned int index;
+
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ ws = get_ws(inst);
+
+ index = vcpu->arch.gpr[ra];
+	if (index >= PPC44x_TLB_SIZE) {
+ printk("%s: index %d\n", __func__, index);
+ kvmppc_dump_vcpu(vcpu);
+ return EMULATE_FAIL;
+ }
+
+ tlbe = &vcpu->arch.guest_tlb[index];
+
+ /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
+ if (tlbe->word0 & PPC44x_TLB_VALID) {
+ eaddr = get_tlb_eaddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ kvmppc_mmu_invalidate(vcpu, eaddr, asid);
+ }
+
+ switch (ws) {
+ case PPC44x_TLB_PAGEID:
+ tlbe->tid = vcpu->arch.mmucr & 0xff;
+ tlbe->word0 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_XLAT:
+ tlbe->word1 = vcpu->arch.gpr[rs];
+ break;
+
+ case PPC44x_TLB_ATTRIB:
+ tlbe->word2 = vcpu->arch.gpr[rs];
+ break;
+
+ default:
+ return EMULATE_FAIL;
+ }
+
+ if (tlbe_is_host_safe(vcpu, tlbe)) {
+ eaddr = get_tlb_eaddr(tlbe);
+ raddr = get_tlb_raddr(tlbe);
+ asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+ flags = tlbe->word2 & 0xffff;
+
+ /* Create a 4KB mapping on the host. If the guest wanted a
+ * large page, only the first 4KB is mapped here and the rest
+ * are mapped on the fly. */
+ kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+ }
+
+ return EMULATE_DONE;
+}
+
+static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.tcr & TCR_DIE) {
+ /* The decrementer ticks at the same rate as the timebase, so
+ * that's how we convert the guest DEC value to the number of
+ * host ticks. */
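+		/* Illustrative numbers only: with a 400 MHz timebase and
+		 * HZ=250, tb_ticks_per_jiffy is 1600000, so a guest DEC of
+		 * 16000000 arms the host timer about 10 jiffies out. */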
+ unsigned long nr_jiffies;
+
+ nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
+ mod_timer(&vcpu->arch.dec_timer,
+ get_jiffies_64() + nr_jiffies);
+ } else {
+ del_timer(&vcpu->arch.dec_timer);
+ }
+}
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+/* XXX to do:
+ * lhax
+ * lhaux
+ * lswx
+ * lswi
+ * stswx
+ * stswi
+ * lha
+ * lhau
+ * lmw
+ * stmw
+ *
+ * XXX is_bigendian should depend on MMU mapping or MSR[LE]
+ */
+int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ u32 inst = vcpu->arch.last_inst;
+ u32 ea;
+ int ra;
+ int rb;
+ int rc;
+ int rs;
+ int rt;
+ int sprn;
+ int dcrn;
+ enum emulation_result emulated = EMULATE_DONE;
+ int advance = 1;
+
+ switch (get_op(inst)) {
+ case 3: /* trap */
+ printk("trap!\n");
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+ advance = 0;
+ break;
+
+ case 19:
+ switch (get_xop(inst)) {
+ case 50: /* rfi */
+ kvmppc_emul_rfi(vcpu);
+ advance = 0;
+ break;
+
+ default:
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 31:
+ switch (get_xop(inst)) {
+
+ case 83: /* mfmsr */
+ rt = get_rt(inst);
+ vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ break;
+
+ case 87: /* lbzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 131: /* wrtee */
+ rs = get_rs(inst);
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (vcpu->arch.gpr[rs] & MSR_EE);
+ break;
+
+ case 146: /* mtmsr */
+ rs = get_rs(inst);
+ kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ break;
+
+ case 163: /* wrteei */
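+			/* wrteei encodes its E field at the same bit position
+			 * as MSR[EE], so masking the instruction with MSR_EE
+			 * extracts it directly. */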
+ vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+ | (inst & MSR_EE);
+ break;
+
+ case 215: /* stbx */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 247: /* stbux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 1, 1);
+			vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 279: /* lhzx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 311: /* lhzux */
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 323: /* mfdcr */
+ dcrn = get_dcrn(inst);
+ rt = get_rt(inst);
+
+ /* The guest may access CPR0 registers to determine the timebase
+ * frequency, and it must know the real host frequency because it
+ * can directly access the timebase registers.
+ *
+ * It would be possible to emulate those accesses in userspace,
+ * but userspace can really only figure out the end frequency.
+ * We could decompose that into the factors that compute it, but
+ * that's tricky math, and it's easier to just report the real
+ * CPR0 values.
+ */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ break;
+ case DCRN_CPR0_CONFIG_DATA:
+ local_irq_disable();
+ mtdcr(DCRN_CPR0_CONFIG_ADDR,
+ vcpu->arch.cpr0_cfgaddr);
+ vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ local_irq_enable();
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = 0;
+ run->dcr.is_write = 0;
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 339: /* mfspr */
+ sprn = get_sprn(inst);
+ rt = get_rt(inst);
+
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+ case SPRN_SRR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+ case SPRN_MMUCR:
+ vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ case SPRN_PID:
+ vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ case SPRN_IVPR:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ case SPRN_CCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ case SPRN_CCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+ case SPRN_PVR:
+ vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ case SPRN_DEAR:
+ vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ case SPRN_ESR:
+ vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ case SPRN_DBCR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ case SPRN_DBCR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+ /* Note: mftb and TBRL/TBWL are user-accessible, so
+			 * the guest can always access the real TB anyway.
+ * In fact, we probably will never see these traps. */
+ case SPRN_TBWL:
+ vcpu->arch.gpr[rt] = mftbl(); break;
+ case SPRN_TBWU:
+ vcpu->arch.gpr[rt] = mftbu(); break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+ case SPRN_SPRG1:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+ case SPRN_SPRG2:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+ case SPRN_SPRG3:
+ vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+ /* Note: SPRG4-7 are user-readable, so we don't get
+ * a trap. */
+
+ case SPRN_IVOR0:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
+
+ default:
+ printk("mfspr: unknown spr %x\n", sprn);
+ vcpu->arch.gpr[rt] = 0;
+ break;
+ }
+ break;
+
+ case 407: /* sthx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 439: /* sthux */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = ea;
+ break;
+
+ case 451: /* mtdcr */
+ dcrn = get_dcrn(inst);
+ rs = get_rs(inst);
+
+ /* emulate some access in kernel */
+ switch (dcrn) {
+ case DCRN_CPR0_CONFIG_ADDR:
+ vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ break;
+ default:
+ run->dcr.dcrn = dcrn;
+ run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.is_write = 1;
+ vcpu->arch.dcr_needed = 1;
+ emulated = EMULATE_DO_DCR;
+ }
+
+ break;
+
+ case 467: /* mtspr */
+ sprn = get_sprn(inst);
+ rs = get_rs(inst);
+ switch (sprn) {
+ case SPRN_SRR0:
+ vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SRR1:
+ vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_MMUCR:
+ vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ case SPRN_PID:
+ vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR0:
+ vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_CCR1:
+ vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DEAR:
+ vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ case SPRN_ESR:
+ vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR0:
+ vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_DBCR1:
+ vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+
+ /* XXX We need to context-switch the timebase for
+ * watchdog and FIT. */
+ case SPRN_TBWL: break;
+ case SPRN_TBWU: break;
+
+ case SPRN_DEC:
+ vcpu->arch.dec = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_TSR:
+ vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+
+ case SPRN_TCR:
+ vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ kvmppc_emulate_dec(vcpu);
+ break;
+
+ case SPRN_SPRG0:
+ vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG1:
+ vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG2:
+ vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG3:
+ vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+
+ /* Note: SPRG4-7 are user-readable. These values are
+ * loaded into the real SPRGs when resuming the
+ * guest. */
+ case SPRN_SPRG4:
+ vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG5:
+ vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG6:
+ vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ case SPRN_SPRG7:
+ vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+ case SPRN_IVPR:
+ vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR0:
+ vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR1:
+ vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR2:
+ vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR3:
+ vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR4:
+ vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR5:
+ vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR6:
+ vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR7:
+ vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR8:
+ vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR9:
+ vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR10:
+ vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR11:
+ vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR12:
+ vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR13:
+ vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR14:
+ vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
+ case SPRN_IVOR15:
+ vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
+
+ default:
+ printk("mtspr: unknown spr %x\n", sprn);
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 470: /* dcbi */
+ /* Do nothing. The guest is performing dcbi because
+ * hardware DMA is not snooped by the dcache, but
+ * emulated DMA either goes through the dcache as
+ * normal writes, or the host kernel has handled dcache
+ * coherence. */
+ break;
+
+ case 534: /* lwbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
+ break;
+
+ case 566: /* tlbsync */
+ break;
+
+ case 662: /* stwbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 4, 0);
+ break;
+
+ case 978: /* tlbwe */
+ emulated = kvmppc_emul_tlbwe(vcpu, inst);
+ break;
+
+ case 914: { /* tlbsx */
+ int index;
+ unsigned int as = get_mmucr_sts(vcpu);
+ unsigned int pid = get_mmucr_stid(vcpu);
+
+ rt = get_rt(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+ rc = get_rc(inst);
+
+ ea = vcpu->arch.gpr[rb];
+ if (ra)
+ ea += vcpu->arch.gpr[ra];
+
+ index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
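+		/* 0x20000000 is CR0[EQ]: the record form sets EQ when a
+		 * matching TLB entry was found and clears it otherwise. */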
+ if (rc) {
+ if (index < 0)
+ vcpu->arch.cr &= ~0x20000000;
+ else
+ vcpu->arch.cr |= 0x20000000;
+ }
+ vcpu->arch.gpr[rt] = index;
+
+ }
+ break;
+
+ case 790: /* lhbrx */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
+ break;
+
+ case 918: /* sthbrx */
+ rs = get_rs(inst);
+ ra = get_ra(inst);
+ rb = get_rb(inst);
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ vcpu->arch.gpr[rs],
+ 2, 0);
+ break;
+
+ case 966: /* iccci */
+ break;
+
+ default:
+ printk("unknown: op %d xop %d\n", get_op(inst),
+ get_xop(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+ break;
+
+ case 32: /* lwz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ break;
+
+ case 33: /* lwzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 34: /* lbz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ break;
+
+ case 35: /* lbzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 36: /* stw */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ break;
+
+ case 37: /* stwu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 4, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 38: /* stb */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ break;
+
+ case 39: /* stbu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 1, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 40: /* lhz */
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ break;
+
+ case 41: /* lhzu */
+ ra = get_ra(inst);
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ case 44: /* sth */
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ break;
+
+ case 45: /* sthu */
+ ra = get_ra(inst);
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ 2, 1);
+ vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ break;
+
+ default:
+ printk("unknown op %d\n", get_op(inst));
+ emulated = EMULATE_FAIL;
+ break;
+ }
+
+ if (advance)
+ vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+
+ return emulated;
+}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
new file mode 100644
index 000000000000..bad40bd2d3ac
--- /dev/null
+++ b/arch/powerpc/kvm/powerpc.c
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ return gfn;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+{
+ /* XXX implement me */
+ return 0;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+ return 1;
+}
+
+
+int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+ enum emulation_result er;
+ int r;
+
+ er = kvmppc_emulate_instruction(run, vcpu);
+ switch (er) {
+ case EMULATE_DONE:
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_GUEST_NV;
+ break;
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ /* We must reload nonvolatiles because "update" load/store
+ * instructions modify register state. */
+ /* Future optimization: only reload non-volatiles if they were
+ * actually modified. */
+ r = RESUME_HOST_NV;
+ break;
+ case EMULATE_FAIL:
+ /* XXX Deliver Program interrupt to guest. */
+ printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+ vcpu->arch.last_inst);
+ r = RESUME_HOST;
+ break;
+ default:
+ BUG();
+ }
+
+ return r;
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+ return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+ int r;
+
+ if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+ r = 0;
+ else
+ r = -ENOTSUPP;
+
+ *(int *)rtn = r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+ struct kvm *kvm;
+
+ kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ if (!kvm)
+ return ERR_PTR(-ENOMEM);
+
+ return kvm;
+}
+
+static void kvmppc_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_free(kvm->vcpus[i]);
+ kvm->vcpus[i] = NULL;
+ }
+ }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvmppc_free_vcpus(kvm);
+ kvm_free_physmem(kvm);
+ kfree(kvm);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+ int r;
+
+ switch (ext) {
+ case KVM_CAP_USER_MEMORY:
+ r = 1;
+ break;
+ default:
+ r = 0;
+ break;
+ }
+ return r;
+
+}
+
+long kvm_arch_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+ struct kvm_vcpu *vcpu;
+ int err;
+
+ vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ if (!vcpu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = kvm_vcpu_init(vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ return vcpu;
+
+free_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+ return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+ unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
+
+ return test_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static void kvmppc_decrementer_func(unsigned long data)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
+ (unsigned long)vcpu);
+
+ return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
+{
+ return -ENOTSUPP;
+}
+
+static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ *gpr = run->dcr.data;
+}
+
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+
+ if (run->mmio.len > sizeof(*gpr)) {
+ printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
+ return;
+ }
+
+ if (vcpu->arch.mmio_is_bigendian) {
+ switch (run->mmio.len) {
+ case 4: *gpr = *(u32 *)run->mmio.data; break;
+ case 2: *gpr = *(u16 *)run->mmio.data; break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ } else {
+ /* Convert BE data from userland back to LE. */
+ switch (run->mmio.len) {
+ case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
+ case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 1: *gpr = *(u8 *)run->mmio.data; break;
+ }
+ }
+}
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+		       bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 0;
+
+ vcpu->arch.io_gpr = rt;
+ vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 0;
+
+ return EMULATE_DO_MMIO;
+}
+
+int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ u32 val, unsigned int bytes, int is_bigendian)
+{
+ void *data = run->mmio.data;
+
+ if (bytes > sizeof(run->mmio.data)) {
+ printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
+		       bytes);
+ }
+
+ run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+ run->mmio.len = bytes;
+ run->mmio.is_write = 1;
+ vcpu->mmio_needed = 1;
+ vcpu->mmio_is_write = 1;
+
+ /* Store the value at the lowest bytes in 'data'. */
+ if (is_bigendian) {
+ switch (bytes) {
+ case 4: *(u32 *)data = val; break;
+ case 2: *(u16 *)data = val; break;
+ case 1: *(u8 *)data = val; break;
+ }
+ } else {
+ /* Store LE value into 'data'. */
+ switch (bytes) {
+ case 4: st_le32(data, val); break;
+ case 2: st_le16(data, val); break;
+ case 1: *(u8 *)data = val; break;
+ }
+ }
+
+ return EMULATE_DO_MMIO;
+}
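+
+/* The MMIO flow, for reference: kvmppc_handle_load/store return
+ * EMULATE_DO_MMIO, the exit handler reports KVM_EXIT_MMIO to userspace, and
+ * on the next KVM_RUN kvm_arch_vcpu_ioctl_run() completes a pending load via
+ * kvmppc_complete_mmio_load() before re-entering the guest. */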
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ int r;
+ sigset_t sigsaved;
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+ if (vcpu->mmio_needed) {
+ if (!vcpu->mmio_is_write)
+ kvmppc_complete_mmio_load(vcpu, run);
+ vcpu->mmio_needed = 0;
+ } else if (vcpu->arch.dcr_needed) {
+ if (!vcpu->arch.dcr_is_write)
+ kvmppc_complete_dcr_load(vcpu, run);
+ vcpu->arch.dcr_needed = 0;
+ }
+
+ kvmppc_check_and_deliver_interrupts(vcpu);
+
+ local_irq_disable();
+ kvm_guest_enter();
+ r = __kvmppc_vcpu_run(run, vcpu);
+ kvm_guest_exit();
+ local_irq_enable();
+
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
+{
+ kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+ return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+ struct kvm_mp_state *mp_state)
+{
+ return -EINVAL;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ switch (ioctl) {
+ case KVM_INTERRUPT: {
+ struct kvm_interrupt irq;
+ r = -EFAULT;
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ goto out;
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ break;
+ }
+ default:
+ r = -EINVAL;
+ }
+
+out:
+ return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+ return -ENOTSUPP;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ long r;
+
+ switch (ioctl) {
+ default:
+ r = -EINVAL;
+ }
+
+ return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+ return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ada249bf9779..ce10e2b1b902 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -202,7 +202,7 @@ adjust_total_lowmem(void)
cam_max_size = max_lowmem_size;
/* adjust lowmem size to max_lowmem_size */
- ram = min(max_lowmem_size, total_lowmem);
+ ram = min(max_lowmem_size, (phys_addr_t)total_lowmem);
/* Calculate CAM values */
__cam0 = 1UL << 2 * (__ilog2(ram) / 2);
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index e10d76a860d3..ddeaf9e38ad5 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -191,7 +191,7 @@ _GLOBAL(add_hash_page)
add r3,r3,r0 /* note create_hpte trims to 24 bits */
#ifdef CONFIG_SMP
- rlwinm r8,r1,0,0,18 /* use cpu number to make tag */
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
oris r8,r8,12
#endif /* CONFIG_SMP */
@@ -526,7 +526,7 @@ _GLOBAL(flush_hash_pages)
#ifdef CONFIG_SMP
addis r9,r7,mmu_hash_lock@ha
addi r9,r9,mmu_hash_lock@l
- rlwinm r8,r1,0,0,18
+ rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
add r8,r8,r7
lwz r8,TI_CPU(r8)
oris r8,r8,9
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 47325f23c51f..1952b4d3fa7f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -59,7 +59,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long total_memory;
unsigned long total_lowmem;
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = (phys_addr_t)~0ull;
+EXPORT_SYMBOL(memstart_addr);
+phys_addr_t kernstart_addr;
+EXPORT_SYMBOL(kernstart_addr);
phys_addr_t lowmem_end_addr;
int boot_mapsize;
@@ -68,14 +71,6 @@ unsigned long agp_special_page;
EXPORT_SYMBOL(agp_special_page);
#endif
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
-#endif
-
void MMU_init(void);
/* XXX should be in current.h -- paulus */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 698bd000f98b..c5ac532a0161 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -72,7 +72,8 @@
#warning TASK_SIZE is smaller than it needs to be.
#endif
-phys_addr_t memstart_addr;
+phys_addr_t memstart_addr = ~0;
+phys_addr_t kernstart_addr;
void free_initmem(void)
{
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 16def4dcff6d..d9e37f365b54 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -45,6 +45,7 @@
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>
+#include <asm/fixmap.h>
#include "mmu_decl.h"
@@ -57,6 +58,20 @@ int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
+
+EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
+
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
+{
+ return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
+ vaddr), vaddr), vaddr);
+}
+#endif
+
int page_is_ram(unsigned long pfn)
{
unsigned long paddr = (pfn << PAGE_SHIFT);
@@ -95,15 +110,6 @@ EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
@@ -216,7 +222,7 @@ void __init do_init_bootmem(void)
unsigned long total_pages;
int boot_mapsize;
- max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+ max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
total_pages = total_lowmem >> PAGE_SHIFT;
@@ -232,7 +238,8 @@ void __init do_init_bootmem(void)
start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
- boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+ min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+ boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
/* Add active regions with valid PFNs */
for (i = 0; i < lmb.memory.cnt; i++) {
@@ -310,14 +317,19 @@ void __init paging_init(void)
unsigned long top_of_ram = lmb_end_of_DRAM();
unsigned long max_zone_pfns[MAX_NR_ZONES];
+#ifdef CONFIG_PPC32
+ unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
+ unsigned long end = __fix_to_virt(FIX_HOLE);
+
+ for (; v < end; v += PAGE_SIZE)
+ map_page(v, 0, 0); /* XXX gross */
+#endif
+
#ifdef CONFIG_HIGHMEM
map_page(PKMAP_BASE, 0, 0); /* XXX gross */
- pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
- (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
- map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
- kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
- (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
- KMAP_FIX_BEGIN);
+ pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+ kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 1efd631211ef..dc704da363eb 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -18,6 +18,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
+#include <linux/of.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 64c44bcc68de..80d1babb230d 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -29,6 +29,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/fixmap.h>
#include <asm/io.h>
#include "mmu_decl.h"
@@ -387,3 +388,25 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
+
+static int fixmaps;
+unsigned long FIXADDR_TOP = 0xfffff000;
+EXPORT_SYMBOL(FIXADDR_TOP);
+
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long address = __fix_to_virt(idx);
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ map_page(address, phys, flags);
+ fixmaps++;
+}
+
+void __this_fixmap_does_not_exist(void)
+{
+ WARN_ON(1);
+}
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 18b8ebe930d5..5e1e8cf14e75 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -3,11 +3,12 @@
*
* Initial author: Xianghua Xiao <x.xiao@freescale.com>
* Recode: Jason Jin <jason.jin@freescale.com>
+ * York Sun <yorksun@freescale.com>
*
* Rewrite the interrupt routing. remove the 8259PIC support,
* All the integrated device in ULI use sideband interrupt.
*
- * Copyright 2007 Freescale Semiconductor Inc.
+ * Copyright 2008 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -38,6 +39,8 @@
#include <sysdev/fsl_pci.h>
#include <sysdev/fsl_soc.h>
+static unsigned char *pixis_bdcfg0, *pixis_arch;
+
static struct of_device_id __initdata mpc8610_ids[] = {
{ .compatible = "fsl,mpc8610-immr", },
{}
@@ -52,8 +55,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
}
machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
-static void __init
-mpc86xx_hpcd_init_irq(void)
+static void __init mpc86xx_hpcd_init_irq(void)
{
struct mpic *mpic1;
struct device_node *np;
@@ -161,12 +163,159 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5288, final_uli5288);
#endif /* CONFIG_PCI */
-static void __init
-mpc86xx_hpcd_setup_arch(void)
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+static u32 get_busfreq(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
+ struct device_node *node;
+
+ u32 fs_busfreq = 0;
+ node = of_find_node_by_type(NULL, "cpu");
+ if (node) {
+ unsigned int size;
+ const unsigned int *prop =
+ of_get_property(node, "bus-frequency", &size);
+ if (prop)
+ fs_busfreq = *prop;
+ of_node_put(node);
+	}
+ return fs_busfreq;
+}
+
+unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
+ int monitor_port)
+{
+ static const unsigned long pixelformat[][3] = {
+ {0x88882317, 0x88083218, 0x65052119},
+ {0x88883316, 0x88082219, 0x65053118},
+ };
+ unsigned int pix_fmt, arch_monitor;
+
+ arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1;
+ /* DVI port for board version 0x01 */
+
+ if (bits_per_pixel == 32)
+ pix_fmt = pixelformat[arch_monitor][0];
+ else if (bits_per_pixel == 24)
+ pix_fmt = pixelformat[arch_monitor][1];
+ else if (bits_per_pixel == 16)
+ pix_fmt = pixelformat[arch_monitor][2];
+ else
+ pix_fmt = pixelformat[1][0];
+
+ return pix_fmt;
+}
+
+void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
+{
+ int i;
+ if (monitor_port == 2) { /* dual link LVDS */
+ for (i = 0; i < 256*3; i++)
+ gamma_table_base[i] = (gamma_table_base[i] << 2) |
+ ((gamma_table_base[i] >> 6) & 0x03);
+ }
+}
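
The shift/mask expression above amounts to an 8-bit rotate-left-by-two of each gamma entry (plain char is unsigned on powerpc, so the table bytes behave as 8-bit values). A quick standalone check with an invented sample byte:

/* Dual-link LVDS gamma fixup: rotate each 8-bit entry left by two. */
#include <assert.h>

int main(void)
{
	unsigned char g = 0xb4;				/* 1011 0100 */

	g = (unsigned char)((g << 2) | ((g >> 6) & 0x03));
	assert(g == 0xd2);				/* 1101 0010 */
	return 0;
}
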
+
+void mpc8610hpcd_set_monitor_port(int monitor_port)
+{
+ static const u8 bdcfg[] = {0xBD, 0xB5, 0xA5};
+ if (monitor_port < 3)
+ *pixis_bdcfg0 = bdcfg[monitor_port];
+}
+
+void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
+{
+ u32 __iomem *clkdvdr;
+ u32 temp;
+ /* variables for pixel clock calcs */
+ ulong bestval, bestfreq, speed_ccb, minpixclock, maxpixclock;
+ ulong pixval;
+ long err;
+ int i;
+
+ clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32));
+ if (!clkdvdr) {
+ printk(KERN_ERR "Err: can't map clock divider register!\n");
+ return;
+ }
+
+ /* Pixel Clock configuration */
+ pr_debug("DIU: Bus Frequency = %d\n", get_busfreq());
+ speed_ccb = get_busfreq();
+
+ /* Calculate the pixel clock with the smallest error */
+ /* calculate the following in steps to avoid overflow */
+ pr_debug("DIU pixclock in ps - %d\n", pixclock);
+ temp = 1000000000/pixclock;
+ temp *= 1000;
+ pixclock = temp;
+ pr_debug("DIU pixclock freq - %u\n", pixclock);
+
+ temp = pixclock * 5 / 100;
+ pr_debug("deviation = %d\n", temp);
+ minpixclock = pixclock - temp;
+ maxpixclock = pixclock + temp;
+ pr_debug("DIU minpixclock - %lu\n", minpixclock);
+ pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
+ pixval = speed_ccb/pixclock;
+ pr_debug("DIU pixval = %lu\n", pixval);
+
+ err = 100000000;
+ bestval = pixval;
+ pr_debug("DIU bestval = %lu\n", bestval);
+
+ bestfreq = 0;
+ for (i = -1; i <= 1; i++) {
+ temp = speed_ccb / ((pixval+i) + 1);
+ pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
+ i, pixval, temp);
+ if ((temp < minpixclock) || (temp > maxpixclock))
+ pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
+ minpixclock, maxpixclock);
+ else if (abs(temp - pixclock) < err) {
+ pr_debug("Entered the else if block %d\n", i);
+ err = abs(temp - pixclock);
+ bestval = pixval+i;
+ bestfreq = temp;
+ }
+ }
+
+ pr_debug("DIU chose = %lx\n", bestval);
+ pr_debug("DIU error = %ld\n NomPixClk ", err);
+ pr_debug("DIU: Best Freq = %lx\n", bestfreq);
+ /* Modify PXCLK in GUTS CLKDVDR */
+ pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+ temp = (*clkdvdr) & 0x2000FFFF;
+ *clkdvdr = temp; /* turn off clock */
+ *clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16);
+ pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+ iounmap(clkdvdr);
+}
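
The loop above converts the pixel period from picoseconds to hertz, allows a +/-5% deviation window, and tries the three integer dividers nearest bus_clk/pixclock, keeping the one with the smallest error. A standalone sketch of the same search; the 400 MHz bus clock and 39722 ps pixel period are invented example inputs, not values from the patch:

/* Divider search as in mpc8610hpcd_set_pixel_clock(), example inputs only. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned long speed_ccb = 400000000;	/* assumed platform bus clock, Hz */
	unsigned int pixclock = 39722;		/* pixel period in ps (~25.175 MHz) */
	unsigned long minpix, maxpix, pixval, bestval, bestfreq = 0;
	long err = 100000000;
	int i;

	/* ps -> Hz in two steps to avoid 32-bit overflow, as in the patch */
	pixclock = (1000000000 / pixclock) * 1000;

	minpix = pixclock - pixclock * 5 / 100;	/* +/- 5% window */
	maxpix = pixclock + pixclock * 5 / 100;
	pixval = speed_ccb / pixclock;
	bestval = pixval;

	for (i = -1; i <= 1; i++) {
		unsigned long freq = speed_ccb / (pixval + i + 1);

		if (freq < minpix || freq > maxpix)
			continue;
		if (labs((long)freq - (long)pixclock) < err) {
			err = labs((long)freq - (long)pixclock);
			bestval = pixval + i;
			bestfreq = freq;
		}
	}
	/* CLKDVDR takes bestval & 0x1F; the DIU clock is bus_clk / (bestval + 1) */
	printf("divider field %lu -> %lu Hz (target %u Hz)\n",
	       bestval, bestfreq, pixclock);
	return 0;
}
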
+
+ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE,
+ "%c0 - DVI\n"
+ "%c1 - Single link LVDS\n"
+ "%c2 - Dual link LVDS\n",
+ monitor_port == 0 ? '*' : ' ',
+ monitor_port == 1 ? '*' : ' ',
+ monitor_port == 2 ? '*' : ' ');
+}
+
+int mpc8610hpcd_set_sysfs_monitor_port(int val)
+{
+ return val < 3 ? val : 0;
+}
+
#endif
+
+static void __init mpc86xx_hpcd_setup_arch(void)
+{
+ struct resource r;
+ struct device_node *np;
+ unsigned char *pixis;
+
if (ppc_md.progress)
ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
@@ -183,6 +332,30 @@ mpc86xx_hpcd_setup_arch(void)
}
}
#endif
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+ preallocate_diu_videomemory();
+ diu_ops.get_pixel_format = mpc8610hpcd_get_pixel_format;
+ diu_ops.set_gamma_table = mpc8610hpcd_set_gamma_table;
+ diu_ops.set_monitor_port = mpc8610hpcd_set_monitor_port;
+ diu_ops.set_pixel_clock = mpc8610hpcd_set_pixel_clock;
+ diu_ops.show_monitor_port = mpc8610hpcd_show_monitor_port;
+ diu_ops.set_sysfs_monitor_port = mpc8610hpcd_set_sysfs_monitor_port;
+#endif
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
+ if (np) {
+ of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ pixis = ioremap(r.start, 32);
+ if (!pixis) {
+ printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
+ return;
+ }
+ pixis_bdcfg0 = pixis + 8;
+ pixis_arch = pixis + 1;
+ } else
+ printk(KERN_ERR "Err: "
+ "can't find device node 'fsl,fpga-pixis'\n");
printk("MPC86xx HPCD board from Freescale Semiconductor\n");
}
@@ -200,8 +373,7 @@ static int __init mpc86xx_hpcd_probe(void)
return 0;
}
-static long __init
-mpc86xx_time_init(void)
+static long __init mpc86xx_time_init(void)
{
unsigned int temp;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index f38c50b4ce56..87454c526973 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -45,7 +45,6 @@ source "arch/powerpc/platforms/powermac/Kconfig"
source "arch/powerpc/platforms/prep/Kconfig"
source "arch/powerpc/platforms/maple/Kconfig"
source "arch/powerpc/platforms/pasemi/Kconfig"
-source "arch/powerpc/platforms/celleb/Kconfig"
source "arch/powerpc/platforms/ps3/Kconfig"
source "arch/powerpc/platforms/cell/Kconfig"
source "arch/powerpc/platforms/8xx/Kconfig"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 5fc7fac10e93..f7efaa925a13 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -220,8 +220,8 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-128)"
- range 2 128
+ int "Maximum number of CPUs (2-1024)"
+ range 2 1024
depends on SMP
default "32" if PPC64
default "4"
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index a984894466d9..423a0234dc31 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -24,5 +24,4 @@ obj-$(CONFIG_PPC_MAPLE) += maple/
obj-$(CONFIG_PPC_PASEMI) += pasemi/
obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
-obj-$(CONFIG_PPC_CELLEB) += celleb/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 2f169991896d..3959fcfe731c 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -25,6 +25,19 @@ config PPC_IBM_CELL_BLADE
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
+config PPC_CELLEB
+ bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
+ depends on PPC_MULTIPLATFORM && PPC64
+ select PPC_CELL
+ select PPC_CELL_NATIVE
+ select PPC_RTAS
+ select PPC_INDIRECT_IO
+ select PPC_OF_PLATFORM_PCI
+ select HAS_TXX9_SERIAL
+ select PPC_UDBG_BEAT
+ select USB_OHCI_BIG_ENDIAN_MMIO
+ select USB_EHCI_BIG_ENDIAN_MMIO
+
menu "Cell Broadband Engine options"
depends on PPC_CELL
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index c89964c6fb1f..c2a7e4e5ddf9 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_PPC_CELL_NATIVE) += interrupt.o iommu.o setup.o \
cbe_regs.o spider-pic.o \
- pervasive.o pmu.o io-workarounds.o
+ pervasive.o pmu.o io-workarounds.o \
+ spider-pci.o
obj-$(CONFIG_CBE_RAS) += ras.o
obj-$(CONFIG_CBE_THERM) += cbe_thermal.o
@@ -26,3 +27,20 @@ obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
spufs/
obj-$(CONFIG_PCI_MSI) += axon_msi.o
+
+
+# celleb stuff
+ifeq ($(CONFIG_PPC_CELLEB),y)
+obj-y += celleb_setup.o \
+ celleb_pci.o celleb_scc_epci.o \
+ celleb_scc_pciex.o \
+ celleb_scc_uhc.o \
+ io-workarounds.o spider-pci.o \
+ beat.o beat_htab.o beat_hvCall.o \
+ beat_interrupt.o beat_iommu.o
+
+obj-$(CONFIG_SMP) += beat_smp.o
+obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
+obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
+obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
+endif
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d95e71dee91f..c39f5c225f2e 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -123,7 +123,7 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
return NULL;
}
- for (; dn; tmp = of_get_parent(dn), of_node_put(dn), dn = tmp) {
+ for (; dn; dn = of_get_next_parent(dn)) {
ph = of_get_property(dn, "msi-translator", NULL);
if (ph)
break;
@@ -169,7 +169,7 @@ static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
- struct device_node *dn, *tmp;
+ struct device_node *dn;
struct msi_desc *entry;
int len;
const u32 *prop;
@@ -182,7 +182,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
- for (; dn; tmp = of_get_parent(dn), of_node_put(dn), dn = tmp) {
+ for (; dn; dn = of_get_next_parent(dn)) {
if (entry->msi_attrib.is_64) {
prop = of_get_property(dn, "msi-address-64", &len);
if (prop)
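
Both hunks replace the open-coded "get parent, put child" step with of_get_next_parent(), which returns the parent with a reference held and drops the reference on the node it was given, so the upward walk stays reference-balanced. A toy standalone model of that discipline (the struct and helpers are simplified stand-ins, not the real of.h API):

#include <assert.h>
#include <stddef.h>

struct node { struct node *parent; int refcount; };

static struct node *get_parent(struct node *n)		/* like of_get_parent() */
{
	if (n->parent)
		n->parent->refcount++;
	return n->parent;
}

static void put_node(struct node *n)			/* like of_node_put() */
{
	n->refcount--;
}

static struct node *get_next_parent(struct node *n)	/* like of_get_next_parent() */
{
	struct node *p = get_parent(n);

	put_node(n);
	return p;
}

int main(void)
{
	struct node root = { NULL, 1 }, mid = { &root, 1 }, leaf = { &mid, 2 };
	struct node *dn;

	/* the loop shape used in axon_msi.c after this patch */
	for (dn = &leaf; dn; dn = get_next_parent(dn))
		;
	assert(leaf.refcount == 1 && mid.refcount == 1 && root.refcount == 1);
	return 0;
}
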
diff --git a/arch/powerpc/platforms/celleb/beat.c b/arch/powerpc/platforms/cell/beat.c
index b64b171f245b..48c690ea65da 100644
--- a/arch/powerpc/platforms/celleb/beat.c
+++ b/arch/powerpc/platforms/cell/beat.c
@@ -33,7 +33,7 @@
#include "beat_wrapper.h"
#include "beat.h"
-#include "interrupt.h"
+#include "beat_interrupt.h"
static int beat_pm_poweroff_flag;
diff --git a/arch/powerpc/platforms/celleb/beat.h b/arch/powerpc/platforms/cell/beat.h
index 32c8efcedc80..32c8efcedc80 100644
--- a/arch/powerpc/platforms/celleb/beat.h
+++ b/arch/powerpc/platforms/cell/beat.h
diff --git a/arch/powerpc/platforms/celleb/htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 81467ff055c8..81467ff055c8 100644
--- a/arch/powerpc/platforms/celleb/htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
diff --git a/arch/powerpc/platforms/celleb/hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
index 74c817448948..74c817448948 100644
--- a/arch/powerpc/platforms/celleb/hvCall.S
+++ b/arch/powerpc/platforms/cell/beat_hvCall.S
diff --git a/arch/powerpc/platforms/celleb/interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 69562a867876..192a93509372 100644
--- a/arch/powerpc/platforms/celleb/interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -26,7 +26,7 @@
#include <asm/machdep.h>
-#include "interrupt.h"
+#include "beat_interrupt.h"
#include "beat_wrapper.h"
#define MAX_IRQS NR_IRQS
diff --git a/arch/powerpc/platforms/celleb/interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
index b470fd0051f1..b470fd0051f1 100644
--- a/arch/powerpc/platforms/celleb/interrupt.h
+++ b/arch/powerpc/platforms/cell/beat_interrupt.h
diff --git a/arch/powerpc/platforms/celleb/iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index 93b0efddd658..93b0efddd658 100644
--- a/arch/powerpc/platforms/celleb/iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
diff --git a/arch/powerpc/platforms/celleb/smp.c b/arch/powerpc/platforms/cell/beat_smp.c
index a7631250aeb4..26efc204c47f 100644
--- a/arch/powerpc/platforms/celleb/smp.c
+++ b/arch/powerpc/platforms/cell/beat_smp.c
@@ -37,7 +37,7 @@
#include <asm/machdep.h>
#include <asm/udbg.h>
-#include "interrupt.h"
+#include "beat_interrupt.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/powerpc/platforms/celleb/spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
index bcc17f7fe8ad..bcc17f7fe8ad 100644
--- a/arch/powerpc/platforms/celleb/spu_priv1.c
+++ b/arch/powerpc/platforms/cell/beat_spu_priv1.c
diff --git a/arch/powerpc/platforms/celleb/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
index 8580dc7e1798..8580dc7e1798 100644
--- a/arch/powerpc/platforms/celleb/beat_syscall.h
+++ b/arch/powerpc/platforms/cell/beat_syscall.h
diff --git a/arch/powerpc/platforms/celleb/udbg_beat.c b/arch/powerpc/platforms/cell/beat_udbg.c
index 6b418f6b6175..6b418f6b6175 100644
--- a/arch/powerpc/platforms/celleb/udbg_beat.c
+++ b/arch/powerpc/platforms/cell/beat_udbg.c
diff --git a/arch/powerpc/platforms/celleb/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
index b47dfda48d06..b47dfda48d06 100644
--- a/arch/powerpc/platforms/celleb/beat_wrapper.h
+++ b/arch/powerpc/platforms/cell/beat_wrapper.h
diff --git a/arch/powerpc/platforms/celleb/pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 51b390d34e4d..f39a3b2a1667 100644
--- a/arch/powerpc/platforms/celleb/pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -37,12 +37,11 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#include "pci.h"
-#include "interrupt.h"
+#include "io-workarounds.h"
+#include "celleb_pci.h"
#define MAX_PCI_DEVICES 32
#define MAX_PCI_FUNCTIONS 8
@@ -190,7 +189,7 @@ static int celleb_fake_pci_read_config(struct pci_bus *bus,
static int celleb_fake_pci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
+ unsigned int devfn, int where, int size, u32 val)
{
char *config;
struct device_node *node;
@@ -457,33 +456,42 @@ static int __init celleb_setup_fake_pci(struct device_node *dev,
return 0;
}
-void __init fake_pci_workaround_init(struct pci_controller *phb)
-{
- /**
- * We will add fake pci bus to scc_pci_bus for the purpose to improve
- * I/O Macro performance. But device-tree and device drivers
- * are not ready to use address with a token.
- */
-
- /* celleb_pci_add_one(phb, NULL); */
-}
+static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
+ .setup = celleb_setup_fake_pci,
+};
static struct of_device_id celleb_phb_match[] __initdata = {
{
.name = "pci-pseudo",
- .data = celleb_setup_fake_pci,
+ .data = &celleb_fake_pci_spec,
}, {
.name = "epci",
- .data = celleb_setup_epci,
+ .data = &celleb_epci_spec,
+ }, {
+ .name = "pcie",
+ .data = &celleb_pciex_spec,
}, {
},
};
+static int __init celleb_io_workaround_init(struct pci_controller *phb,
+ struct celleb_phb_spec *phb_spec)
+{
+ if (phb_spec->ops) {
+ iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init,
+ phb_spec->iowa_data);
+ io_workaround_init();
+ }
+
+ return 0;
+}
+
int __init celleb_setup_phb(struct pci_controller *phb)
{
struct device_node *dev = phb->dn;
const struct of_device_id *match;
- int (*setup_func)(struct device_node *, struct pci_controller *);
+ struct celleb_phb_spec *phb_spec;
+ int rc;
match = of_match_node(celleb_phb_match, dev);
if (!match)
@@ -492,8 +500,12 @@ int __init celleb_setup_phb(struct pci_controller *phb)
phb_set_bus_ranges(dev, phb);
phb->buid = 1;
- setup_func = match->data;
- return (*setup_func)(dev, phb);
+ phb_spec = match->data;
+ rc = (*phb_spec->setup)(dev, phb);
+ if (rc)
+ return 1;
+
+ return celleb_io_workaround_init(phb, phb_spec);
}
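
celleb_setup_phb() now dispatches through a celleb_phb_spec carried in the match table's .data instead of a bare setup function, so each PHB flavour can also hand over its io-workaround hooks. A standalone sketch of that table-driven dispatch; the names below are simplified stand-ins for the kernel ones:

#include <stdio.h>
#include <string.h>

struct phb { const char *name; };

/* stands in for celleb_phb_spec: setup plus optional workaround hooks */
struct phb_spec {
	int (*setup)(struct phb *);
	const char *iowa_ops;
};

static int setup_epci(struct phb *phb)
{
	printf("epci setup for %s\n", phb->name);
	return 0;
}

static const struct phb_spec epci_spec = { setup_epci, "spiderpci_ops" };

/* stands in for the of_device_id match table */
static const struct {
	const char *name;
	const struct phb_spec *spec;
} match[] = {
	{ "epci", &epci_spec },
	{ NULL, NULL },
};

static int setup_phb(struct phb *phb)
{
	int i;

	for (i = 0; match[i].name; i++) {
		if (strcmp(match[i].name, phb->name))
			continue;
		if (match[i].spec->setup(phb))
			return 1;
		if (match[i].spec->iowa_ops)	/* iowa_register_bus() goes here */
			printf("register %s\n", match[i].spec->iowa_ops);
		return 0;
	}
	return 1;
}

int main(void)
{
	struct phb phb = { "epci" };

	return setup_phb(&phb);
}
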
int celleb_pci_probe_mode(struct pci_bus *bus)
diff --git a/arch/powerpc/platforms/celleb/pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
index 5d5544ffeddb..4cba1523ec50 100644
--- a/arch/powerpc/platforms/celleb/pci.h
+++ b/arch/powerpc/platforms/cell/celleb_pci.h
@@ -27,16 +27,19 @@
#include <asm/prom.h>
#include <asm/ppc-pci.h>
+#include "io-workarounds.h"
+
+struct celleb_phb_spec {
+ int (*setup)(struct device_node *, struct pci_controller *);
+ struct ppc_pci_io *ops;
+ int (*iowa_init)(struct iowa_bus *, void *);
+ void *iowa_data;
+};
+
extern int celleb_setup_phb(struct pci_controller *);
extern int celleb_pci_probe_mode(struct pci_bus *);
-extern int celleb_setup_epci(struct device_node *, struct pci_controller *);
-
-extern void *celleb_dummy_page_va;
-extern int __init celleb_pci_workaround_init(void);
-extern void __init celleb_pci_add_one(struct pci_controller *,
- void (*)(struct pci_controller *));
-extern void fake_pci_workaround_init(struct pci_controller *);
-extern void epci_workaround_init(struct pci_controller *);
+extern struct celleb_phb_spec celleb_epci_spec;
+extern struct celleb_phb_spec celleb_pciex_spec;
#endif /* _CELLEB_PCI_H */
diff --git a/arch/powerpc/platforms/celleb/scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
index 6be1542a6e66..b596a711c348 100644
--- a/arch/powerpc/platforms/celleb/scc.h
+++ b/arch/powerpc/platforms/cell/celleb_scc.h
@@ -125,6 +125,93 @@
/* bits for SCC_EPCI_CNTOPT */
#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
+/* SCC PCIEXC SMMIO registers */
+#define PEXCADRS 0x000
+#define PEXCWDATA 0x004
+#define PEXCRDATA 0x008
+#define PEXDADRS 0x010
+#define PEXDCMND 0x014
+#define PEXDWDATA 0x018
+#define PEXDRDATA 0x01c
+#define PEXREQID 0x020
+#define PEXTIDMAP 0x024
+#define PEXINTMASK 0x028
+#define PEXINTSTS 0x02c
+#define PEXAERRMASK 0x030
+#define PEXAERRSTS 0x034
+#define PEXPRERRMASK 0x040
+#define PEXPRERRSTS 0x044
+#define PEXPRERRID01 0x048
+#define PEXPRERRID23 0x04c
+#define PEXVDMASK 0x050
+#define PEXVDSTS 0x054
+#define PEXRCVCPLIDA 0x060
+#define PEXLENERRIDA 0x068
+#define PEXPHYPLLST 0x070
+#define PEXDMRDEN0 0x100
+#define PEXDMRDADR0 0x104
+#define PEXDMRDENX 0x110
+#define PEXDMRDADRX 0x114
+#define PEXECMODE 0xf00
+#define PEXMAEA(n) (0xf50 + (8 * n))
+#define PEXMAEC(n) (0xf54 + (8 * n))
+#define PEXCCRCTRL 0xff0
+
+/* SCC PCIEXC bits and shifts for PEXCADRS */
+#define PEXCADRS_BYTE_EN_SHIFT 20
+#define PEXCADRS_CMD_SHIFT 16
+#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
+#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
+
+/* SCC PCIEXC shifts for PEXDADRS */
+#define PEXDADRS_BUSNO_SHIFT 20
+#define PEXDADRS_DEVNO_SHIFT 15
+#define PEXDADRS_FUNCNO_SHIFT 12
+
+/* SCC PCIEXC bits and shifts for PEXDCMND */
+#define PEXDCMND_BYTE_EN_SHIFT 4
+#define PEXDCMND_IO_READ 0x2
+#define PEXDCMND_IO_WRITE 0x3
+#define PEXDCMND_CONFIG_READ 0xa
+#define PEXDCMND_CONFIG_WRITE 0xb
+
+/* SCC PCIEXC bits for PEXPHYPLLST */
+#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
+
+/* SCC PCIEXC bits for PEXECMODE */
+#define PEXECMODE_ALL_THROUGH 0x00000000
+#define PEXECMODE_ALL_8BIT 0x00550155
+#define PEXECMODE_ALL_16BIT 0x00aa02aa
+
+/* SCC PCIEXC bits for PEXCCRCTRL */
+#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
+#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
+#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
+#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
+
+/* SCC PCIEXC port configuration registers */
+#define PEXTCERRCHK 0x21c
+#define PEXTAMAPB0 0x220
+#define PEXTAMAPL0 0x224
+#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
+#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
+#define PEXCHVC0P 0x500
+#define PEXCHVC0NP 0x504
+#define PEXCHVC0C 0x508
+#define PEXCDVC0P 0x50c
+#define PEXCDVC0NP 0x510
+#define PEXCDVC0C 0x514
+#define PEXCHVCXP 0x518
+#define PEXCHVCXNP 0x51c
+#define PEXCHVCXC 0x520
+#define PEXCDVCXP 0x524
+#define PEXCDVCXNP 0x528
+#define PEXCDVCXC 0x52c
+#define PEXCTTRG 0x530
+#define PEXTSCTRL 0x700
+#define PEXTSSTS 0x704
+#define PEXSKPCTRL 0x708
+
/* UHC registers */
#define SCC_UHC_CKRCTRL 0xff0
#define SCC_UHC_ECMODE 0xf00
diff --git a/arch/powerpc/platforms/celleb/scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index a999b393f6f6..08c285b10e30 100644
--- a/arch/powerpc/platforms/celleb/scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -30,23 +30,17 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/prom.h>
-#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#include "scc.h"
-#include "pci.h"
-#include "interrupt.h"
+#include "celleb_scc.h"
+#include "celleb_pci.h"
#define MAX_PCI_DEVICES 32
#define MAX_PCI_FUNCTIONS 8
#define iob() __asm__ __volatile__("eieio; sync":::"memory")
-struct epci_private {
- dma_addr_t dummy_page_da;
-};
-
static inline PCI_IO_ADDR celleb_epci_get_epci_base(
struct pci_controller *hose)
{
@@ -71,42 +65,6 @@ static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
return hose->cfg_data;
}
-static void scc_epci_dummy_read(struct pci_controller *hose)
-{
- PCI_IO_ADDR epci_base;
- u32 val;
-
- epci_base = celleb_epci_get_epci_base(hose);
-
- val = in_be32(epci_base + SCC_EPCI_WATRP);
- iosync();
-
- return;
-}
-
-void __init epci_workaround_init(struct pci_controller *hose)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR reg;
- struct epci_private *private = hose->private_data;
-
- BUG_ON(!private);
-
- private->dummy_page_da = dma_map_single(hose->parent,
- celleb_dummy_page_va, PAGE_SIZE, DMA_FROM_DEVICE);
- if (private->dummy_page_da == DMA_ERROR_CODE) {
- printk(KERN_ERR "EPCI: dummy read disabled. "
- "Map dummy page failed.\n");
- return;
- }
-
- celleb_pci_add_one(hose, scc_epci_dummy_read);
- epci_base = celleb_epci_get_epci_base(hose);
-
- reg = epci_base + SCC_EPCI_DUMYRADR;
- out_be32(reg, private->dummy_page_da);
-}
-
static inline void clear_and_disable_master_abort_interrupt(
struct pci_controller *hose)
{
@@ -151,10 +109,8 @@ static int celleb_epci_check_abort(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static PCI_IO_ADDR celleb_epci_make_config_addr(
- struct pci_bus *bus,
- struct pci_controller *hose,
- unsigned int devfn, int where)
+static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
+ struct pci_controller *hose, unsigned int devfn, int where)
{
PCI_IO_ADDR addr;
@@ -425,8 +381,8 @@ static int __init celleb_epci_init(struct pci_controller *hose)
return 0;
}
-int __init celleb_setup_epci(struct device_node *node,
- struct pci_controller *hose)
+static int __init celleb_setup_epci(struct device_node *node,
+ struct pci_controller *hose)
{
struct resource r;
@@ -450,8 +406,7 @@ int __init celleb_setup_epci(struct device_node *node,
if (!hose->cfg_addr)
goto error;
pr_debug("EPCI: cfg_addr map 0x%016lx->0x%016lx + 0x%016lx\n",
- r.start, (unsigned long)hose->cfg_addr,
- (r.end - r.start + 1));
+ r.start, (unsigned long)hose->cfg_addr, (r.end - r.start + 1));
if (of_address_to_resource(node, 2, &r))
goto error;
@@ -459,14 +414,7 @@ int __init celleb_setup_epci(struct device_node *node,
if (!hose->cfg_data)
goto error;
pr_debug("EPCI: cfg_data map 0x%016lx->0x%016lx + 0x%016lx\n",
- r.start, (unsigned long)hose->cfg_data,
- (r.end - r.start + 1));
-
- hose->private_data = kzalloc(sizeof(struct epci_private), GFP_KERNEL);
- if (hose->private_data == NULL) {
- printk(KERN_ERR "EPCI: no memory for private data.\n");
- goto error;
- }
+ r.start, (unsigned long)hose->cfg_data, (r.end - r.start + 1));
hose->ops = &celleb_epci_ops;
celleb_epci_init(hose);
@@ -474,8 +422,6 @@ int __init celleb_setup_epci(struct device_node *node,
return 0;
error:
- kfree(hose->private_data);
-
if (hose->cfg_addr)
iounmap(hose->cfg_addr);
@@ -483,3 +429,10 @@ error:
iounmap(hose->cfg_data);
return 1;
}
+
+struct celleb_phb_spec celleb_epci_spec __initdata = {
+ .setup = celleb_setup_epci,
+ .ops = &spiderpci_ops,
+ .iowa_init = &spiderpci_iowa_init,
+ .iowa_data = (void *)0,
+};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
new file mode 100644
index 000000000000..31da84c458d2
--- /dev/null
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -0,0 +1,547 @@
+/*
+ * Support for Celleb PCI-Express.
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/iommu.h>
+#include <asm/byteorder.h>
+
+#include "celleb_scc.h"
+#include "celleb_pci.h"
+
+#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
+#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
+
+static void scc_pciex_io_flush(struct iowa_bus *bus)
+{
+ (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
+}
+
+/*
+ * Memory space access to device on PCIEX
+ */
+#define PCIEX_MMIO_READ(name, ret) \
+static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
+{ \
+ ret val = __do_##name(addr); \
+ scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
+ return val; \
+}
+
+#define PCIEX_MMIO_READ_STR(name) \
+static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
+ unsigned long count) \
+{ \
+ __do_##name(addr, buf, count); \
+ scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
+}
+
+PCIEX_MMIO_READ(readb, u8)
+PCIEX_MMIO_READ(readw, u16)
+PCIEX_MMIO_READ(readl, u32)
+PCIEX_MMIO_READ(readq, u64)
+PCIEX_MMIO_READ(readw_be, u16)
+PCIEX_MMIO_READ(readl_be, u32)
+PCIEX_MMIO_READ(readq_be, u64)
+PCIEX_MMIO_READ_STR(readsb)
+PCIEX_MMIO_READ_STR(readsw)
+PCIEX_MMIO_READ_STR(readsl)
+
+static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
+ unsigned long n)
+{
+ __do_memcpy_fromio(dest, src, n);
+ scc_pciex_io_flush(iowa_mem_find_bus(src));
+}
+
+/*
+ * I/O port access to devices on PCIEX.
+ */
+
+static inline unsigned long get_bus_address(struct pci_controller *phb,
+ unsigned long port)
+{
+ return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
+}
+
+static u32 scc_pciex_read_port(struct pci_controller *phb,
+ unsigned long port, int size)
+{
+ unsigned int byte_enable;
+ unsigned int cmd, shift;
+ unsigned long addr;
+ u32 data, ret;
+
+ BUG_ON(((port & 0x3ul) + size) > 4);
+
+ addr = get_bus_address(phb, port);
+ shift = addr & 0x3ul;
+ byte_enable = ((1 << size) - 1) << shift;
+ cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
+ PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
+ PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
+ data = PEX_IN(phb->cfg_addr, PEXDRDATA);
+ ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
+
+ pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
+ " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
+ cmd, data, ret);
+
+ return ret;
+}
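
scc_pciex_read_port() places the byte enables for a sub-word access into PEXDCMND and then extracts the addressed lanes from the 32-bit PEXDRDATA value. A standalone check of that arithmetic, using an invented bus address and register value:

#include <stdio.h>
#include <stdint.h>

#define PEXDCMND_BYTE_EN_SHIFT	4
#define PEXDCMND_IO_READ	0x2

int main(void)
{
	unsigned long addr = 0x1002;		/* 16-bit read at offset 2 in a word */
	int size = 2;
	uint32_t data = 0xaabbccdd;		/* pretend PEXDRDATA latched this */

	unsigned int shift = addr & 0x3ul;
	unsigned int byte_enable = ((1 << size) - 1) << shift;
	unsigned int cmd = PEXDCMND_IO_READ |
			   (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
	uint32_t ret = (data >> (shift * 8)) &
		       (0xFFFFFFFF >> ((4 - size) * 8));

	/* prints be=0xc cmd=0xc2 ret=0xaabb */
	printf("be=%#x cmd=%#x ret=%#x\n", byte_enable, cmd, (unsigned int)ret);
	return 0;
}
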
+
+static void scc_pciex_write_port(struct pci_controller *phb,
+ unsigned long port, int size, u32 val)
+{
+ unsigned int byte_enable;
+ unsigned int cmd, shift;
+ unsigned long addr;
+ u32 data;
+
+ BUG_ON(((port & 0x3ul) + size) > 4);
+
+ addr = get_bus_address(phb, port);
+ shift = addr & 0x3ul;
+ byte_enable = ((1 << size) - 1) << shift;
+ cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
+ data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
+ PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
+ PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
+ PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
+
+ pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
+ " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
+ byte_enable, cmd, data);
+}
+
+static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
+{
+ return (u8)scc_pciex_read_port(phb, port, 1);
+}
+
+static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
+{
+ u32 data;
+ if ((port & 0x3ul) < 3)
+ data = scc_pciex_read_port(phb, port, 2);
+ else {
+ u32 d1 = scc_pciex_read_port(phb, port, 1);
+ u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
+ data = d1 | (d2 << 8);
+ }
+ return (u16)data;
+}
+
+static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
+{
+ unsigned int mod = port & 0x3ul;
+ u32 data;
+ if (mod == 0)
+ data = scc_pciex_read_port(phb, port, 4);
+ else {
+ u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
+ u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
+ data = d1 | (d2 << (mod * 8));
+ }
+ return data;
+}
+
+static void __scc_pciex_outb(struct pci_controller *phb,
+ u8 val, unsigned long port)
+{
+ scc_pciex_write_port(phb, port, 1, (u32)val);
+}
+
+static void __scc_pciex_outw(struct pci_controller *phb,
+ u16 val, unsigned long port)
+{
+ if ((port & 0x3ul) < 3)
+ scc_pciex_write_port(phb, port, 2, (u32)val);
+ else {
+ u32 d1 = val & 0x000000FF;
+ u32 d2 = (val & 0x0000FF00) >> 8;
+ scc_pciex_write_port(phb, port, 1, d1);
+ scc_pciex_write_port(phb, port + 1, 1, d2);
+ }
+}
+
+static void __scc_pciex_outl(struct pci_controller *phb,
+ u32 val, unsigned long port)
+{
+ unsigned int mod = port & 0x3ul;
+ if (mod == 0)
+ scc_pciex_write_port(phb, port, 4, val);
+ else {
+ u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
+ u32 d2 = val >> ((4 - mod) * 8);
+ scc_pciex_write_port(phb, port, 4 - mod, d1);
+ scc_pciex_write_port(phb, port + 1, mod, d2);
+ }
+}
+
+#define PCIEX_PIO_FUNC(size, name) \
+static u##size scc_pciex_in##name(unsigned long port) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(port); \
+ u##size data = __scc_pciex_in##name(bus->phb, port); \
+ scc_pciex_io_flush(bus); \
+ return data; \
+} \
+static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(p); \
+ u##size *dst = b; \
+ for (; c != 0; c--, dst++) \
+ *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
+ scc_pciex_io_flush(bus); \
+} \
+static void scc_pciex_out##name(u##size val, unsigned long port) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(port); \
+ __scc_pciex_out##name(bus->phb, val, port); \
+} \
+static void scc_pciex_outs##name(unsigned long p, const void *b, \
+ unsigned long c) \
+{ \
+ struct iowa_bus *bus = iowa_pio_find_bus(p); \
+ const u##size *src = b; \
+ for (; c != 0; c--, src++) \
+ __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
+}
+#define cpu_to_le8(x) (x)
+#define le8_to_cpu(x) (x)
+PCIEX_PIO_FUNC(8, b)
+PCIEX_PIO_FUNC(16, w)
+PCIEX_PIO_FUNC(32, l)
+
+static struct ppc_pci_io scc_pciex_ops = {
+ .readb = scc_pciex_readb,
+ .readw = scc_pciex_readw,
+ .readl = scc_pciex_readl,
+ .readq = scc_pciex_readq,
+ .readw_be = scc_pciex_readw_be,
+ .readl_be = scc_pciex_readl_be,
+ .readq_be = scc_pciex_readq_be,
+ .readsb = scc_pciex_readsb,
+ .readsw = scc_pciex_readsw,
+ .readsl = scc_pciex_readsl,
+ .memcpy_fromio = scc_pciex_memcpy_fromio,
+ .inb = scc_pciex_inb,
+ .inw = scc_pciex_inw,
+ .inl = scc_pciex_inl,
+ .outb = scc_pciex_outb,
+ .outw = scc_pciex_outw,
+ .outl = scc_pciex_outl,
+ .insb = scc_pciex_insb,
+ .insw = scc_pciex_insw,
+ .insl = scc_pciex_insl,
+ .outsb = scc_pciex_outsb,
+ .outsw = scc_pciex_outsw,
+ .outsl = scc_pciex_outsl,
+};
+
+static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
+{
+ dma_addr_t dummy_page_da;
+ void *dummy_page_va;
+
+ dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dummy_page_va) {
+ pr_err("PCIEX:Alloc dummy_page_va failed\n");
+ return -1;
+ }
+
+ dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dummy_page_da)) {
+ pr_err("PCIEX:Map dummy page failed.\n");
+ kfree(dummy_page_va);
+ return -1;
+ }
+
+ PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
+
+ return 0;
+}
+
+/*
+ * config space access
+ */
+#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
+ ((uint32_t)(((addr) & ~0x3UL) | \
+ ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
+ ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
+ ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
+
+#define MK_PEXDCMND_BYTE_EN(addr, size) \
+ ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
+#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
+
+static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
+ uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
+ uint64_t off, uint64_t size)
+{
+ uint32_t ret;
+ uint32_t addr, cmd;
+
+ addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
+ cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
+ PEX_OUT(base, PEXDADRS, addr);
+ PEX_OUT(base, PEXDCMND, cmd);
+ ret = (PEX_IN(base, PEXDRDATA)
+ >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
+ return ret;
+}
+
+static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
+ uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
+ uint32_t data)
+{
+ uint32_t addr, cmd;
+
+ addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
+ cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
+ PEX_OUT(base, PEXDADRS, addr);
+ PEX_OUT(base, PEXDCMND, cmd);
+ PEX_OUT(base, PEXDWDATA,
+ (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
+}
+
+#define MK_PEXCADRS_BYTE_EN(off, len) \
+ ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
+#define MK_PEXCADRS(cmd, addr, size) \
+ ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
+static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
+ uint32_t where, uint32_t size)
+{
+ PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
+ return (PEX_IN(base, PEXCRDATA)
+ >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
+}
+
+static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
+ uint32_t size, uint32_t val)
+{
+ uint32_t data;
+
+ data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
+ PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
+ PEX_OUT(base, PEXCWDATA, data);
+}
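
The MK_PEXDADRS/MK_PEXDCMND macros pack bus/device/function and the byte enables of a config cycle into the two SMMIO registers. The macros compile standalone, so the encoding can be checked outside the kernel; the bus/dev/func/offset values below are arbitrary examples:

#include <stdio.h>
#include <stdint.h>

#define PEXDADRS_BUSNO_SHIFT	20
#define PEXDADRS_DEVNO_SHIFT	15
#define PEXDADRS_FUNCNO_SHIFT	12
#define PEXDCMND_BYTE_EN_SHIFT	4
#define PEXDCMND_CONFIG_READ	0xa

#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
	((uint32_t)(((addr) & ~0x3UL) | \
	((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
	((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
	((func_no) << PEXDADRS_FUNCNO_SHIFT)))

#define MK_PEXDCMND_BYTE_EN(addr, size) \
	((((0x1 << (size)) - 1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))

int main(void)
{
	/* 1-byte config read of offset 0x0e on bus 1, dev 1, func 0 */
	uint32_t adrs = MK_PEXDADRS(1UL, 1UL, 0UL, 0x0eUL);
	uint32_t cmnd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, 0x0e, 1);

	/* prints PEXDADRS=0x10800c PEXDCMND=0x4a */
	printf("PEXDADRS=%#x PEXDCMND=%#x\n", (unsigned int)adrs,
	       (unsigned int)cmnd);
	return 0;
}
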
+
+/* Interfaces */
+/* Note: work-around
+ * On SCC PCIEXC, the same device is seen on all 32 dev_no values.
+ * Since the SCC PCIEXC bus can hold only one device, we probe only
+ * dev_no 1.
+ */
+static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, unsigned int *val)
+{
+ struct device_node *dn;
+ struct pci_controller *phb;
+
+ dn = bus->sysdata;
+ phb = pci_find_hose_for_OF_device(dn);
+
+ if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ if (bus->number == 0 && PCI_SLOT(devfn) == 0)
+ *val = config_read_pciex_rc(phb->cfg_addr, where, size);
+ else
+ *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, unsigned int val)
+{
+ struct device_node *dn;
+ struct pci_controller *phb;
+
+ dn = bus->sysdata;
+ phb = pci_find_hose_for_OF_device(dn);
+
+ if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (bus->number == 0 && PCI_SLOT(devfn) == 0)
+ config_write_pciex_rc(phb->cfg_addr, where, size, val);
+ else
+ config_write_pciex_dev(phb->cfg_addr, bus->number,
+ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops scc_pciex_pci_ops = {
+ scc_pciex_read_config,
+ scc_pciex_write_config,
+};
+
+static void pciex_clear_intr_all(unsigned int __iomem *base)
+{
+ PEX_OUT(base, PEXAERRSTS, 0xffffffff);
+ PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
+ PEX_OUT(base, PEXINTSTS, 0xffffffff);
+}
+
+#if 0
+static void pciex_disable_intr_all(unsigned int *base)
+{
+ PEX_OUT(base, PEXINTMASK, 0x0);
+ PEX_OUT(base, PEXAERRMASK, 0x0);
+ PEX_OUT(base, PEXPRERRMASK, 0x0);
+ PEX_OUT(base, PEXVDMASK, 0x0);
+}
+#endif
+
+static void pciex_enable_intr_all(unsigned int __iomem *base)
+{
+ PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
+ PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
+ PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
+ PEX_OUT(base, PEXVDMASK, 0x00000001);
+}
+
+static void pciex_check_status(unsigned int __iomem *base)
+{
+ uint32_t err = 0;
+ uint32_t intsts, aerr, prerr, rcvcp, lenerr;
+ uint32_t maea, maec;
+
+ intsts = PEX_IN(base, PEXINTSTS);
+ aerr = PEX_IN(base, PEXAERRSTS);
+ prerr = PEX_IN(base, PEXPRERRSTS);
+ rcvcp = PEX_IN(base, PEXRCVCPLIDA);
+ lenerr = PEX_IN(base, PEXLENERRIDA);
+
+ if (intsts || aerr || prerr || rcvcp || lenerr)
+ err = 1;
+
+ pr_info("PCEXC interrupt!!\n");
+ pr_info("PEXINTSTS :0x%08x\n", intsts);
+ pr_info("PEXAERRSTS :0x%08x\n", aerr);
+ pr_info("PEXPRERRSTS :0x%08x\n", prerr);
+ pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
+ pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
+
+ /* print detail of Protection Error */
+ if (intsts & 0x00004000) {
+ uint32_t i, n;
+ for (i = 0; i < 4; i++) {
+ n = 1 << i;
+ if (prerr & n) {
+ maea = PEX_IN(base, PEXMAEA(i));
+ maec = PEX_IN(base, PEXMAEC(i));
+ pr_info("PEXMAEC%d :0x%08x\n", i, maec);
+ pr_info("PEXMAEA%d :0x%08x\n", i, maea);
+ }
+ }
+ }
+
+ if (err)
+ pciex_clear_intr_all(base);
+}
+
+static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
+{
+ struct pci_controller *phb = dev_id;
+
+ pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
+
+ BUG_ON(phb->cfg_addr == NULL);
+
+ pciex_check_status(phb->cfg_addr);
+
+ return IRQ_HANDLED;
+}
+
+static __init int celleb_setup_pciex(struct device_node *node,
+ struct pci_controller *phb)
+{
+ struct resource r;
+ struct of_irq oirq;
+ int virq;
+
+ /* SMMIO registers; used inside this file */
+ if (of_address_to_resource(node, 0, &r)) {
+ pr_err("PCIEXC:Failed to get config resource.\n");
+ return 1;
+ }
+ phb->cfg_addr = ioremap(r.start, r.end - r.start + 1);
+ if (!phb->cfg_addr) {
+ pr_err("PCIEXC:Failed to remap SMMIO region.\n");
+ return 1;
+ }
+
+ /* cfg_data is not used; the cmd and data regs sit next to the address reg */
+ phb->cfg_data = NULL;
+
+ /* set pci_ops */
+ phb->ops = &scc_pciex_pci_ops;
+
+ /* internal interrupt handler */
+ if (of_irq_map_one(node, 1, &oirq)) {
+ pr_err("PCIEXC:Failed to map irq\n");
+ goto error;
+ }
+ virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
+ oirq.size);
+ if (request_irq(virq, pciex_handle_internal_irq,
+ IRQF_DISABLED, "pciex", (void *)phb)) {
+ pr_err("PCIEXC:Failed to request irq\n");
+ goto error;
+ }
+
+ /* enable all interrupts */
+ pciex_clear_intr_all(phb->cfg_addr);
+ pciex_enable_intr_all(phb->cfg_addr);
+ /* MSI: TBD */
+
+ return 0;
+
+error:
+ phb->cfg_data = NULL;
+ if (phb->cfg_addr)
+ iounmap(phb->cfg_addr);
+ phb->cfg_addr = NULL;
+ return 1;
+}
+
+struct celleb_phb_spec celleb_pciex_spec __initdata = {
+ .setup = celleb_setup_pciex,
+ .ops = &scc_pciex_ops,
+ .iowa_init = &scc_pciex_iowa_init,
+};
diff --git a/arch/powerpc/platforms/celleb/scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
index 3a16c5b3c464..3a16c5b3c464 100644
--- a/arch/powerpc/platforms/celleb/scc_sio.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
diff --git a/arch/powerpc/platforms/celleb/scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
index cb4307994087..d63b720bfe3a 100644
--- a/arch/powerpc/platforms/celleb/scc_uhc.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
@@ -25,7 +25,7 @@
#include <asm/io.h>
#include <asm/machdep.h>
-#include "scc.h"
+#include "celleb_scc.h"
#define UHC_RESET_WAIT_MAX 10000
diff --git a/arch/powerpc/platforms/celleb/setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index f27ae1e3fb58..b11cb30decb2 100644
--- a/arch/powerpc/platforms/celleb/setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -56,13 +56,13 @@
#include <asm/rtas.h>
#include <asm/cell-regs.h>
-#include "interrupt.h"
+#include "beat_interrupt.h"
#include "beat_wrapper.h"
#include "beat.h"
-#include "pci.h"
-#include "../cell/interrupt.h"
-#include "../cell/pervasive.h"
-#include "../cell/ras.h"
+#include "celleb_pci.h"
+#include "interrupt.h"
+#include "pervasive.h"
+#include "ras.h"
static char celleb_machine_type[128] = "Celleb";
@@ -114,8 +114,6 @@ static int __init celleb_publish_devices(void)
/* Publish OF platform devices for southbridge IOs */
of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
- celleb_pci_workaround_init();
-
return 0;
}
machine_device_initcall(celleb_beat, celleb_publish_devices);
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/platforms/cell/io-workarounds.c
index 979d4b67efb4..3b84e8be314c 100644
--- a/arch/powerpc/platforms/cell/io-workarounds.c
+++ b/arch/powerpc/platforms/cell/io-workarounds.c
@@ -1,6 +1,9 @@
/*
+ * Support PCI IO workaround
+ *
* Copyright (C) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* IBM, Corp.
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,335 +12,174 @@
#undef DEBUG
#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
+
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
+#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
+#include "io-workarounds.h"
-#define SPIDER_PCI_REG_BASE 0xd000
-#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
-#define SPIDER_PCI_DUMMY_READ 0x0810
-#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
+#define IOWA_MAX_BUS 8
-/* Undefine that to re-enable bogus prefetch
- *
- * Without that workaround, the chip will do bogus prefetch past
- * page boundary from system memory. This setting will disable that,
- * though the documentation is unclear as to the consequences of doing
- * so, either purely performances, or possible misbehaviour... It's not
- * clear wether the chip can handle unaligned accesses at all without
- * prefetching enabled.
- *
- * For now, things appear to be behaving properly with that prefetching
- * disabled and IDE, possibly because IDE isn't doing any unaligned
- * access.
- */
-#define SPIDER_DISABLE_PREFETCH
+static struct iowa_bus iowa_busses[IOWA_MAX_BUS];
+static unsigned int iowa_bus_count;
-#define MAX_SPIDERS 3
+static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
+{
+ int i, j;
+ struct resource *res;
+ unsigned long vstart, vend;
-static struct spider_pci_bus {
- void __iomem *regs;
- unsigned long mmio_start;
- unsigned long mmio_end;
- unsigned long pio_vstart;
- unsigned long pio_vend;
-} spider_pci_busses[MAX_SPIDERS];
-static int spider_pci_count;
+ for (i = 0; i < iowa_bus_count; i++) {
+ struct iowa_bus *bus = &iowa_busses[i];
+ struct pci_controller *phb = bus->phb;
-static struct spider_pci_bus *spider_pci_find(unsigned long vaddr,
- unsigned long paddr)
-{
- int i;
-
- for (i = 0; i < spider_pci_count; i++) {
- struct spider_pci_bus *bus = &spider_pci_busses[i];
- if (paddr && paddr >= bus->mmio_start && paddr < bus->mmio_end)
- return bus;
- if (vaddr && vaddr >= bus->pio_vstart && vaddr < bus->pio_vend)
- return bus;
+ if (vaddr) {
+ vstart = (unsigned long)phb->io_base_virt;
+ vend = vstart + phb->pci_io_size - 1;
+ if ((vaddr >= vstart) && (vaddr <= vend))
+ return bus;
+ }
+
+ if (paddr)
+ for (j = 0; j < 3; j++) {
+ res = &phb->mem_resources[j];
+ if (paddr >= res->start && paddr <= res->end)
+ return bus;
+ }
}
+
return NULL;
}
-static void spider_io_flush(const volatile void __iomem *addr)
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- struct spider_pci_bus *bus;
+ struct iowa_bus *bus;
int token;
- /* Get platform token (set by ioremap) from address */
token = PCI_GET_ADDR_TOKEN(addr);
- /* Fast path if we have a non-0 token, it indicates which bus we
- * are on.
- *
- * If the token is 0, that means either that the ioremap was done
- * before we initialized this layer, or it's a PIO operation. We
- * fallback to a low path in this case. Hopefully, internal devices
- * which are ioremap'ed early should use in_XX/out_XX functions
- * instead of the PCI ones and thus not suffer from the slowdown.
- *
- * Also note that currently, the workaround will not work for areas
- * that are not mapped with PTEs (bolted in the hash table). This
- * is the case for ioremaps done very early at boot (before
- * mem_init_done) and includes the mapping of the ISA IO space.
- *
- * Fortunately, none of the affected devices is expected to do DMA
- * and thus there should be no problem in practice.
- *
- * In order to improve performances, we only do the PTE search for
- * addresses falling in the PHB IO space area. That means it will
- * not work for hotplug'ed PHBs but those don't exist with Spider.
- */
- if (token && token <= spider_pci_count)
- bus = &spider_pci_busses[token - 1];
+ if (token && token <= iowa_bus_count)
+ bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
pte_t *ptep;
- /* Fixup physical address */
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
+ if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
+ return NULL;
- /* Check if it's in allowed range for PIO */
- if (vaddr < PHB_IO_BASE || vaddr > PHB_IO_END)
- return;
-
- /* Try to find a PTE. If not, clear the paddr, we'll do
- * a vaddr only lookup (PIO only)
- */
ptep = find_linux_pte(init_mm.pgd, vaddr);
if (ptep == NULL)
paddr = 0;
else
paddr = pte_pfn(*ptep) << PAGE_SHIFT;
+ bus = iowa_pci_find(vaddr, paddr);
- bus = spider_pci_find(vaddr, paddr);
if (bus == NULL)
- return;
+ return NULL;
}
- /* Now do the workaround
- */
- (void)in_be32(bus->regs + SPIDER_PCI_DUMMY_READ);
+ return bus;
}
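
iowa_mem_find_bus() keeps the old spider fast path: a small non-zero token stored by ioremap in the cookie indexes the bus table directly, and only token-less addresses fall back to the PTE/resource search. A toy standalone model of that fast path; the bit layout is invented for the example, the real encoding being PCI_SET_ADDR_TOKEN()/PCI_GET_ADDR_TOKEN() in asm/io.h:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define TOKEN_SHIFT	48		/* made-up position for the token bits */
#define TOKEN_MASK	0xffull

struct toy_bus { int id; };
static struct toy_bus busses[8] = { {0}, {1}, {2} };
static unsigned int bus_count = 3;

static uint64_t set_token(uint64_t addr, unsigned int token)
{
	return addr | ((uint64_t)token << TOKEN_SHIFT);
}

static struct toy_bus *find_bus(uint64_t addr)
{
	unsigned int token = (addr >> TOKEN_SHIFT) & TOKEN_MASK;

	if (token && token <= bus_count)
		return &busses[token - 1];	/* fast path */
	return NULL;				/* else: PTE/resource lookup */
}

int main(void)
{
	uint64_t cookie = set_token(0xd0000000ull, 2);	/* second bus */

	assert(find_bus(cookie)->id == 1);
	assert(find_bus(0xd0000000ull) == NULL);	/* no token: slow path */
	return 0;
}
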
-static u8 spider_readb(const volatile void __iomem *addr)
+struct iowa_bus *iowa_pio_find_bus(unsigned long port)
{
- u8 val = __do_readb(addr);
- spider_io_flush(addr);
- return val;
+ unsigned long vaddr = (unsigned long)pci_io_base + port;
+ return iowa_pci_find(vaddr, 0);
}
-static u16 spider_readw(const volatile void __iomem *addr)
-{
- u16 val = __do_readw(addr);
- spider_io_flush(addr);
- return val;
-}
-static u32 spider_readl(const volatile void __iomem *addr)
-{
- u32 val = __do_readl(addr);
- spider_io_flush(addr);
- return val;
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+static ret iowa_##name at \
+{ \
+ struct iowa_bus *bus; \
+ bus = iowa_##space##_find_bus(aa); \
+ if (bus && bus->ops && bus->ops->name) \
+ return bus->ops->name al; \
+ return __do_##name al; \
}
-static u64 spider_readq(const volatile void __iomem *addr)
-{
- u64 val = __do_readq(addr);
- spider_io_flush(addr);
- return val;
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+static void iowa_##name at \
+{ \
+ struct iowa_bus *bus; \
+ bus = iowa_##space##_find_bus(aa); \
+ if (bus && bus->ops && bus->ops->name) { \
+ bus->ops->name al; \
+ return; \
+ } \
+ __do_##name al; \
}
-static u16 spider_readw_be(const volatile void __iomem *addr)
-{
- u16 val = __do_readw_be(addr);
- spider_io_flush(addr);
- return val;
-}
+#include <asm/io-defs.h>
-static u32 spider_readl_be(const volatile void __iomem *addr)
-{
- u32 val = __do_readl_be(addr);
- spider_io_flush(addr);
- return val;
-}
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
-static u64 spider_readq_be(const volatile void __iomem *addr)
-{
- u64 val = __do_readq_be(addr);
- spider_io_flush(addr);
- return val;
-}
+static struct ppc_pci_io __initdata iowa_pci_io = {
-static void spider_readsb(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- __do_readsb(addr, buf, count);
- spider_io_flush(addr);
-}
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) .name = iowa_##name,
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) .name = iowa_##name,
-static void spider_readsw(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- __do_readsw(addr, buf, count);
- spider_io_flush(addr);
-}
+#include <asm/io-defs.h>
-static void spider_readsl(const volatile void __iomem *addr, void *buf,
- unsigned long count)
-{
- __do_readsl(addr, buf, count);
- spider_io_flush(addr);
-}
-
-static void spider_memcpy_fromio(void *dest, const volatile void __iomem *src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- spider_io_flush(src);
-}
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+};
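
The DEF_PCI_AC_RET/DEF_PCI_AC_NORET pair is expanded twice against <asm/io-defs.h>: once to emit an iowa_* wrapper for every accessor and once to fill the ppc_pci_io table with those wrappers. A minimal standalone illustration of that include-the-list-twice pattern, with a two-entry list invented for the example:

#include <stdio.h>

#define IO_DEFS \
	DEF_ACC(readb) \
	DEF_ACC(readw)

struct toy_ops {
	void (*readb)(void);
	void (*readw)(void);
};

/* first expansion: one wrapper per entry */
#define DEF_ACC(name) \
static void toy_##name(void) { printf("toy_" #name "\n"); }
IO_DEFS
#undef DEF_ACC

/* second expansion: designated initializers pointing at the wrappers */
#define DEF_ACC(name) .name = toy_##name,
static struct toy_ops ops = { IO_DEFS };
#undef DEF_ACC

int main(void)
{
	ops.readb();
	ops.readw();
	return 0;
}
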
-static void __iomem * spider_ioremap(unsigned long addr, unsigned long size,
- unsigned long flags)
+static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
+ unsigned long flags)
{
- struct spider_pci_bus *bus;
+ struct iowa_bus *bus;
void __iomem *res = __ioremap(addr, size, flags);
int busno;
- pr_debug("spider_ioremap(0x%lx, 0x%lx, 0x%lx) -> 0x%p\n",
- addr, size, flags, res);
-
- bus = spider_pci_find(0, addr);
+ bus = iowa_pci_find(0, addr);
if (bus != NULL) {
- busno = bus - spider_pci_busses;
- pr_debug(" found bus %d, setting token\n", busno);
+ busno = bus - iowa_busses;
PCI_SET_ADDR_TOKEN(res, busno + 1);
}
- pr_debug(" result=0x%p\n", res);
-
return res;
}
-static void __init spider_pci_setup_chip(struct spider_pci_bus *bus)
-{
-#ifdef SPIDER_DISABLE_PREFETCH
- u32 val = in_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT);
- pr_debug(" PVCI_Control_Status was 0x%08x\n", val);
- out_be32(bus->regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
-#endif
-
- /* Configure the dummy address for the workaround */
- out_be32(bus->regs + SPIDER_PCI_DUMMY_READ_BASE, 0x80000000);
-}
-
-static void __init spider_pci_add_one(struct pci_controller *phb)
+/* Register a new bus that needs the workaround */
+void __init iowa_register_bus(struct pci_controller *phb,
+ struct ppc_pci_io *ops,
+ int (*initfunc)(struct iowa_bus *, void *), void *data)
{
- struct spider_pci_bus *bus = &spider_pci_busses[spider_pci_count];
+ struct iowa_bus *bus;
struct device_node *np = phb->dn;
- struct resource rsrc;
- void __iomem *regs;
- if (spider_pci_count >= MAX_SPIDERS) {
- printk(KERN_ERR "Too many spider bridges, workarounds"
- " disabled for %s\n", np->full_name);
+ if (iowa_bus_count >= IOWA_MAX_BUS) {
+ pr_err("IOWA:Too many pci bridges, "
+ "workarounds disabled for %s\n", np->full_name);
return;
}
- /* Get the registers for the beast */
- if (of_address_to_resource(np, 0, &rsrc)) {
- printk(KERN_ERR "Failed to get registers for spider %s"
- " workarounds disabled\n", np->full_name);
- return;
- }
+ bus = &iowa_busses[iowa_bus_count];
+ bus->phb = phb;
+ bus->ops = ops;
- /* Mask out some useless bits in there to get to the base of the
- * spider chip
- */
- rsrc.start &= ~0xfffffffful;
-
- /* Map them */
- regs = ioremap(rsrc.start + SPIDER_PCI_REG_BASE, 0x1000);
- if (regs == NULL) {
- printk(KERN_ERR "Failed to map registers for spider %s"
- " workarounds disabled\n", np->full_name);
- return;
- }
-
- spider_pci_count++;
-
- /* We assume spiders only have one MMIO resource */
- bus->mmio_start = phb->mem_resources[0].start;
- bus->mmio_end = phb->mem_resources[0].end + 1;
-
- bus->pio_vstart = (unsigned long)phb->io_base_virt;
- bus->pio_vend = bus->pio_vstart + phb->pci_io_size;
-
- bus->regs = regs;
-
- printk(KERN_INFO "PCI: Spider MMIO workaround for %s\n",np->full_name);
+ if (initfunc)
+ if ((*initfunc)(bus, data))
+ return;
- pr_debug(" mmio (P) = 0x%016lx..0x%016lx\n",
- bus->mmio_start, bus->mmio_end);
- pr_debug(" pio (V) = 0x%016lx..0x%016lx\n",
- bus->pio_vstart, bus->pio_vend);
- pr_debug(" regs (P) = 0x%016lx (V) = 0x%p\n",
- rsrc.start + SPIDER_PCI_REG_BASE, bus->regs);
+ iowa_bus_count++;
- spider_pci_setup_chip(bus);
+ pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name);
}
-static struct ppc_pci_io __initdata spider_pci_io = {
- .readb = spider_readb,
- .readw = spider_readw,
- .readl = spider_readl,
- .readq = spider_readq,
- .readw_be = spider_readw_be,
- .readl_be = spider_readl_be,
- .readq_be = spider_readq_be,
- .readsb = spider_readsb,
- .readsw = spider_readsw,
- .readsl = spider_readsl,
- .memcpy_fromio = spider_memcpy_fromio,
-};
-
-static int __init spider_pci_workaround_init(void)
+/* enable IO workaround */
+void __init io_workaround_init(void)
{
- struct pci_controller *phb;
-
- /* Find spider bridges. We assume they have been all probed
- * in setup_arch(). If that was to change, we would need to
- * update this code to cope with dynamically added busses
- */
- list_for_each_entry(phb, &hose_list, list_node) {
- struct device_node *np = phb->dn;
- const char *model = of_get_property(np, "model", NULL);
-
- /* If no model property or name isn't exactly "pci", skip */
- if (model == NULL || strcmp(np->name, "pci"))
- continue;
- /* If model is not "Spider", skip */
- if (strcmp(model, "Spider"))
- continue;
- spider_pci_add_one(phb);
- }
-
- /* No Spider PCI found, exit */
- if (spider_pci_count == 0)
- return 0;
+ static int io_workaround_inited;
- /* Setup IO callbacks. We only setup MMIO reads. PIO reads will
- * fallback to MMIO reads (though without a token, thus slower)
- */
- ppc_pci_io = spider_pci_io;
-
- /* Setup ioremap callback */
- ppc_md.ioremap = spider_ioremap;
-
- return 0;
+ if (io_workaround_inited)
+ return;
+ ppc_pci_io = iowa_pci_io;
+ ppc_md.ioremap = iowa_ioremap;
+ io_workaround_inited = 1;
}
-machine_arch_initcall(cell, spider_pci_workaround_init);
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/platforms/cell/io-workarounds.h
new file mode 100644
index 000000000000..79d8ed3d510f
--- /dev/null
+++ b/arch/powerpc/platforms/cell/io-workarounds.h
@@ -0,0 +1,49 @@
+/*
+ * Support PCI IO workaround
+ *
+ * (C) Copyright 2007-2008 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _IO_WORKAROUNDS_H
+#define _IO_WORKAROUNDS_H
+
+#include <linux/io.h>
+#include <asm/pci-bridge.h>
+
+/* Bus info */
+struct iowa_bus {
+ struct pci_controller *phb;
+ struct ppc_pci_io *ops;
+ void *private;
+};
+
+void __init io_workaround_init(void);
+void __init iowa_register_bus(struct pci_controller *, struct ppc_pci_io *,
+ int (*)(struct iowa_bus *, void *), void *);
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR);
+struct iowa_bus *iowa_pio_find_bus(unsigned long);
+
+extern struct ppc_pci_io spiderpci_ops;
+extern int spiderpci_iowa_init(struct iowa_bus *, void *);
+
+#define SPIDER_PCI_REG_BASE 0xd000
+#define SPIDER_PCI_REG_SIZE 0x1000
+#define SPIDER_PCI_VCI_CNTL_STAT 0x0110
+#define SPIDER_PCI_DUMMY_READ 0x0810
+#define SPIDER_PCI_DUMMY_READ_BASE 0x0814
+
+#endif /* _IO_WORKAROUNDS_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 5c531e8f9f6f..ab721b50fbba 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -57,6 +57,7 @@
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
+#include "io-workarounds.h"
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
@@ -117,13 +118,50 @@ static void cell_fixup_pcie_rootcomplex(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, cell_fixup_pcie_rootcomplex);
+static int __devinit cell_setup_phb(struct pci_controller *phb)
+{
+ const char *model;
+ struct device_node *np;
+
+ int rc = rtas_setup_phb(phb);
+ if (rc)
+ return rc;
+
+ np = phb->dn;
+ model = of_get_property(np, "model", NULL);
+ if (model == NULL || strcmp(np->name, "pci"))
+ return 0;
+
+ /* Setup workarounds for spider */
+ if (strcmp(model, "Spider"))
+ return 0;
+
+ iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init,
+ (void *)SPIDER_PCI_REG_BASE);
+ io_workaround_init();
+
+ return 0;
+}
+
static int __init cell_publish_devices(void)
{
+ struct device_node *root = of_find_node_by_path("/");
+ struct device_node *np;
int node;
/* Publish OF platform devices for southbridge IOs */
of_platform_bus_probe(NULL, NULL, NULL);
+ /* On spider based blades, we need to manually create the OF
+ * platform devices for the PCI host bridges
+ */
+ for_each_child_of_node(root, np) {
+ if (np->type == NULL || (strcmp(np->type, "pci") != 0 &&
+ strcmp(np->type, "pciex") != 0))
+ continue;
+ of_platform_device_create(np, NULL, NULL);
+ }
+
/* There is no device for the MIC memory controller, thus we create
* a platform device for it to attach the EDAC driver to.
*/
@@ -132,6 +170,7 @@ static int __init cell_publish_devices(void)
continue;
platform_device_register_simple("cbe-mic", node, NULL, 0);
}
+
return 0;
}
machine_subsys_initcall(cell, cell_publish_devices);
@@ -213,7 +252,7 @@ static void __init cell_setup_arch(void)
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
- find_and_init_phbs();
+
cbe_pervasive_init();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
@@ -249,7 +288,7 @@ define_machine(cell) {
.calibrate_decr = generic_calibrate_decr,
.progress = cell_progress,
.init_IRQ = cell_init_irq,
- .pci_setup_phb = rtas_setup_phb,
+ .pci_setup_phb = cell_setup_phb,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
new file mode 100644
index 000000000000..418b605ac35a
--- /dev/null
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -0,0 +1,184 @@
+/*
+ * IO workarounds for PCI on Celleb/Cell platform
+ *
+ * (C) Copyright 2006-2007 TOSHIBA CORPORATION
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/ppc-pci.h>
+#include <asm/pci-bridge.h>
+
+#include "io-workarounds.h"
+
+#define SPIDER_PCI_DISABLE_PREFETCH
+
+struct spiderpci_iowa_private {
+ void __iomem *regs;
+};
+
+static void spiderpci_io_flush(struct iowa_bus *bus)
+{
+ struct spiderpci_iowa_private *priv;
+ u32 val;
+
+ priv = bus->private;
+ val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
+ iosync();
+}
+
+#define SPIDER_PCI_MMIO_READ(name, ret) \
+static ret spiderpci_##name(const PCI_IO_ADDR addr) \
+{ \
+ ret val = __do_##name(addr); \
+ spiderpci_io_flush(iowa_mem_find_bus(addr)); \
+ return val; \
+}
+
+#define SPIDER_PCI_MMIO_READ_STR(name) \
+static void spiderpci_##name(const PCI_IO_ADDR addr, void *buf, \
+ unsigned long count) \
+{ \
+ __do_##name(addr, buf, count); \
+ spiderpci_io_flush(iowa_mem_find_bus(addr)); \
+}
+
+SPIDER_PCI_MMIO_READ(readb, u8)
+SPIDER_PCI_MMIO_READ(readw, u16)
+SPIDER_PCI_MMIO_READ(readl, u32)
+SPIDER_PCI_MMIO_READ(readq, u64)
+SPIDER_PCI_MMIO_READ(readw_be, u16)
+SPIDER_PCI_MMIO_READ(readl_be, u32)
+SPIDER_PCI_MMIO_READ(readq_be, u64)
+SPIDER_PCI_MMIO_READ_STR(readsb)
+SPIDER_PCI_MMIO_READ_STR(readsw)
+SPIDER_PCI_MMIO_READ_STR(readsl)
+
+static void spiderpci_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
+ unsigned long n)
+{
+ __do_memcpy_fromio(dest, src, n);
+ spiderpci_io_flush(iowa_mem_find_bus(src));
+}
+
+static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
+ void __iomem *regs)
+{
+ void *dummy_page_va;
+ dma_addr_t dummy_page_da;
+
+#ifdef SPIDER_PCI_DISABLE_PREFETCH
+ u32 val = in_be32(regs + SPIDER_PCI_VCI_CNTL_STAT);
+ pr_debug("SPIDER_IOWA:PVCI_Control_Status was 0x%08x\n", val);
+ out_be32(regs + SPIDER_PCI_VCI_CNTL_STAT, val | 0x8);
+#endif /* SPIDER_PCI_DISABLE_PREFETCH */
+
+ /* setup dummy read */
+ /*
+ * On CellBlade, we cannot know which XDR memory kmalloc() will
+ * use when it allocates dummy_page_va.  For best performance,
+ * dummy_page_va should come from the XDR nearest the spider-pci,
+ * which means selecting the CBE nearest the spider-pci, but we
+ * do not currently have a way to do that.
+ *
+ * Celleb does not have this problem, because it has only one XDR.
+ */
+ dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dummy_page_va) {
+ pr_err("SPIDERPCI-IOWA:Alloc dummy_page_va failed.\n");
+ return -1;
+ }
+
+ dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dummy_page_da)) {
+ pr_err("SPIDER-IOWA:Map dummy page filed.\n");
+ kfree(dummy_page_va);
+ return -1;
+ }
+
+ out_be32(regs + SPIDER_PCI_DUMMY_READ_BASE, dummy_page_da);
+
+ return 0;
+}
+
+int __init spiderpci_iowa_init(struct iowa_bus *bus, void *data)
+{
+ void __iomem *regs = NULL;
+ struct spiderpci_iowa_private *priv;
+ struct device_node *np = bus->phb->dn;
+ struct resource r;
+ unsigned long offset = (unsigned long)data;
+
+ pr_debug("SPIDERPCI-IOWA:Bus initialize for spider(%s)\n",
+ np->full_name);
+
+ priv = kzalloc(sizeof(struct spiderpci_iowa_private), GFP_KERNEL);
+ if (!priv) {
+ pr_err("SPIDERPCI-IOWA:"
+ "Can't allocate struct spiderpci_iowa_private");
+ return -1;
+ }
+
+ if (of_address_to_resource(np, 0, &r)) {
+ pr_err("SPIDERPCI-IOWA:Can't get resource.\n");
+ goto error;
+ }
+
+ regs = ioremap(r.start + offset, SPIDER_PCI_REG_SIZE);
+ if (!regs) {
+ pr_err("SPIDERPCI-IOWA:ioremap failed.\n");
+ goto error;
+ }
+ priv->regs = regs;
+ bus->private = priv;
+
+ if (spiderpci_pci_setup_chip(bus->phb, regs))
+ goto error;
+
+ return 0;
+
+error:
+ kfree(priv);
+ bus->private = NULL;
+
+ if (regs)
+ iounmap(regs);
+
+ return -1;
+}
+
+struct ppc_pci_io spiderpci_ops = {
+ .readb = spiderpci_readb,
+ .readw = spiderpci_readw,
+ .readl = spiderpci_readl,
+ .readq = spiderpci_readq,
+ .readw_be = spiderpci_readw_be,
+ .readl_be = spiderpci_readl_be,
+ .readq_be = spiderpci_readq_be,
+ .readsb = spiderpci_readsb,
+ .readsw = spiderpci_readsw,
+ .readsl = spiderpci_readsl,
+ .memcpy_fromio = spiderpci_memcpy_fromio,
+};
+
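
For reference, SPIDER_PCI_MMIO_READ(readl, u32) above expands to roughly the
accessor sketched below (illustrative expansion, not part of the patch): every
read routed through spiderpci_ops performs the normal MMIO load and then calls
spiderpci_io_flush(), which reads the bridge's dummy-read register and issues
an iosync().

    /* expansion of SPIDER_PCI_MMIO_READ(readl, u32), shown for illustration */
    static u32 spiderpci_readl(const PCI_IO_ADDR addr)
    {
            u32 val = __do_readl(addr);                     /* normal MMIO load */
            spiderpci_io_flush(iowa_mem_find_bus(addr));    /* dummy read + iosync() */
            return val;
    }
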
diff --git a/arch/powerpc/platforms/celleb/Kconfig b/arch/powerpc/platforms/celleb/Kconfig
deleted file mode 100644
index 372891edcdd2..000000000000
--- a/arch/powerpc/platforms/celleb/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-config PPC_CELLEB
- bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
- depends on PPC_MULTIPLATFORM && PPC64
- select PPC_CELL
- select PPC_CELL_NATIVE
- select PPC_RTAS
- select PPC_INDIRECT_IO
- select PPC_OF_PLATFORM_PCI
- select HAS_TXX9_SERIAL
- select PPC_UDBG_BEAT
- select USB_OHCI_BIG_ENDIAN_MMIO
- select USB_EHCI_BIG_ENDIAN_MMIO
diff --git a/arch/powerpc/platforms/celleb/Makefile b/arch/powerpc/platforms/celleb/Makefile
deleted file mode 100644
index 889d43f715ea..000000000000
--- a/arch/powerpc/platforms/celleb/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-obj-y += interrupt.o iommu.o setup.o \
- htab.o beat.o hvCall.o pci.o \
- scc_epci.o scc_uhc.o \
- io-workarounds.o
-
-obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_PPC_UDBG_BEAT) += udbg_beat.o
-obj-$(CONFIG_SERIAL_TXX9) += scc_sio.o
-obj-$(CONFIG_SPU_BASE) += spu_priv1.o
diff --git a/arch/powerpc/platforms/celleb/io-workarounds.c b/arch/powerpc/platforms/celleb/io-workarounds.c
deleted file mode 100644
index 423339be1bac..000000000000
--- a/arch/powerpc/platforms/celleb/io-workarounds.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Support for Celleb io workarounds
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This file is based to arch/powerpc/platform/cell/io-workarounds.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/irq.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "pci.h"
-
-#define MAX_CELLEB_PCI_BUS 4
-
-void *celleb_dummy_page_va;
-
-static struct celleb_pci_bus {
- struct pci_controller *phb;
- void (*dummy_read)(struct pci_controller *);
-} celleb_pci_busses[MAX_CELLEB_PCI_BUS];
-
-static int celleb_pci_count = 0;
-
-static struct celleb_pci_bus *celleb_pci_find(unsigned long vaddr,
- unsigned long paddr)
-{
- int i, j;
- struct resource *res;
-
- for (i = 0; i < celleb_pci_count; i++) {
- struct celleb_pci_bus *bus = &celleb_pci_busses[i];
- struct pci_controller *phb = bus->phb;
- if (paddr)
- for (j = 0; j < 3; j++) {
- res = &phb->mem_resources[j];
- if (paddr >= res->start && paddr <= res->end)
- return bus;
- }
- res = &phb->io_resource;
- if (vaddr && vaddr >= res->start && vaddr <= res->end)
- return bus;
- }
- return NULL;
-}
-
-static void celleb_io_flush(const PCI_IO_ADDR addr)
-{
- struct celleb_pci_bus *bus;
- int token;
-
- token = PCI_GET_ADDR_TOKEN(addr);
-
- if (token && token <= celleb_pci_count)
- bus = &celleb_pci_busses[token - 1];
- else {
- unsigned long vaddr, paddr;
- pte_t *ptep;
-
- vaddr = (unsigned long)PCI_FIX_ADDR(addr);
- if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
- return;
-
- ptep = find_linux_pte(init_mm.pgd, vaddr);
- if (ptep == NULL)
- paddr = 0;
- else
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- bus = celleb_pci_find(vaddr, paddr);
-
- if (bus == NULL)
- return;
- }
-
- if (bus->dummy_read)
- bus->dummy_read(bus->phb);
-}
-
-static u8 celleb_readb(const PCI_IO_ADDR addr)
-{
- u8 val;
- val = __do_readb(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u16 celleb_readw(const PCI_IO_ADDR addr)
-{
- u16 val;
- val = __do_readw(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u32 celleb_readl(const PCI_IO_ADDR addr)
-{
- u32 val;
- val = __do_readl(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u64 celleb_readq(const PCI_IO_ADDR addr)
-{
- u64 val;
- val = __do_readq(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u16 celleb_readw_be(const PCI_IO_ADDR addr)
-{
- u16 val;
- val = __do_readw_be(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u32 celleb_readl_be(const PCI_IO_ADDR addr)
-{
- u32 val;
- val = __do_readl_be(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static u64 celleb_readq_be(const PCI_IO_ADDR addr)
-{
- u64 val;
- val = __do_readq_be(addr);
- celleb_io_flush(addr);
- return val;
-}
-
-static void celleb_readsb(const PCI_IO_ADDR addr,
- void *buf, unsigned long count)
-{
- __do_readsb(addr, buf, count);
- celleb_io_flush(addr);
-}
-
-static void celleb_readsw(const PCI_IO_ADDR addr,
- void *buf, unsigned long count)
-{
- __do_readsw(addr, buf, count);
- celleb_io_flush(addr);
-}
-
-static void celleb_readsl(const PCI_IO_ADDR addr,
- void *buf, unsigned long count)
-{
- __do_readsl(addr, buf, count);
- celleb_io_flush(addr);
-}
-
-static void celleb_memcpy_fromio(void *dest,
- const PCI_IO_ADDR src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- celleb_io_flush(src);
-}
-
-static void __iomem *celleb_ioremap(unsigned long addr,
- unsigned long size,
- unsigned long flags)
-{
- struct celleb_pci_bus *bus;
- void __iomem *res = __ioremap(addr, size, flags);
- int busno;
-
- bus = celleb_pci_find(0, addr);
- if (bus != NULL) {
- busno = bus - celleb_pci_busses;
- PCI_SET_ADDR_TOKEN(res, busno + 1);
- }
- return res;
-}
-
-static void celleb_iounmap(volatile void __iomem *addr)
-{
- return __iounmap(PCI_FIX_ADDR(addr));
-}
-
-static struct ppc_pci_io celleb_pci_io __initdata = {
- .readb = celleb_readb,
- .readw = celleb_readw,
- .readl = celleb_readl,
- .readq = celleb_readq,
- .readw_be = celleb_readw_be,
- .readl_be = celleb_readl_be,
- .readq_be = celleb_readq_be,
- .readsb = celleb_readsb,
- .readsw = celleb_readsw,
- .readsl = celleb_readsl,
- .memcpy_fromio = celleb_memcpy_fromio,
-};
-
-void __init celleb_pci_add_one(struct pci_controller *phb,
- void (*dummy_read)(struct pci_controller *))
-{
- struct celleb_pci_bus *bus = &celleb_pci_busses[celleb_pci_count];
- struct device_node *np = phb->dn;
-
- if (celleb_pci_count >= MAX_CELLEB_PCI_BUS) {
- printk(KERN_ERR "Too many pci bridges, workarounds"
- " disabled for %s\n", np->full_name);
- return;
- }
-
- celleb_pci_count++;
-
- bus->phb = phb;
- bus->dummy_read = dummy_read;
-}
-
-static struct of_device_id celleb_pci_workaround_match[] __initdata = {
- {
- .name = "pci-pseudo",
- .data = fake_pci_workaround_init,
- }, {
- .name = "epci",
- .data = epci_workaround_init,
- }, {
- },
-};
-
-int __init celleb_pci_workaround_init(void)
-{
- struct pci_controller *phb;
- struct device_node *node;
- const struct of_device_id *match;
- void (*init_func)(struct pci_controller *);
-
- celleb_dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!celleb_dummy_page_va) {
- printk(KERN_ERR "Celleb: dummy read disabled. "
- "Alloc celleb_dummy_page_va failed\n");
- return 1;
- }
-
- list_for_each_entry(phb, &hose_list, list_node) {
- node = phb->dn;
- match = of_match_node(celleb_pci_workaround_match, node);
-
- if (match) {
- init_func = match->data;
- (*init_func)(phb);
- }
- }
-
- ppc_pci_io = celleb_pci_io;
- ppc_md.ioremap = celleb_ioremap;
- ppc_md.iounmap = celleb_iounmap;
-
- return 0;
-}
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index c775cd4b3d6e..8ff330d026ca 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -59,8 +59,33 @@ system_reset_iSeries:
andc r4,r4,r5
mtspr SPRN_CTRLT,r4
+/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
+/* In the UP case we'll yield() later, and we will not access the paca anyway */
+#ifdef CONFIG_SMP
1:
HMT_LOW
+ LOAD_REG_IMMEDIATE(r23, __secondary_hold_spinloop)
+ ld r23,0(r23)
+ sync
+ LOAD_REG_IMMEDIATE(r3,current_set)
+ sldi r28,r24,3 /* get current_set[cpu#] */
+ ldx r3,r3,r28
+ addi r1,r3,THREAD_SIZE
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
+ cmpwi 0,r23,0 /* Keep poking the Hypervisor until */
+ bne 2f /* we're released */
+ /* Let the Hypervisor know we are alive */
+ /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+ lis r3,0x8002
+ rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
+ li r0,-1 /* r0=-1 indicates a Hypervisor call */
+ sc /* Invoke the hypervisor via a system call */
+ b 1b
+#endif
+
+2:
+ HMT_LOW
#ifdef CONFIG_SMP
lbz r23,PACAPROCSTART(r13) /* Test if this processor
* should start */
@@ -91,7 +116,7 @@ iSeries_secondary_smp_loop:
li r0,-1 /* r0=-1 indicates a Hypervisor call */
sc /* Invoke the hypervisor via a system call */
mfspr r13,SPRN_SPRG3 /* Put r13 back ???? */
- b 1b /* If SMP not configured, secondaries
+ b 2b /* If SMP not configured, secondaries
* loop forever */
/*** ISeries-LPAR interrupt handlers ***/
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index c73379ec9141..1d201782d4e5 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -25,6 +25,7 @@
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/lmb.h>
+#include <linux/of.h>
#include <asm/prom.h>
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 306a9d07491d..07fe5b69b9e2 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -34,3 +34,8 @@ config LPARCFG
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
+
+config PPC_PSERIES_DEBUG
+ depends on PPC_PSERIES && PPC_EARLY_DEBUG
+ bool "Enable extra debug logging in platforms/pseries"
+ default y
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index bdae04bb7a01..bd2593ed28dd 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -2,6 +2,10 @@ ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
+ifeq ($(CONFIG_PPC_PSERIES_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o ras.o rtasd.o \
firmware.o power.o
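
The DBG()/DBG_LOW() to pr_debug() conversions in the rest of this patch rely
on this -DDEBUG switch: with CONFIG_PPC_PSERIES_DEBUG=y the pseries files are
built with DEBUG defined, so pr_debug() becomes a real printk(); otherwise it
compiles away. A simplified sketch of that conditional definition (the real
one lives in include/linux/kernel.h):

    #ifdef DEBUG
    #define pr_debug(fmt, arg...) \
            printk(KERN_DEBUG fmt, ##arg)
    #else
    #define pr_debug(fmt, arg...) \
            do { } while (0)        /* no-op when DEBUG is not defined */
    #endif
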
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 550b2f7d2cc1..a3fd56b186e6 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -39,7 +39,6 @@
#include <asm/ppc-pci.h>
#include <asm/rtas.h>
-#undef DEBUG
/** Overview:
* EEH, or "Extended Error Handling" is a PCI bridge technology for
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index 1e83fcd0df31..ce37040af870 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -28,7 +28,6 @@
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
-#undef DEBUG
/**
* The pci address cache subsystem. This subsystem places
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index b765b7c77b65..9d3a40f45974 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -21,17 +21,11 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
typedef struct {
unsigned long val;
@@ -72,7 +66,7 @@ void __init fw_feature_init(const char *hypertas, unsigned long len)
const char *s;
int i;
- DBG(" -> fw_feature_init()\n");
+ pr_debug(" -> fw_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
@@ -88,5 +82,5 @@ void __init fw_feature_init(const char *hypertas, unsigned long len)
}
}
- DBG(" <- fw_feature_init()\n");
+ pr_debug(" <- fw_feature_init()\n");
}
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a65c76308201..176f1f39d2d5 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -47,7 +47,6 @@
#include "plpar_wrappers.h"
-#define DBG(fmt...)
static void tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
@@ -322,7 +321,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
dn = pci_bus_to_OF_node(bus);
- DBG("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
+ pr_debug("pci_dma_bus_setup_pSeries: setting up bus %s\n", dn->full_name);
if (bus->self) {
/* This is not a root bus, any setup will be done for the
@@ -347,7 +346,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
children++;
- DBG("Children: %d\n", children);
+ pr_debug("Children: %d\n", children);
/* Calculate amount of DMA window per slot. Each window must be
* a power of two (due to pci_alloc_consistent requirements).
@@ -361,8 +360,8 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
while (pci->phb->dma_window_size * children > 0x80000000ul)
pci->phb->dma_window_size >>= 1;
- DBG("No ISA/IDE, window size is 0x%lx\n",
- pci->phb->dma_window_size);
+ pr_debug("No ISA/IDE, window size is 0x%lx\n",
+ pci->phb->dma_window_size);
pci->phb->dma_window_base_cur = 0;
return;
@@ -387,8 +386,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
while (pci->phb->dma_window_size * children > 0x70000000ul)
pci->phb->dma_window_size >>= 1;
- DBG("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
-
+ pr_debug("ISA/IDE, window size is 0x%lx\n", pci->phb->dma_window_size);
}
@@ -401,7 +399,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
dn = pci_bus_to_OF_node(bus);
- DBG("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n", dn->full_name);
+ pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %s\n",
+ dn->full_name);
/* Find nearest ibm,dma-window, walking up the device tree */
for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
@@ -411,14 +410,14 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
}
if (dma_window == NULL) {
- DBG(" no ibm,dma-window property !\n");
+ pr_debug(" no ibm,dma-window property !\n");
return;
}
ppci = PCI_DN(pdn);
- DBG(" parent is %s, iommu_table: 0x%p\n",
- pdn->full_name, ppci->iommu_table);
+ pr_debug(" parent is %s, iommu_table: 0x%p\n",
+ pdn->full_name, ppci->iommu_table);
if (!ppci->iommu_table) {
tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
@@ -426,7 +425,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
bus->number);
ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
- DBG(" created table: %p\n", ppci->iommu_table);
+ pr_debug(" created table: %p\n", ppci->iommu_table);
}
if (pdn != dn)
@@ -439,7 +438,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
struct device_node *dn;
struct iommu_table *tbl;
- DBG("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
+ pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));
dn = dev->dev.archdata.of_node;
@@ -450,7 +449,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
if (!dev->bus->self) {
struct pci_controller *phb = PCI_DN(dn)->phb;
- DBG(" --> first child, no bridge. Allocating iommu table.\n");
+ pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
tbl = kmalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
phb->node);
iommu_table_setparms(phb, dn, tbl);
@@ -480,7 +479,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
const void *dma_window = NULL;
struct pci_dn *pci;
- DBG("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
+ pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
/* dev setup for LPAR is a little tricky, since the device tree might
* contain the dma-window properties per-device and not necessarily
@@ -489,7 +488,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
* already allocated.
*/
dn = pci_device_to_OF_node(dev);
- DBG(" node is %s\n", dn->full_name);
+ pr_debug(" node is %s\n", dn->full_name);
for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table;
pdn = pdn->parent) {
@@ -504,13 +503,13 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
pci_name(dev), dn? dn->full_name : "<null>");
return;
}
- DBG(" parent is %s\n", pdn->full_name);
+ pr_debug(" parent is %s\n", pdn->full_name);
/* Check for parent == NULL so we don't try to setup the empty EADS
* slots on POWER4 machines.
*/
if (dma_window == NULL || pdn->parent == NULL) {
- DBG(" no dma window for device, linking to parent\n");
+ pr_debug(" no dma window for device, linking to parent\n");
dev->dev.archdata.dma_data = PCI_DN(pdn)->iommu_table;
return;
}
@@ -522,9 +521,9 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
pci->phb->bus->number);
pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
- DBG(" created table: %p\n", pci->iommu_table);
+ pr_debug(" created table: %p\n", pci->iommu_table);
} else {
- DBG(" found DMA window, table: %p\n", pci->iommu_table);
+ pr_debug(" found DMA window, table: %p\n", pci->iommu_table);
}
dev->dev.archdata.dma_data = pci->iommu_table;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 9235c469449e..2cbaedb17f3e 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -19,7 +19,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#undef DEBUG_LOW
+/* Enables debugging of low-level hash table routines - careful! */
+#undef DEBUG
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
@@ -42,11 +43,6 @@
#include "plpar_wrappers.h"
#include "pseries.h"
-#ifdef DEBUG_LOW
-#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while(0)
-#else
-#define DBG_LOW(fmt...) do { } while(0)
-#endif
/* in hvCall.S */
EXPORT_SYMBOL(plpar_hcall);
@@ -196,6 +192,8 @@ void __init udbg_init_debug_lpar(void)
udbg_putc = udbg_putcLP;
udbg_getc = udbg_getcLP;
udbg_getc_poll = udbg_getc_pollLP;
+
+ register_early_udbg_console();
}
/* returns 0 if couldn't find or use /chosen/stdout as console */
@@ -288,15 +286,15 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long hpte_v, hpte_r;
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
+ pr_debug("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
+ "rflags=%lx, vflags=%lx, psize=%d)\n",
+ hpte_group, va, pa, rflags, vflags, psize);
hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
+ pr_debug(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
@@ -313,7 +311,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot);
if (unlikely(lpar_rc == H_PTEG_FULL)) {
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" full\n");
+ pr_debug(" full\n");
return -1;
}
@@ -324,11 +322,11 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
*/
if (unlikely(lpar_rc != H_SUCCESS)) {
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %d\n", lpar_rc);
+ pr_debug(" lpar err %lu\n", lpar_rc);
return -2;
}
if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %d\n", slot & 7);
+ pr_debug(" -> slot: %lu\n", slot & 7);
/* Because of iSeries, we have to pass down the secondary
* bucket bit here as well
@@ -420,17 +418,17 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
want_v = hpte_encode_avpn(va, psize, ssize);
- DBG_LOW(" update: avpnv=%016lx, hash=%016lx, f=%x, psize: %d ... ",
- want_v, slot, flags, psize);
+ pr_debug(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...",
+ want_v, slot, flags, psize);
lpar_rc = plpar_pte_protect(flags, slot, want_v);
if (lpar_rc == H_NOT_FOUND) {
- DBG_LOW("not found !\n");
+ pr_debug("not found !\n");
return -1;
}
- DBG_LOW("ok\n");
+ pr_debug("ok\n");
BUG_ON(lpar_rc != H_SUCCESS);
@@ -505,8 +503,8 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va,
unsigned long lpar_rc;
unsigned long dummy1, dummy2;
- DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d",
- slot, va, psize, local);
+ pr_debug(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
+ slot, va, psize, local);
want_v = hpte_encode_avpn(va, psize, ssize);
lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index a1ab25c7082f..2b548afd1003 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -67,8 +67,6 @@ static int ras_check_exception_token;
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
-/* #define DEBUG */
-
static void request_ras_irqs(struct device_node *np,
irq_handler_t handler,
@@ -237,7 +235,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n",
*((unsigned long *)&ras_log_buf), status);
-#ifndef DEBUG
+#ifndef DEBUG_RTAS_POWER_OFF
/* Don't actually power off when debugging so we can test
* without actually failing while injecting errors.
* Error data will not be logged to syslog.
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index e3078ce41518..befadd4f9524 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -29,11 +29,6 @@
#include <asm/atomic.h>
#include <asm/machdep.h>
-#if 0
-#define DEBUG(A...) printk(KERN_ERR A)
-#else
-#define DEBUG(A...)
-#endif
static DEFINE_SPINLOCK(rtasd_log_lock);
@@ -198,7 +193,7 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
unsigned long s;
int len = 0;
- DEBUG("logging event\n");
+ pr_debug("rtasd: logging event\n");
if (buf == NULL)
return;
@@ -409,7 +404,8 @@ static int rtasd(void *unused)
daemonize("rtasd");
printk(KERN_DEBUG "RTAS daemon started\n");
- DEBUG("will sleep for %d milliseconds\n", (30000/rtas_event_scan_rate));
+ pr_debug("rtasd: will sleep for %d milliseconds\n",
+ (30000 / rtas_event_scan_rate));
/* See if we have any error stored in NVRAM */
memset(logdata, 0, rtas_error_log_max);
@@ -428,9 +424,9 @@ static int rtasd(void *unused)
do_event_scan_all_cpus(1000);
if (surveillance_timeout != -1) {
- DEBUG("enabling surveillance\n");
+ pr_debug("rtasd: enabling surveillance\n");
enable_surveillance(surveillance_timeout);
- DEBUG("surveillance enabled\n");
+ pr_debug("rtasd: surveillance enabled\n");
}
/* Delay should be at least one second since some
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index e5b0ea870164..bec3803f0618 100644
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
@@ -38,9 +38,7 @@
#define SCANLOG_HWERROR -1
#define SCANLOG_CONTINUE 1
-#define DEBUG(A...) do { if (scanlog_debug) printk(KERN_ERR "scanlog: " A); } while (0)
-static int scanlog_debug;
static unsigned int ibm_scan_log_dump; /* RTAS token */
static struct proc_dir_entry *proc_ppc64_scan_log_dump; /* The proc file */
@@ -86,14 +84,14 @@ static ssize_t scanlog_read(struct file *file, char __user *buf,
memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
- DEBUG("status=%d, data[0]=%x, data[1]=%x, data[2]=%x\n",
- status, data[0], data[1], data[2]);
+ pr_debug("scanlog: status=%d, data[0]=%x, data[1]=%x, " \
+ "data[2]=%x\n", status, data[0], data[1], data[2]);
switch (status) {
case SCANLOG_COMPLETE:
- DEBUG("hit eof\n");
+ pr_debug("scanlog: hit eof\n");
return 0;
case SCANLOG_HWERROR:
- DEBUG("hardware error reading scan log data\n");
+ pr_debug("scanlog: hardware error reading data\n");
return -EIO;
case SCANLOG_CONTINUE:
/* We may or may not have data yet */
@@ -110,7 +108,8 @@ static ssize_t scanlog_read(struct file *file, char __user *buf,
/* Assume extended busy */
wait_time = rtas_busy_delay_time(status);
if (!wait_time) {
- printk(KERN_ERR "scanlog: unknown error from rtas: %d\n", status);
+ printk(KERN_ERR "scanlog: unknown error " \
+ "from rtas: %d\n", status);
return -EIO;
}
}
@@ -134,15 +133,9 @@ static ssize_t scanlog_write(struct file * file, const char __user * buf,
if (buf) {
if (strncmp(stkbuf, "reset", 5) == 0) {
- DEBUG("reset scanlog\n");
+ pr_debug("scanlog: reset scanlog\n");
status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 0, 0);
- DEBUG("rtas returns %d\n", status);
- } else if (strncmp(stkbuf, "debugon", 7) == 0) {
- printk(KERN_ERR "scanlog: debug on\n");
- scanlog_debug = 1;
- } else if (strncmp(stkbuf, "debugoff", 8) == 0) {
- printk(KERN_ERR "scanlog: debug off\n");
- scanlog_debug = 0;
+ pr_debug("scanlog: rtas returns %d\n", status);
}
}
return count;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f66aa9c3b135..f5d29f5b13c1 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -16,8 +16,6 @@
* bootup setup stuff..
*/
-#undef DEBUG
-
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -70,11 +68,6 @@
#include "plpar_wrappers.h"
#include "pseries.h"
-#ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
int fwnmi_active; /* TRUE if an FWNMI handler is present */
@@ -326,7 +319,7 @@ static int pseries_set_xdabr(unsigned long dabr)
*/
static void __init pSeries_init_early(void)
{
- DBG(" -> pSeries_init_early()\n");
+ pr_debug(" -> pSeries_init_early()\n");
if (firmware_has_feature(FW_FEATURE_LPAR))
find_udbg_vterm();
@@ -338,7 +331,7 @@ static void __init pSeries_init_early(void)
iommu_init_early_pSeries();
- DBG(" <- pSeries_init_early()\n");
+ pr_debug(" <- pSeries_init_early()\n");
}
/*
@@ -383,7 +376,7 @@ static int __init pSeries_probe(void)
of_flat_dt_is_compatible(root, "IBM,CBEA"))
return 0;
- DBG("pSeries detected, looking for LPAR capability...\n");
+ pr_debug("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
of_scan_flat_dt(pSeries_probe_hypertas, NULL);
@@ -393,8 +386,8 @@ static int __init pSeries_probe(void)
else
hpte_init_native();
- DBG("Machine is%s LPAR !\n",
- (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
+ pr_debug("Machine is%s LPAR !\n",
+ (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
return 1;
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index ea4c65917a64..9d8f8c84ab89 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -12,7 +12,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
@@ -51,12 +50,6 @@
#include "plpar_wrappers.h"
#include "pseries.h"
-#ifdef DEBUG
-#include <asm/udbg.h>
-#define DBG(fmt...) udbg_printf(fmt)
-#else
-#define DBG(fmt...)
-#endif
/*
* The primary thread of each non-boot processor is recorded here before
@@ -231,7 +224,7 @@ static void __init smp_init_pseries(void)
{
int i;
- DBG(" -> smp_init_pSeries()\n");
+ pr_debug(" -> smp_init_pSeries()\n");
/* Mark threads which are still spinning in hold loops. */
if (cpu_has_feature(CPU_FTR_SMT)) {
@@ -255,7 +248,7 @@ static void __init smp_init_pseries(void)
smp_ops->take_timebase = pSeries_take_timebase;
}
- DBG(" <- smp_init_pSeries()\n");
+ pr_debug(" <- smp_init_pSeries()\n");
}
#ifdef CONFIG_MPIC
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 43df53c30aa0..ebebc28fe895 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#undef DEBUG
#include <linux/types.h>
#include <linux/threads.h>
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index d359d6e92975..7f59188cd9a1 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
*/
static int
axon_ram_direct_access(struct block_device *device, sector_t sector,
- unsigned long *data)
+ void **kaddr, unsigned long *pfn)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
loff_t offset;
@@ -154,7 +154,8 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
return -ERANGE;
}
- *data = bank->ph_addr + offset;
+ *kaddr = (void *)(bank->ph_addr + offset);
+ *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
return 0;
}
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 5c1b246aaccc..7b45670c7af3 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -892,3 +892,44 @@ void fsl_rstcr_restart(char *cmd)
while (1) ;
}
#endif
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+struct platform_diu_data_ops diu_ops = {
+ .diu_size = 1280 * 1024 * 4, /* default one 1280x1024 buffer */
+};
+EXPORT_SYMBOL(diu_ops);
+
+int __init preallocate_diu_videomemory(void)
+{
+ pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+ diu_ops.diu_mem = __alloc_bootmem(diu_ops.diu_size, 8, 0);
+ if (!diu_ops.diu_mem) {
+ printk(KERN_ERR "fsl-diu: cannot allocate %lu bytes\n",
+ diu_ops.diu_size);
+ return -ENOMEM;
+ }
+
+ pr_debug("diu_mem=%p\n", diu_ops.diu_mem);
+
+ rh_init(&diu_ops.diu_rh_info, 4096, ARRAY_SIZE(diu_ops.diu_rh_block),
+ diu_ops.diu_rh_block);
+ return rh_attach_region(&diu_ops.diu_rh_info,
+ (unsigned long) diu_ops.diu_mem,
+ diu_ops.diu_size);
+}
+
+static int __init early_parse_diufb(char *p)
+{
+ if (!p)
+ return 1;
+
+ diu_ops.diu_size = _ALIGN_UP(memparse(p, &p), 8);
+
+ pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+ return 0;
+}
+early_param("diufb", early_parse_diufb);
+
+#endif
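
A usage note on the early_param() hook above: the DIU framebuffer size can be
overridden from the kernel command line, with memparse() accepting the usual
K/M/G suffixes; the parsed value is rounded up to an 8-byte multiple. For
example (an illustrative value, not a recommendation):

    diufb=8M

Without the parameter, diu_size stays at the default of one 1280x1024 buffer
at 4 bytes per pixel.
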
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
index 74c4a9657b33..52c831fa1886 100644
--- a/arch/powerpc/sysdev/fsl_soc.h
+++ b/arch/powerpc/sysdev/fsl_soc.h
@@ -17,5 +17,28 @@ extern int fsl_spi_init(struct spi_board_info *board_infos,
void (*deactivate_cs)(u8 cs, u8 polarity));
extern void fsl_rstcr_restart(char *cmd);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+#include <linux/bootmem.h>
+#include <asm/rheap.h>
+struct platform_diu_data_ops {
+ rh_block_t diu_rh_block[16];
+ rh_info_t diu_rh_info;
+ unsigned long diu_size;
+ void *diu_mem;
+
+ unsigned int (*get_pixel_format) (unsigned int bits_per_pixel,
+ int monitor_port);
+ void (*set_gamma_table) (int monitor_port, char *gamma_table_base);
+ void (*set_monitor_port) (int monitor_port);
+ void (*set_pixel_clock) (unsigned int pixclock);
+ ssize_t (*show_monitor_port) (int monitor_port, char *buf);
+ int (*set_sysfs_monitor_port) (int val);
+};
+
+extern struct platform_diu_data_ops diu_ops;
+int __init preallocate_diu_videomemory(void);
+#endif
+
#endif
#endif
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 047b31027fa6..41af1223e2a0 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -338,15 +338,13 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id)
pdata.freq_m = 8; /* default */
prop = of_get_property(np, "freq_m", NULL);
- if (!prop)
- return -ENODEV;
- pdata.freq_m = *prop;
+ if (prop)
+ pdata.freq_m = *prop;
pdata.freq_n = 3; /* default */
prop = of_get_property(np, "freq_n", NULL);
- if (!prop)
- return -ENODEV;
- pdata.freq_n = *prop;
+ if (prop)
+ pdata.freq_n = *prop;
pdata.timeout = 1000; /* default: 1 second */
@@ -433,9 +431,13 @@ static int __init mv64x60_device_setup(void)
int err;
id = 0;
- for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc")
- if ((err = mv64x60_mpsc_device_setup(np, id++)))
- goto error;
+ for_each_compatible_node(np, "serial", "marvell,mv64360-mpsc") {
+ err = mv64x60_mpsc_device_setup(np, id++);
+ if (err)
+ printk(KERN_ERR "Failed to initialize MV64x60 "
+ "serial device %s: error %d.\n",
+ np->full_name, err);
+ }
id = 0;
id2 = 0;
@@ -443,38 +445,44 @@ static int __init mv64x60_device_setup(void)
pdev = mv64x60_eth_register_shared_pdev(np, id++);
if (IS_ERR(pdev)) {
err = PTR_ERR(pdev);
- goto error;
+ printk(KERN_ERR "Failed to initialize MV64x60 "
+ "network block %s: error %d.\n",
+ np->full_name, err);
+ continue;
}
for_each_child_of_node(np, np2) {
if (!of_device_is_compatible(np2,
"marvell,mv64360-eth"))
continue;
err = mv64x60_eth_device_setup(np2, id2++, pdev);
- if (err) {
- of_node_put(np2);
- goto error;
- }
+ if (err)
+ printk(KERN_ERR "Failed to initialize "
+ "MV64x60 network device %s: "
+ "error %d.\n",
+ np2->full_name, err);
}
}
id = 0;
- for_each_compatible_node(np, "i2c", "marvell,mv64360-i2c")
- if ((err = mv64x60_i2c_device_setup(np, id++)))
- goto error;
+ for_each_compatible_node(np, "i2c", "marvell,mv64360-i2c") {
+ err = mv64x60_i2c_device_setup(np, id++);
+ if (err)
+ printk(KERN_ERR "Failed to initialize MV64x60 I2C "
+ "bus %s: error %d.\n",
+ np->full_name, err);
+ }
/* support up to one watchdog timer */
np = of_find_compatible_node(np, NULL, "marvell,mv64360-wdt");
if (np) {
if ((err = mv64x60_wdt_device_setup(np, id)))
- goto error;
+ printk(KERN_ERR "Failed to initialize MV64x60 "
+ "Watchdog %s: error %d.\n",
+ np->full_name, err);
of_node_put(np);
}
return 0;
-
-error:
- of_node_put(np);
- return err;
}
arch_initcall(mv64x60_device_setup);
diff --git a/arch/powerpc/sysdev/mv64x60_udbg.c b/arch/powerpc/sysdev/mv64x60_udbg.c
index ccdb3b0418fc..2792dc8b038c 100644
--- a/arch/powerpc/sysdev/mv64x60_udbg.c
+++ b/arch/powerpc/sysdev/mv64x60_udbg.c
@@ -94,7 +94,7 @@ static void mv64x60_udbg_init(void)
if (!np)
return;
- block_index = of_get_property(np, "block-index", NULL);
+ block_index = of_get_property(np, "cell-index", NULL);
if (!block_index)
goto error;