Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/kernel/irq.c | 2
-rw-r--r--  arch/arm/kernel/irq.c | 18
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/arm/mach-imx/clock.c | 2
-rw-r--r--  arch/arm/mach-imx/generic.c | 6
-rw-r--r--  arch/arm/mach-imx/include/mach/imx-regs.h | 106
-rw-r--r--  arch/arm/mach-w90x900/mach-w90p910evb.c | 1
-rw-r--r--  arch/arm/mach-w90x900/time.c | 1
-rw-r--r--  arch/arm/mm/proc-syms.c | 1
-rw-r--r--  arch/arm/oprofile/op_model_mpcore.c | 2
-rw-r--r--  arch/blackfin/kernel/irqchip.c | 5
-rw-r--r--  arch/ia64/kernel/iosapic.c | 2
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 12
-rw-r--r--  arch/ia64/kernel/msi_ia64.c | 4
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 2
-rw-r--r--  arch/m68k/amiga/amiints.c | 12
-rw-r--r--  arch/m68k/amiga/cia.c | 4
-rw-r--r--  arch/m68k/amiga/config.c | 3
-rw-r--r--  arch/m68k/apollo/config.c | 7
-rw-r--r--  arch/m68k/atari/atakeyb.c | 10
-rw-r--r--  arch/m68k/atari/stdma.c | 5
-rw-r--r--  arch/m68k/atari/time.c | 5
-rw-r--r--  arch/m68k/bvme6000/config.c | 1
-rw-r--r--  arch/m68k/hp300/time.c | 3
-rw-r--r--  arch/m68k/kernel/.gitignore | 1
-rw-r--r--  arch/m68k/kernel/entry.S | 2
-rw-r--r--  arch/m68k/kernel/setup.c | 11
-rw-r--r--  arch/m68k/kernel/signal.c | 15
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds | 1
-rw-r--r--  arch/m68k/mac/baboon.c | 3
-rw-r--r--  arch/m68k/mac/config.c | 12
-rw-r--r--  arch/m68k/mac/debug.c | 1
-rw-r--r--  arch/m68k/mac/iop.c | 10
-rw-r--r--  arch/m68k/mac/macints.c | 15
-rw-r--r--  arch/m68k/mac/misc.c | 1
-rw-r--r--  arch/m68k/mac/oss.c | 25
-rw-r--r--  arch/m68k/mac/psc.c | 12
-rw-r--r--  arch/m68k/mac/via.c | 35
-rw-r--r--  arch/m68k/math-emu/fp_log.c | 1
-rw-r--r--  arch/m68k/mm/init.c | 4
-rw-r--r--  arch/m68k/mm/motorola.c | 7
-rw-r--r--  arch/m68k/mvme147/config.c | 6
-rw-r--r--  arch/m68k/mvme16x/config.c | 1
-rw-r--r--  arch/m68k/q40/config.c | 3
-rw-r--r--  arch/m68k/sun3/config.c | 8
-rw-r--r--  arch/m68k/sun3/mmu_emu.c | 1
-rw-r--r--  arch/m68k/sun3/sun3ints.c | 9
-rw-r--r--  arch/m68k/sun3x/config.c | 1
-rw-r--r--  arch/mips/Kconfig | 71
-rw-r--r--  arch/mips/Makefile | 40
-rw-r--r--  arch/mips/alchemy/Kconfig | 5
-rw-r--r--  arch/mips/alchemy/common/Makefile | 4
-rw-r--r--  arch/mips/alchemy/common/au1xxx_irqmap.c | 205
-rw-r--r--  arch/mips/alchemy/common/clocks.c | 65
-rw-r--r--  arch/mips/alchemy/common/cputable.c | 52
-rw-r--r--  arch/mips/alchemy/common/dbdma.c | 65
-rw-r--r--  arch/mips/alchemy/common/irq.c | 745
-rw-r--r--  arch/mips/alchemy/common/power.c | 406
-rw-r--r--  arch/mips/alchemy/common/reset.c | 2
-rw-r--r--  arch/mips/alchemy/common/setup.c | 71
-rw-r--r--  arch/mips/alchemy/common/sleeper.S | 118
-rw-r--r--  arch/mips/alchemy/common/time.c | 311
-rw-r--r--  arch/mips/alchemy/db1x00/init.c | 62
-rw-r--r--  arch/mips/alchemy/devboards/Makefile | 18
-rw-r--r--  arch/mips/alchemy/devboards/db1x00/Makefile (renamed from arch/mips/alchemy/db1x00/Makefile) | 2
-rw-r--r--  arch/mips/alchemy/devboards/db1x00/board_setup.c (renamed from arch/mips/alchemy/db1x00/board_setup.c) | 37
-rw-r--r--  arch/mips/alchemy/devboards/db1x00/irqmap.c (renamed from arch/mips/alchemy/db1x00/irqmap.c) | 24
-rw-r--r--  arch/mips/alchemy/devboards/pb1000/Makefile (renamed from arch/mips/alchemy/pb1000/Makefile) | 2
-rw-r--r--  arch/mips/alchemy/devboards/pb1000/board_setup.c (renamed from arch/mips/alchemy/pb1000/board_setup.c) | 30
-rw-r--r--  arch/mips/alchemy/devboards/pb1100/Makefile (renamed from arch/mips/alchemy/pb1100/Makefile) | 2
-rw-r--r--  arch/mips/alchemy/devboards/pb1100/board_setup.c (renamed from arch/mips/alchemy/pb1100/board_setup.c) | 47
-rw-r--r--  arch/mips/alchemy/devboards/pb1200/Makefile (renamed from arch/mips/alchemy/pb1200/Makefile) | 3
-rw-r--r--  arch/mips/alchemy/devboards/pb1200/board_setup.c (renamed from arch/mips/alchemy/pb1200/board_setup.c) | 34
-rw-r--r--  arch/mips/alchemy/devboards/pb1200/irqmap.c (renamed from arch/mips/alchemy/pb1200/irqmap.c) | 94
-rw-r--r--  arch/mips/alchemy/devboards/pb1200/platform.c (renamed from arch/mips/alchemy/pb1200/platform.c) | 0
-rw-r--r--  arch/mips/alchemy/devboards/pb1500/Makefile (renamed from arch/mips/alchemy/pb1500/Makefile) | 2
-rw-r--r--  arch/mips/alchemy/devboards/pb1500/board_setup.c (renamed from arch/mips/alchemy/pb1500/board_setup.c) | 44
-rw-r--r--  arch/mips/alchemy/devboards/pb1550/Makefile (renamed from arch/mips/alchemy/pb1550/Makefile) | 2
-rw-r--r--  arch/mips/alchemy/devboards/pb1550/board_setup.c (renamed from arch/mips/alchemy/pb1550/board_setup.c) | 34
-rw-r--r--  arch/mips/alchemy/devboards/pm.c | 229
-rw-r--r--  arch/mips/alchemy/devboards/prom.c (renamed from arch/mips/alchemy/pb1550/init.c) | 26
-rw-r--r--  arch/mips/alchemy/mtx-1/board_setup.c | 12
-rw-r--r--  arch/mips/alchemy/mtx-1/init.c | 2
-rw-r--r--  arch/mips/alchemy/mtx-1/irqmap.c | 18
-rw-r--r--  arch/mips/alchemy/pb1000/init.c | 57
-rw-r--r--  arch/mips/alchemy/pb1000/irqmap.c | 38
-rw-r--r--  arch/mips/alchemy/pb1100/init.c | 60
-rw-r--r--  arch/mips/alchemy/pb1100/irqmap.c | 40
-rw-r--r--  arch/mips/alchemy/pb1200/init.c | 58
-rw-r--r--  arch/mips/alchemy/pb1500/init.c | 58
-rw-r--r--  arch/mips/alchemy/pb1500/irqmap.c | 46
-rw-r--r--  arch/mips/alchemy/pb1550/irqmap.c | 43
-rw-r--r--  arch/mips/alchemy/xxs1500/board_setup.c | 12
-rw-r--r--  arch/mips/alchemy/xxs1500/init.c | 2
-rw-r--r--  arch/mips/alchemy/xxs1500/irqmap.c | 31
-rw-r--r--  arch/mips/cavium-octeon/Kconfig | 85
-rw-r--r--  arch/mips/cavium-octeon/Makefile | 16
-rw-r--r--  arch/mips/cavium-octeon/csrc-octeon.c | 58
-rw-r--r--  arch/mips/cavium-octeon/dma-octeon.c | 32
-rw-r--r--  arch/mips/cavium-octeon/executive/Makefile | 13
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-bootmem.c | 586
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-l2c.c | 734
-rw-r--r--  arch/mips/cavium-octeon/executive/cvmx-sysinfo.c | 116
-rw-r--r--  arch/mips/cavium-octeon/executive/octeon-model.c | 358
-rw-r--r--  arch/mips/cavium-octeon/flash_setup.c | 84
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 497
-rw-r--r--  arch/mips/cavium-octeon/octeon-memcpy.S | 521
-rw-r--r--  arch/mips/cavium-octeon/serial.c | 136
-rw-r--r--  arch/mips/cavium-octeon/setup.c | 929
-rw-r--r--  arch/mips/cavium-octeon/smp.c | 211
-rw-r--r--  arch/mips/configs/cavium-octeon_defconfig | 943
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 3
-rw-r--r--  arch/mips/include/asm/cpu.h | 14
-rw-r--r--  arch/mips/include/asm/hazards.h | 4
-rw-r--r--  arch/mips/include/asm/io.h | 14
-rw-r--r--  arch/mips/include/asm/irq.h | 2
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h | 89
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h | 5
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h | 78
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 64
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/irq.h | 244
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | 131
-rw-r--r--  arch/mips/include/asm/mach-cavium-octeon/war.h | 26
-rw-r--r--  arch/mips/include/asm/mach-generic/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mach-ip27/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mach-ip32/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mach-jazz/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mach-lemote/dma-coherence.h | 26
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 22
-rw-r--r--  arch/mips/include/asm/module.h | 2
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-asm.h | 128
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-bootinfo.h | 262
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-bootmem.h | 288
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ciu-defs.h | 1616
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-gpio-defs.h | 219
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-iob-defs.h | 530
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-ipd-defs.h | 877
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2c-defs.h | 963
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2c.h | 325
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2d-defs.h | 369
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-l2t-defs.h | 141
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-led-defs.h | 240
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-mio-defs.h | 2004
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-packet.h | 61
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-pow-defs.h | 698
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-spinlock.h | 232
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-sysinfo.h | 152
-rw-r--r--  arch/mips/include/asm/octeon/cvmx.h | 505
-rw-r--r--  arch/mips/include/asm/octeon/octeon-feature.h | 119
-rw-r--r--  arch/mips/include/asm/octeon/octeon-model.h | 321
-rw-r--r--  arch/mips/include/asm/octeon/octeon.h | 248
-rw-r--r--  arch/mips/include/asm/processor.h | 69
-rw-r--r--  arch/mips/include/asm/ptrace.h | 4
-rw-r--r--  arch/mips/include/asm/smp.h | 3
-rw-r--r--  arch/mips/include/asm/stackframe.h | 17
-rw-r--r--  arch/mips/include/asm/time.h | 24
-rw-r--r--  arch/mips/kernel/Makefile | 5
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 31
-rw-r--r--  arch/mips/kernel/branch.c | 33
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 2
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 31
-rw-r--r--  arch/mips/kernel/csrc-r4k.c | 2
-rw-r--r--  arch/mips/kernel/genex.S | 4
-rw-r--r--  arch/mips/kernel/irq-gic.c | 2
-rw-r--r--  arch/mips/kernel/irq.c | 1
-rw-r--r--  arch/mips/kernel/octeon_switch.S | 506
-rw-r--r--  arch/mips/kernel/ptrace32.c | 64
-rw-r--r--  arch/mips/kernel/smtc.c | 6
-rw-r--r--  arch/mips/kernel/traps.c | 21
-rw-r--r--  arch/mips/lib/Makefile | 1
-rw-r--r--  arch/mips/mm/Makefile | 1
-rw-r--r--  arch/mips/mm/c-octeon.c | 307
-rw-r--r--  arch/mips/mm/cache.c | 6
-rw-r--r--  arch/mips/mm/cex-oct.S | 70
-rw-r--r--  arch/mips/mm/dma-default.c | 25
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 5
-rw-r--r--  arch/mips/mm/tlbex.c | 1
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c | 5
-rw-r--r--  arch/mips/sgi-ip22/ip22-int.c | 2
-rw-r--r--  arch/mips/sgi-ip22/ip22-time.c | 2
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 3
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 3
-rw-r--r--  arch/mn10300/kernel/mn10300-watchdog.c | 3
-rw-r--r--  arch/parisc/kernel/irq.c | 8
-rw-r--r--  arch/powerpc/boot/dts/gef_sbc610.dts | 15
-rw-r--r--  arch/powerpc/configs/86xx/gef_sbc610_defconfig | 1
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/powerpc/platforms/cell/cbe_cpufreq.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/cpufreq_spudemand.c | 4
-rw-r--r--  arch/powerpc/platforms/pasemi/cpufreq.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_64.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c | 5
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 3
-rw-r--r--  arch/sparc/configs/sparc32_defconfig | 290
-rw-r--r--  arch/sparc/include/asm/oplib_32.h | 11
-rw-r--r--  arch/sparc/include/asm/oplib_64.h | 10
-rw-r--r--  arch/sparc/include/asm/signal.h | 4
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 4
-rw-r--r--  arch/sparc/kernel/auxio_32.c | 3
-rw-r--r--  arch/sparc/kernel/auxio_64.c | 2
-rw-r--r--  arch/sparc/kernel/cpu.c | 2
-rw-r--r--  arch/sparc/kernel/idprom.c | 3
-rw-r--r--  arch/sparc/kernel/ioport.c | 15
-rw-r--r--  arch/sparc/kernel/irq_32.c | 1
-rw-r--r--  arch/sparc/kernel/irq_64.c | 5
-rw-r--r--  arch/sparc/kernel/pci.c | 1
-rw-r--r--  arch/sparc/kernel/pcic.c | 6
-rw-r--r--  arch/sparc/kernel/process_32.c | 2
-rw-r--r--  arch/sparc/kernel/process_64.c | 2
-rw-r--r--  arch/sparc/kernel/psycho_common.c | 60
-rw-r--r--  arch/sparc/kernel/sbus.c | 1
-rw-r--r--  arch/sparc/kernel/setup_32.c | 3
-rw-r--r--  arch/sparc/kernel/setup_64.c | 3
-rw-r--r--  arch/sparc/kernel/sparc_ksyms_32.c | 225
-rw-r--r--  arch/sparc/kernel/sparc_ksyms_64.c | 243
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 4
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 2
-rw-r--r--  arch/sparc/kernel/time_32.c | 2
-rw-r--r--  arch/sparc/kernel/time_64.c | 4
-rw-r--r--  arch/sparc/kernel/traps_32.c | 1
-rw-r--r--  arch/sparc/kernel/traps_64.c | 2
-rw-r--r--  arch/sparc/kernel/unaligned_64.c | 8
-rw-r--r--  arch/sparc/lib/Makefile | 1
-rw-r--r--  arch/sparc/lib/PeeCeeI.c | 8
-rw-r--r--  arch/sparc/lib/ksyms.c | 196
-rw-r--r--  arch/sparc/lib/user_fixup.c | 5
-rw-r--r--  arch/sparc/mm/generic_32.c | 1
-rw-r--r--  arch/sparc/mm/generic_64.c | 1
-rw-r--r--  arch/sparc/mm/highmem.c | 2
-rw-r--r--  arch/sparc/mm/init_32.c | 6
-rw-r--r--  arch/sparc/mm/init_64.c | 4
-rw-r--r--  arch/sparc/prom/init_32.c | 4
-rw-r--r--  arch/sparc/prom/misc_32.c | 3
-rw-r--r--  arch/sparc/prom/misc_64.c | 3
-rw-r--r--  arch/sparc/prom/ranges.c | 3
-rw-r--r--  arch/sparc/prom/tree_32.c | 17
-rw-r--r--  arch/sparc/prom/tree_64.c | 18
-rw-r--r--  arch/x86/ia32/ia32entry.S | 8
-rw-r--r--  arch/x86/include/asm/apicnum.h | 12
-rw-r--r--  arch/x86/include/asm/bitops.h | 14
-rw-r--r--  arch/x86/include/asm/cpu.h | 21
-rw-r--r--  arch/x86/include/asm/cpumask.h | 28
-rw-r--r--  arch/x86/include/asm/current.h | 24
-rw-r--r--  arch/x86/include/asm/hardirq_32.h | 3
-rw-r--r--  arch/x86/include/asm/hardirq_64.h | 25
-rw-r--r--  arch/x86/include/asm/io_apic.h | 26
-rw-r--r--  arch/x86/include/asm/irq_regs_32.h | 4
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 13
-rw-r--r--  arch/x86/include/asm/mach-default/mach_wakecpu.h | 6
-rw-r--r--  arch/x86/include/asm/mmu_context_32.h | 12
-rw-r--r--  arch/x86/include/asm/mmu_context_64.h | 16
-rw-r--r--  arch/x86/include/asm/mpspec_def.h | 23
-rw-r--r--  arch/x86/include/asm/mtrr.h | 10
-rw-r--r--  arch/x86/include/asm/page_64.h | 4
-rw-r--r--  arch/x86/include/asm/paravirt.h | 8
-rw-r--r--  arch/x86/include/asm/pda.h | 125
-rw-r--r--  arch/x86/include/asm/percpu.h | 159
-rw-r--r--  arch/x86/include/asm/pgtable.h | 19
-rw-r--r--  arch/x86/include/asm/processor.h | 3
-rw-r--r--  arch/x86/include/asm/setup.h | 1
-rw-r--r--  arch/x86/include/asm/smp.h | 49
-rw-r--r--  arch/x86/include/asm/system.h | 4
-rw-r--r--  arch/x86/include/asm/thread_info.h | 20
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 17
-rw-r--r--  arch/x86/include/asm/topology.h | 8
-rw-r--r--  arch/x86/include/asm/trampoline.h | 1
-rw-r--r--  arch/x86/include/asm/uv/uv_bau.h | 3
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 96
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 1
-rw-r--r--  arch/x86/kernel/apic.c | 25
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c | 8
-rw-r--r--  arch/x86/kernel/cpu/common.c | 80
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 27
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 63
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 21
-rw-r--r--  arch/x86/kernel/crash.c | 2
-rw-r--r--  arch/x86/kernel/dumpstack_64.c | 35
-rw-r--r--  arch/x86/kernel/entry_32.S | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 41
-rw-r--r--  arch/x86/kernel/head64.c | 23
-rw-r--r--  arch/x86/kernel/head_64.S | 47
-rw-r--r--  arch/x86/kernel/io_apic.c | 163
-rw-r--r--  arch/x86/kernel/irq.c | 6
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/irq_64.c | 5
-rw-r--r--  arch/x86/kernel/microcode_intel.c | 10
-rw-r--r--  arch/x86/kernel/module_32.c | 6
-rw-r--r--  arch/x86/kernel/module_64.c | 32
-rw-r--r--  arch/x86/kernel/mpparse.c | 143
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/nmi.c | 10
-rw-r--r--  arch/x86/kernel/process_32.c | 5
-rw-r--r--  arch/x86/kernel/process_64.c | 22
-rw-r--r--  arch/x86/kernel/reboot.c | 1
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 208
-rw-r--r--  arch/x86/kernel/smpboot.c | 69
-rw-r--r--  arch/x86/kernel/smpcommon.c | 10
-rw-r--r--  arch/x86/kernel/tlb_32.c | 85
-rw-r--r--  arch/x86/kernel/tlb_64.c | 76
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 16
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S | 9
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S | 22
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 2
-rw-r--r--  arch/x86/mach-voyager/setup.c | 1
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 6
-rw-r--r--  arch/x86/mm/fault.c | 2
-rw-r--r--  arch/x86/mm/init_32.c | 1
-rw-r--r--  arch/x86/mm/pageattr.c | 10
-rw-r--r--  arch/x86/mm/pat.c | 127
-rw-r--r--  arch/x86/pci/i386.c | 12
-rw-r--r--  arch/x86/xen/enlighten.c | 47
-rw-r--r--  arch/x86/xen/irq.c | 8
-rw-r--r--  arch/x86/xen/mmu.c | 8
-rw-r--r--  arch/x86/xen/multicalls.h | 2
-rw-r--r--  arch/x86/xen/smp.c | 33
-rw-r--r--  arch/x86/xen/xen-asm_64.S | 31
320 files changed, 20920 insertions, 4085 deletions
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 703731accda6..7bc7489223f3 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
last_cpu = cpu;
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
return 0;
}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7141cee1fab7..4bb723eadad1 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
.lock = SPIN_LOCK_UNLOCKED
};
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
/*
* do_IRQ handles all hardware IRQ's. Decoded IRQs should not
* come via this function. Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
#ifdef CONFIG_SMP
- bad_irq_desc.affinity = CPU_MASK_ALL;
+ cpumask_setall(bad_irq_desc.affinity);
bad_irq_desc.cpu = smp_processor_id();
#endif
init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
struct irq_desc *desc = irq_desc + i;
if (desc->cpu == cpu) {
- unsigned int newcpu = any_online_cpu(desc->affinity);
-
- if (newcpu == NR_CPUS) {
+ unsigned int newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
+ if (newcpu >= nr_cpu_ids) {
if (printk_ratelimit())
printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
i, cpu);
- cpus_setall(desc->affinity);
- newcpu = any_online_cpu(desc->affinity);
+ cpumask_setall(desc->affinity);
+ newcpu = cpumask_any_and(desc->affinity,
+ cpu_online_mask);
}
route_irq(desc, i, newcpu);
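[Editorial note] The affinity conversions in this patch (alpha, ARM, ia64, sparc, x86, ...) all follow one pattern: irq_desc[].affinity is now accessed as a cpumask pointer that may be allocated off-stack, so struct assignment and the old cpus_*/any_online_cpu() helpers give way to cpumask_copy(), cpumask_setall() and cpumask_any_and(). A minimal sketch of the resulting idiom, not taken from the patch itself; the pick_affine_cpu() helper and its fallback policy are illustrative assumptions:

#include <linux/cpumask.h>
#include <linux/irq.h>

/* Illustrative only: choose an online CPU from an irq descriptor's
 * affinity mask using the pointer-based cpumask API. */
static unsigned int pick_affine_cpu(struct irq_desc *desc)
{
	unsigned int cpu = cpumask_any_and(desc->affinity, cpu_online_mask);

	if (cpu >= nr_cpu_ids) {		/* nothing online in the mask */
		cpumask_setall(desc->affinity);	/* fall back to all CPUs */
		cpu = cpumask_any_and(desc->affinity, cpu_online_mask);
	}
	return cpu;
}

Note the failure check: the old any_online_cpu() returned NR_CPUS on failure, whereas cpumask_any_and() returns a value >= nr_cpu_ids, which is exactly what the migrate_irqs() hunk above now tests.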
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 00216071eaf7..85598f7da407 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -65,6 +65,7 @@ SECTIONS
#endif
. = ALIGN(4096);
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
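[Editorial note] The new .data.percpu.page_aligned input section (added here and in the ia64 linker script below) groups page-aligned per-CPU variables at the start of the per-CPU area so their alignment does not leave holes between ordinary per-CPU objects. A minimal consumer sketch, assuming the DEFINE_PER_CPU_PAGE_ALIGNED and per_cpu() helpers from <linux/percpu.h>; the cpu_scratch variable is a hypothetical example, not part of this patch:

#include <linux/percpu.h>
#include <asm/page.h>

struct cpu_scratch {
	char buf[PAGE_SIZE];	/* wants page alignment */
};

/* The page-aligned DEFINE_PER_CPU variant emits the variable into
 * .data.percpu.page_aligned, which the extra input-section line collects. */
static DEFINE_PER_CPU_PAGE_ALIGNED(struct cpu_scratch, cpu_scratch);

static void touch_scratch(int cpu)
{
	struct cpu_scratch *s = &per_cpu(cpu_scratch, cpu);

	s->buf[0] = 0;
}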
diff --git a/arch/arm/mach-imx/clock.c b/arch/arm/mach-imx/clock.c
index 7ec60fc91565..cf332aeb942e 100644
--- a/arch/arm/mach-imx/clock.c
+++ b/arch/arm/mach-imx/clock.c
@@ -23,7 +23,7 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <mach/imx-regs.h>
+#include <mach/hardware.h>
/*
* Very simple approach: We can't disable clocks, so we do
diff --git a/arch/arm/mach-imx/generic.c b/arch/arm/mach-imx/generic.c
index fa72174dd95c..887cb21f75b0 100644
--- a/arch/arm/mach-imx/generic.c
+++ b/arch/arm/mach-imx/generic.c
@@ -245,11 +245,11 @@ void __init imx_set_mmc_info(struct imxmmc_platform_data *info)
imx_mmc_device.dev.platform_data = info;
}
-static struct imxfb_mach_info imx_fb_info;
+static struct imx_fb_platform_data imx_fb_info;
-void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
+void __init set_imx_fb_info(struct imx_fb_platform_data *hard_imx_fb_info)
{
- memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info));
+ memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imx_fb_platform_data));
}
static struct resource imxfb_resources[] = {
diff --git a/arch/arm/mach-imx/include/mach/imx-regs.h b/arch/arm/mach-imx/include/mach/imx-regs.h
index fb9de2733879..490297fc0e38 100644
--- a/arch/arm/mach-imx/include/mach/imx-regs.h
+++ b/arch/arm/mach-imx/include/mach/imx-regs.h
@@ -373,110 +373,4 @@
#define TSTAT_CAPT (1<<1)
#define TSTAT_COMP (1<<0)
-/*
- * LCD Controller
- */
-
-#define LCDC_SSA __REG(IMX_LCDC_BASE+0x00)
-
-#define LCDC_SIZE __REG(IMX_LCDC_BASE+0x04)
-#define SIZE_XMAX(x) ((((x) >> 4) & 0x3f) << 20)
-#define SIZE_YMAX(y) ( (y) & 0x1ff )
-
-#define LCDC_VPW __REG(IMX_LCDC_BASE+0x08)
-#define VPW_VPW(x) ( (x) & 0x3ff )
-
-#define LCDC_CPOS __REG(IMX_LCDC_BASE+0x0C)
-#define CPOS_CC1 (1<<31)
-#define CPOS_CC0 (1<<30)
-#define CPOS_OP (1<<28)
-#define CPOS_CXP(x) (((x) & 3ff) << 16)
-#define CPOS_CYP(y) ((y) & 0x1ff)
-
-#define LCDC_LCWHB __REG(IMX_LCDC_BASE+0x10)
-#define LCWHB_BK_EN (1<<31)
-#define LCWHB_CW(w) (((w) & 0x1f) << 24)
-#define LCWHB_CH(h) (((h) & 0x1f) << 16)
-#define LCWHB_BD(x) ((x) & 0xff)
-
-#define LCDC_LCHCC __REG(IMX_LCDC_BASE+0x14)
-#define LCHCC_CUR_COL_R(r) (((r) & 0x1f) << 11)
-#define LCHCC_CUR_COL_G(g) (((g) & 0x3f) << 5)
-#define LCHCC_CUR_COL_B(b) ((b) & 0x1f)
-
-#define LCDC_PCR __REG(IMX_LCDC_BASE+0x18)
-#define PCR_TFT (1<<31)
-#define PCR_COLOR (1<<30)
-#define PCR_PBSIZ_1 (0<<28)
-#define PCR_PBSIZ_2 (1<<28)
-#define PCR_PBSIZ_4 (2<<28)
-#define PCR_PBSIZ_8 (3<<28)
-#define PCR_BPIX_1 (0<<25)
-#define PCR_BPIX_2 (1<<25)
-#define PCR_BPIX_4 (2<<25)
-#define PCR_BPIX_8 (3<<25)
-#define PCR_BPIX_12 (4<<25)
-#define PCR_BPIX_16 (4<<25)
-#define PCR_PIXPOL (1<<24)
-#define PCR_FLMPOL (1<<23)
-#define PCR_LPPOL (1<<22)
-#define PCR_CLKPOL (1<<21)
-#define PCR_OEPOL (1<<20)
-#define PCR_SCLKIDLE (1<<19)
-#define PCR_END_SEL (1<<18)
-#define PCR_END_BYTE_SWAP (1<<17)
-#define PCR_REV_VS (1<<16)
-#define PCR_ACD_SEL (1<<15)
-#define PCR_ACD(x) (((x) & 0x7f) << 8)
-#define PCR_SCLK_SEL (1<<7)
-#define PCR_SHARP (1<<6)
-#define PCR_PCD(x) ((x) & 0x3f)
-
-#define LCDC_HCR __REG(IMX_LCDC_BASE+0x1C)
-#define HCR_H_WIDTH(x) (((x) & 0x3f) << 26)
-#define HCR_H_WAIT_1(x) (((x) & 0xff) << 8)
-#define HCR_H_WAIT_2(x) ((x) & 0xff)
-
-#define LCDC_VCR __REG(IMX_LCDC_BASE+0x20)
-#define VCR_V_WIDTH(x) (((x) & 0x3f) << 26)
-#define VCR_V_WAIT_1(x) (((x) & 0xff) << 8)
-#define VCR_V_WAIT_2(x) ((x) & 0xff)
-
-#define LCDC_POS __REG(IMX_LCDC_BASE+0x24)
-#define POS_POS(x) ((x) & 1f)
-
-#define LCDC_LSCR1 __REG(IMX_LCDC_BASE+0x28)
-#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
-#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
-#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
-#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
-#define LSCR1_GRAY1(x) (((x) & 0xf))
-
-#define LCDC_PWMR __REG(IMX_LCDC_BASE+0x2C)
-#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
-#define PWMR_LDMSK (1<<15)
-#define PWMR_SCR1 (1<<10)
-#define PWMR_SCR0 (1<<9)
-#define PWMR_CC_EN (1<<8)
-#define PWMR_PW(x) ((x) & 0xff)
-
-#define LCDC_DMACR __REG(IMX_LCDC_BASE+0x30)
-#define DMACR_BURST (1<<31)
-#define DMACR_HM(x) (((x) & 0xf) << 16)
-#define DMACR_TM(x) ((x) &0xf)
-
-#define LCDC_RMCR __REG(IMX_LCDC_BASE+0x34)
-#define RMCR_LCDC_EN (1<<1)
-#define RMCR_SELF_REF (1<<0)
-
-#define LCDC_LCDICR __REG(IMX_LCDC_BASE+0x38)
-#define LCDICR_INT_SYN (1<<2)
-#define LCDICR_INT_CON (1)
-
-#define LCDC_LCDISR __REG(IMX_LCDC_BASE+0x40)
-#define LCDISR_UDR_ERR (1<<3)
-#define LCDISR_ERR_RES (1<<2)
-#define LCDISR_EOF (1<<1)
-#define LCDISR_BOF (1<<0)
-
#endif // _IMX_REGS_H
diff --git a/arch/arm/mach-w90x900/mach-w90p910evb.c b/arch/arm/mach-w90x900/mach-w90p910evb.c
index 9307a2475438..9ebc93f48530 100644
--- a/arch/arm/mach-w90x900/mach-w90p910evb.c
+++ b/arch/arm/mach-w90x900/mach-w90p910evb.c
@@ -29,6 +29,7 @@
#include <asm/mach-types.h>
#include <mach/regs-serial.h>
+#include <mach/map.h>
#include "cpu.h"
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index 3a69e381f316..bcc838f6b393 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -28,7 +28,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-#include <mach/system.h>
#include <mach/map.h>
#include <mach/regs-timer.h>
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 4ad3bf291ad3..195e48edd8c2 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */
#else
EXPORT_SYMBOL(cpu_cache);
#endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 6d6bd5899240..853d42bb8682 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
const struct cpumask *mask = cpumask_of(cpu);
spin_lock_irq(&desc->lock);
- desc->affinity = *mask;
+ cpumask_copy(desc->affinity, mask);
desc->chip->set_affinity(irq, mask);
spin_unlock_irq(&desc->lock);
}
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index ab8209cbbad0..5780d6df1542 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
#endif
};
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 5cfd3d91001a..006ad366a454 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP
/* Clear affinity */
- cpus_setall(idesc->affinity);
+ cpumask_setall(idesc->affinity);
#endif
/* Clear the interrupt information */
iosapic_intr_info[irq].dest = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index a58f64ca9f0e..226233a6fa19 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
if (irq < NR_IRQS) {
- cpumask_copy(&irq_desc[irq].affinity,
+ cpumask_copy(irq_desc[irq].affinity,
cpumask_of(cpu_logical_id(hwid)));
irq_redir[irq] = (char) (redir & 0xff);
}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+ if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
>= nr_cpu_ids) {
/*
* Save it for phase 2 processing
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 28d3d483db92..927ad027820c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
ia64_srlz_d();
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ struct irq_desc *desc = irq_to_desc(vector);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
else {
int irq = local_vector_to_irq(vector);
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
* Perform normal interrupt style processing
*/
while (vector != IA64_SPURIOUS_INT_VECTOR) {
+ struct irq_desc *desc = irq_to_desc(vector);
+
if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
smp_local_flush_tlb();
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
} else if (unlikely(IS_RESCHEDULE(vector)))
- kstat_this_cpu.irqs[vector]++;
+ kstat_incr_irqs_this_cpu(vector, desc);
else {
struct pt_regs *old_regs = set_irq_regs(NULL);
int irq = local_vector_to_irq(vector);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 890339339035..dcb6b7c51ea7 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
msg.data = data;
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}
#endif /* CONFIG_SMP */
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
- irq_desc[irq].affinity = *mask;
+ cpumask_copy(irq_desc[irq].affinity, mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 10a7d47e8510..f45e4e508eca 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -219,6 +219,7 @@ SECTIONS
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
{
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index ca553b0429ce..81e428943d73 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
write_msi_msg(irq, &msg);
- irq_desc[irq].affinity = *cpu_mask;
+ cpumask_copy(irq_desc[irq].affinity, cpu_mask);
}
#endif /* CONFIG_SMP */
diff --git a/arch/m68k/amiga/amiints.c b/arch/m68k/amiga/amiints.c
index 907a5533c845..c5b5212cc3f9 100644
--- a/arch/m68k/amiga/amiints.c
+++ b/arch/m68k/amiga/amiints.c
@@ -72,10 +72,14 @@ static struct irq_controller amiga_irq_controller = {
void __init amiga_init_IRQ(void)
{
- request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL);
- request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL);
- request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL);
- request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL);
+ if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
+ pr_err("Couldn't register int%d\n", 1);
+ if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
+ pr_err("Couldn't register int%d\n", 3);
+ if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
+ pr_err("Couldn't register int%d\n", 4);
+ if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
+ pr_err("Couldn't register int%d\n", 5);
m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
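[Editorial note] This amiints.c hunk and the m68k hunks that follow all apply the same mechanical conversion: request_irq() is declared __must_check, so each call that previously ignored the return value now reports failure with pr_err(). A minimal sketch of the idiom; EXAMPLE_IRQ and example_handler are placeholders, not identifiers from this patch:

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* acknowledge and handle the hardware event here */
	return IRQ_HANDLED;
}

static void __init example_register(void)
{
	if (request_irq(EXAMPLE_IRQ, example_handler, 0, "example", NULL))
		pr_err("Couldn't register example interrupt\n");
}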
diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
index 343fab49bd9a..ecd0f7ca6f0e 100644
--- a/arch/m68k/amiga/cia.c
+++ b/arch/m68k/amiga/cia.c
@@ -176,5 +176,7 @@ void __init cia_init_IRQ(struct ciabase *base)
/* override auto int and install CIA handler */
m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1);
m68k_irq_startup(base->handler_irq);
- request_irq(base->handler_irq, cia_handler, IRQF_SHARED, base->name, base);
+ if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
+ base->name, base))
+ pr_err("Couldn't register %s interrupt\n", base->name);
}
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index ab9862c3a136..6e562751ad51 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -493,7 +493,8 @@ static void __init amiga_sched_init(irq_handler_t timer_routine)
* Please don't change this to use ciaa, as it interferes with the
* SCSI code. We'll have to take a look at this later
*/
- request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL);
+ if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
+ pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
}
diff --git a/arch/m68k/apollo/config.c b/arch/m68k/apollo/config.c
index 78df98f2029a..8d3eafab1ffe 100644
--- a/arch/m68k/apollo/config.c
+++ b/arch/m68k/apollo/config.c
@@ -31,10 +31,6 @@ extern unsigned long dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
-extern void dn_dummy_waitbut(void);
-extern struct fb_info *dn_fb_init(long *);
-extern void dn_dummy_debug_init(void);
-extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
#endif
@@ -204,7 +200,8 @@ void dn_sched_init(irq_handler_t timer_routine)
printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
#endif
- request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine);
+ if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
+ pr_err("Couldn't register timer interrupt\n");
}
unsigned long dn_gettimeoffset(void) {
diff --git a/arch/m68k/atari/atakeyb.c b/arch/m68k/atari/atakeyb.c
index c038b7c7eff0..a5f33c059979 100644
--- a/arch/m68k/atari/atakeyb.c
+++ b/arch/m68k/atari/atakeyb.c
@@ -33,7 +33,6 @@
#include <asm/atari_joystick.h>
#include <asm/irq.h>
-extern unsigned int keymap_count;
/* Hook for MIDI serial driver */
void (*atari_MIDI_interrupt_hook) (void);
@@ -567,14 +566,19 @@ static int atari_keyb_done = 0;
int atari_keyb_init(void)
{
+ int error;
+
if (atari_keyb_done)
return 0;
kb_state.state = KEYBOARD;
kb_state.len = 0;
- request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, IRQ_TYPE_SLOW,
- "keyboard/mouse/MIDI", atari_keyboard_interrupt);
+ error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
+ IRQ_TYPE_SLOW, "keyboard/mouse/MIDI",
+ atari_keyboard_interrupt);
+ if (error)
+ return error;
atari_turnoff_irq(IRQ_MFP_ACIA);
do {
diff --git a/arch/m68k/atari/stdma.c b/arch/m68k/atari/stdma.c
index d1bd029a34ac..604329fafbb8 100644
--- a/arch/m68k/atari/stdma.c
+++ b/arch/m68k/atari/stdma.c
@@ -179,8 +179,9 @@ EXPORT_SYMBOL(stdma_islocked);
void __init stdma_init(void)
{
stdma_isr = NULL;
- request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
- "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int);
+ if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
+ "ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int))
+ pr_err("Couldn't register ST-DMA interrupt\n");
}
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 1edde27fa32d..d076ff8d1b39 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -31,8 +31,9 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
mfp.tim_ct_cd = (mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
- request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
- "timer", timer_routine);
+ if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
+ "timer", timer_routine))
+ pr_err("Couldn't register timer interrupt\n");
}
/* ++andreas: gettimeoffset fixed to check for pending interrupt */
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
index c072595928c0..9fe6fefb5e14 100644
--- a/arch/m68k/bvme6000/config.c
+++ b/arch/m68k/bvme6000/config.c
@@ -43,7 +43,6 @@ extern unsigned long bvme6000_gettimeoffset (void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
-extern void bvme6000_waitbut(void);
void bvme6000_set_vectors (void);
/* Save tick handler routine pointer, will point to do_timer() in
diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
index dd7c8a2583d3..f6312c7d8727 100644
--- a/arch/m68k/hp300/time.c
+++ b/arch/m68k/hp300/time.c
@@ -70,7 +70,8 @@ void __init hp300_sched_init(irq_handler_t vector)
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
- request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector);
+ if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector))
+ pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */
diff --git a/arch/m68k/kernel/.gitignore b/arch/m68k/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/m68k/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index f28404d9a2bc..5b780826647c 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -424,7 +424,7 @@ resume:
.data
ALIGN
sys_call_table:
- .long sys_ni_syscall /* 0 - old "setup()" system call*/
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long sys_fork
.long sys_read
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 4d97bd2bd573..303730afb1c9 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -26,6 +26,7 @@
#include <linux/initrd.h>
#include <asm/bootinfo.h>
+#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
@@ -62,7 +63,6 @@ EXPORT_SYMBOL(vme_brdtype);
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
-extern int end;
extern unsigned long availmem;
int m68k_num_memory;
@@ -215,11 +215,10 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
- extern int _etext, _edata, _end;
int i;
/* The bootinfo is located right after the kernel bss */
- m68k_parse_bootinfo((const struct bi_record *)&_end);
+ m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
@@ -252,9 +251,9 @@ void __init setup_arch(char **cmdline_p)
}
init_mm.start_code = PAGE_OFFSET;
- init_mm.end_code = (unsigned long) &_etext;
- init_mm.end_data = (unsigned long) &_edata;
- init_mm.brk = (unsigned long) &_end;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)_end;
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index f9af893cd289..de2d05ddd86d 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -326,6 +326,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
struct sigcontext context;
int err;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
goto badframe;
@@ -411,6 +414,9 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
unsigned long usp;
int err;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
goto badframe;
@@ -937,6 +943,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->d0 = -EINTR;
break;
+ case -ERESTART_RESTARTBLOCK:
+ if (!has_handler) {
+ regs->d0 = __NR_restart_syscall;
+ regs->pc -= 2;
+ break;
+ }
+ regs->d0 = -EINTR;
+ break;
+
case -ERESTARTSYS:
if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
regs->d0 = -EINTR;
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index 8a4919e4d36a..d9368c0709ba 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -33,6 +33,7 @@ SECTIONS
} :data
/* End of data goes *here* so that freeing init code works properly. */
_edata = .;
+ NOTES
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */
diff --git a/arch/m68k/mac/baboon.c b/arch/m68k/mac/baboon.c
index 245d16d078ad..2a96bebd8969 100644
--- a/arch/m68k/mac/baboon.c
+++ b/arch/m68k/mac/baboon.c
@@ -92,7 +92,8 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
void __init baboon_register_interrupts(void)
{
baboon_disabled = 0;
- request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon);
+ if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon))
+ pr_err("Couldn't register baboon interrupt\n");
}
/*
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 8819b97be324..98b6bcfb37bf 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -47,13 +47,6 @@
struct mac_booter_data mac_bi_data;
-/* New m68k bootinfo stuff and videobase */
-
-extern int m68k_num_memory;
-extern struct mem_info m68k_memory[NUM_MEMINFO];
-
-extern struct mem_info m68k_ramdisk;
-
/* The phys. video addr. - might be bogus on some machines */
static unsigned long mac_orig_videoaddr;
@@ -61,7 +54,6 @@ static unsigned long mac_orig_videoaddr;
extern unsigned long mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
extern int mac_set_clock_mmss(unsigned long);
-extern int show_mac_interrupts(struct seq_file *, void *);
extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
@@ -805,10 +797,6 @@ static void __init mac_identify(void)
mac_bi_data.boottime, mac_bi_data.gmtbias);
printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
-#if 0
- printk("Ramdisk: addr 0x%lx size 0x%lx\n",
- m68k_ramdisk.addr, m68k_ramdisk.size);
-#endif
iop_init();
via_init();
diff --git a/arch/m68k/mac/debug.c b/arch/m68k/mac/debug.c
index 65dd77a742a3..bce074ceb768 100644
--- a/arch/m68k/mac/debug.c
+++ b/arch/m68k/mac/debug.c
@@ -27,7 +27,6 @@
#include <asm/macints.h>
extern unsigned long mac_videobase;
-extern unsigned long mac_videodepth;
extern unsigned long mac_rowbytes;
extern void mac_serial_print(const char *);
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c
index 326fb9978094..1ad4e9d80eba 100644
--- a/arch/m68k/mac/iop.c
+++ b/arch/m68k/mac/iop.c
@@ -305,14 +305,16 @@ void __init iop_register_interrupts(void)
{
if (iop_ism_present) {
if (oss_present) {
- request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
+ if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
IRQ_FLG_LOCK, "ISM IOP",
- (void *) IOP_NUM_ISM);
+ (void *) IOP_NUM_ISM))
+ pr_err("Couldn't register ISM IOP interrupt\n");
oss_irq_enable(IRQ_MAC_ADB);
} else {
- request_irq(IRQ_VIA2_0, iop_ism_irq,
+ if (request_irq(IRQ_VIA2_0, iop_ism_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP",
- (void *) IOP_NUM_ISM);
+ (void *) IOP_NUM_ISM))
+ pr_err("Couldn't register ISM IOP interrupt\n");
}
if (!iop_alive(iop_base[IOP_NUM_ISM])) {
printk("IOP: oh my god, they killed the ISM IOP!\n");
diff --git a/arch/m68k/mac/macints.c b/arch/m68k/mac/macints.c
index 82e560c076ce..23711074e0e2 100644
--- a/arch/m68k/mac/macints.c
+++ b/arch/m68k/mac/macints.c
@@ -134,6 +134,7 @@
#include <asm/errno.h>
#include <asm/macints.h>
#include <asm/irq_regs.h>
+#include <asm/mac_oss.h>
#define DEBUG_SPURIOUS
#define SHUTUP_SONIC
@@ -146,7 +147,6 @@ static int scc_mask;
* VIA/RBV hooks
*/
-extern void via_init(void);
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);
@@ -157,9 +157,6 @@ extern int via_irq_pending(int);
* OSS hooks
*/
-extern int oss_present;
-
-extern void oss_init(void);
extern void oss_register_interrupts(void);
extern void oss_irq_enable(int);
extern void oss_irq_disable(int);
@@ -170,9 +167,6 @@ extern int oss_irq_pending(int);
* PSC hooks
*/
-extern int psc_present;
-
-extern void psc_init(void);
extern void psc_register_interrupts(void);
extern void psc_irq_enable(int);
extern void psc_irq_disable(int);
@@ -191,12 +185,10 @@ extern void iop_register_interrupts(void);
extern int baboon_present;
-extern void baboon_init(void);
extern void baboon_register_interrupts(void);
extern void baboon_irq_enable(int);
extern void baboon_irq_disable(int);
extern void baboon_irq_clear(int);
-extern int baboon_irq_pending(int);
/*
* SCC interrupt routines
@@ -258,8 +250,9 @@ void __init mac_init_IRQ(void)
if (baboon_present)
baboon_register_interrupts();
iop_register_interrupts();
- request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
- mac_nmi_handler);
+ if (request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
+ mac_nmi_handler))
+ pr_err("Couldn't register NMI\n");
#ifdef DEBUG_MACINTS
printk("mac_init_IRQ(): Done!\n");
#endif
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index a44c7086ab39..5d818568b343 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -35,7 +35,6 @@
#define RTC_OFFSET 2082844800
-extern struct mac_booter_data mac_bi_data;
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA
diff --git a/arch/m68k/mac/oss.c b/arch/m68k/mac/oss.c
index 8426501119ca..f3d23d6ebcf8 100644
--- a/arch/m68k/mac/oss.c
+++ b/arch/m68k/mac/oss.c
@@ -66,16 +66,21 @@ void __init oss_init(void)
void __init oss_register_interrupts(void)
{
- request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
- "scsi", (void *) oss);
- request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
- "scc", mac_scc_dispatch);
- request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
- "nubus", (void *) oss);
- request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
- "sound", (void *) oss);
- request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
- "via1", (void *) via1);
+ if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
+ "scsi", (void *) oss))
+ pr_err("Couldn't register %s interrupt\n", "scsi");
+ if (request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
+ "scc", mac_scc_dispatch))
+ pr_err("Couldn't register %s interrupt\n", "scc");
+ if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
+ "nubus", (void *) oss))
+ pr_err("Couldn't register %s interrupt\n", "nubus");
+ if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
+ "sound", (void *) oss))
+ pr_err("Couldn't register %s interrupt\n", "sound");
+ if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
+ "via1", (void *) via1))
+ pr_err("Couldn't register %s interrupt\n", "via1");
}
/*
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index f84a4dd64f94..ba6ccab64018 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -117,10 +117,14 @@ void __init psc_init(void)
void __init psc_register_interrupts(void)
{
- request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30);
- request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40);
- request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50);
- request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60);
+ if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
+ pr_err("Couldn't register psc%d interrupt\n", 3);
+ if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
+ pr_err("Couldn't register psc%d interrupt\n", 4);
+ if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
+ pr_err("Couldn't register psc%d interrupt\n", 5);
+ if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
+ pr_err("Couldn't register psc%d interrupt\n", 6);
}
/*
diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
index f01d418e64fe..7d97ba54536e 100644
--- a/arch/m68k/mac/via.c
+++ b/arch/m68k/mac/via.c
@@ -34,6 +34,7 @@
#include <asm/macints.h>
#include <asm/mac_via.h>
#include <asm/mac_psc.h>
+#include <asm/mac_oss.h>
volatile __u8 *via1, *via2;
int rbv_present;
@@ -84,7 +85,6 @@ void via_irq_disable(int irq);
void via_irq_clear(int irq);
extern irqreturn_t mac_scc_dispatch(int, void *);
-extern int oss_present;
/*
* Initialize the VIAs
@@ -283,7 +283,8 @@ void __init via_init_clock(irq_handler_t func)
via1[vT1CL] = MAC_CLOCK_LOW;
via1[vT1CH] = MAC_CLOCK_HIGH;
- request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func);
+ if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func))
+ pr_err("Couldn't register %s interrupt\n", "timer");
}
/*
@@ -293,25 +294,31 @@ void __init via_init_clock(irq_handler_t func)
void __init via_register_interrupts(void)
{
if (via_alt_mapping) {
- request_irq(IRQ_AUTO_1, via1_irq,
+ if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
- (void *) via1);
- request_irq(IRQ_AUTO_6, via1_irq,
+ (void *) via1))
+ pr_err("Couldn't register %s interrupt\n", "software");
+ if (request_irq(IRQ_AUTO_6, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
- (void *) via1);
+ (void *) via1))
+ pr_err("Couldn't register %s interrupt\n", "via1");
} else {
- request_irq(IRQ_AUTO_1, via1_irq,
+ if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
- (void *) via1);
+ (void *) via1))
+ pr_err("Couldn't register %s interrupt\n", "via1");
}
- request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
- "via2", (void *) via2);
+ if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
+ "via2", (void *) via2))
+ pr_err("Couldn't register %s interrupt\n", "via2");
if (!psc_present) {
- request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
- "scc", mac_scc_dispatch);
+ if (request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
+ "scc", mac_scc_dispatch))
+ pr_err("Couldn't register %s interrupt\n", "scc");
}
- request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
- "nubus", (void *) via2);
+ if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
+ IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
+ pr_err("Couldn't register %s interrupt\n", "nubus");
}
/*
diff --git a/arch/m68k/math-emu/fp_log.c b/arch/m68k/math-emu/fp_log.c
index b1033ae0d6f0..367ecee2f981 100644
--- a/arch/m68k/math-emu/fp_log.c
+++ b/arch/m68k/math-emu/fp_log.c
@@ -24,7 +24,6 @@ static const struct fp_ext fp_one =
extern struct fp_ext *fp_fadd(struct fp_ext *dest, const struct fp_ext *src);
extern struct fp_ext *fp_fdiv(struct fp_ext *dest, const struct fp_ext *src);
-extern struct fp_ext *fp_fmul(struct fp_ext *dest, const struct fp_ext *src);
struct fp_ext *
fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 81bb08ceec18..0007b2adf3a3 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -28,6 +28,7 @@
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
+#include <asm/sections.h>
#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -73,9 +74,6 @@ extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
-extern char _text[], _etext[];
-extern char __init_begin[], __init_end[];
-
extern pmd_t *zero_pgtable;
void __init mem_init(void)
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index c5dbb9bdb322..4665fc84b7dc 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -30,6 +30,7 @@
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
+#include <asm/sections.h>
#undef DEBUG
@@ -301,14 +302,12 @@ void __init paging_init(void)
}
}
-extern char __init_begin, __init_end;
-
void free_initmem(void)
{
unsigned long addr;
- addr = (unsigned long)&__init_begin;
- for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
+ addr = (unsigned long)__init_begin;
+ for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
virt_to_page(addr)->flags &= ~(1 << PG_reserved);
init_page_count(virt_to_page(addr));
free_page(addr);
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index 43cdf476ffab..100baaa692a1 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -42,7 +42,6 @@ extern unsigned long mvme147_gettimeoffset (void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
-extern void mvme147_waitbut(void);
static int bcd2int (unsigned char b);
@@ -115,8 +114,9 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
void mvme147_sched_init (irq_handler_t timer_routine)
{
tick_handler = timer_routine;
- request_irq (PCC_IRQ_TIMER1, mvme147_timer_int,
- IRQ_FLG_REPLACE, "timer 1", NULL);
+ if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE,
+ "timer 1", NULL))
+ pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
/* our clock goes off every 6.25us */
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 1521826fc3c7..11edf61cc2c4 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -48,7 +48,6 @@ extern unsigned long mvme16x_gettimeoffset (void);
extern int mvme16x_hwclk (int, struct rtc_time *);
extern int mvme16x_set_clock_mmss (unsigned long);
extern void mvme16x_reset (void);
-extern void mvme16x_waitbut(void);
int bcd2int (unsigned char b);
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 7110546e3c00..31ab3f08bbda 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,7 +36,6 @@
#include <asm/machdep.h>
#include <asm/q40_master.h>
-extern irqreturn_t q40_process_int(int level, struct pt_regs *regs);
extern void q40_init_IRQ(void);
static void q40_get_model(char *model);
extern void q40_sched_init(irq_handler_t handler);
@@ -47,8 +46,6 @@ static unsigned int q40_get_ss(void);
static int q40_set_clock_mmss(unsigned long);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
-extern void q40_waitbut(void);
-void q40_set_vectors(void);
extern void q40_mksound(unsigned int /*freq*/, unsigned int /*ticks*/);
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 8dfaa201342e..2ca25bd01a96 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -27,23 +27,21 @@
#include <asm/sun3mmu.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
+#include <asm/idprom.h>
#include <asm/intersil.h>
#include <asm/irq.h>
+#include <asm/sections.h>
#include <asm/segment.h>
#include <asm/sun3ints.h>
-extern char _text, _end;
-
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long sun3_gettimeoffset(void);
static void sun3_sched_init(irq_handler_t handler);
extern void sun3_get_model (char* model);
-extern void idprom_init (void);
extern int sun3_hwclk(int set, struct rtc_time *t);
volatile char* clock_va;
-extern volatile unsigned char* sun3_intreg;
extern unsigned long availmem;
unsigned long num_pages;
@@ -149,7 +147,7 @@ void __init config_sun3(void)
mach_halt = sun3_halt;
mach_get_hardware_list = sun3_get_hardware_list;
- memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
+ memory_start = ((((unsigned long)_end) + 0x2000) & ~0x1fff);
// PROM seems to want the last couple of physical pages. --m
memory_end = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 60f9d4500d72..3cd19390aae5 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -27,7 +27,6 @@
#include <asm/mmu_context.h>
#include <asm/dvma.h>
-extern void prom_reboot (char *) __attribute__ ((__noreturn__));
#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 7364cd67455e..ad90393a3361 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -105,7 +105,10 @@ void __init sun3_init_IRQ(void)
m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7);
m68k_setup_user_interrupt(VEC_USER, 128, NULL);
- request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL);
- request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL);
- request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL);
+ if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
+ pr_err("Couldn't register %s interrupt\n", "int5");
+ if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL))
+ pr_err("Couldn't register %s interrupt\n", "int7");
+ if (request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL))
+ pr_err("Couldn't register %s interrupt\n", "vec255");
}
diff --git a/arch/m68k/sun3x/config.c b/arch/m68k/sun3x/config.c
index 2b1ca2db070f..fc599fad4a54 100644
--- a/arch/m68k/sun3x/config.c
+++ b/arch/m68k/sun3x/config.c
@@ -23,7 +23,6 @@
#include "time.h"
volatile char *clock_va;
-extern volatile unsigned char *sun3_intreg;
extern void sun3_get_model(char *model);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a5255e7c79e0..52c80c2a57f2 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -595,6 +595,44 @@ config WR_PPMC
This enables support for the Wind River MIPS32 4KC PPMC evaluation
board, which is based on GT64120 bridge chip.
+config CAVIUM_OCTEON_SIMULATOR
+ bool "Support for the Cavium Networks Octeon Simulator"
+ select CEVT_R4K
+ select 64BIT_PHYS_ADDR
+ select DMA_COHERENT
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select CPU_CAVIUM_OCTEON
+ help
+ The Octeon simulator is software performance model of the Cavium
+ Octeon Processor. It supports simulating Octeon processors on x86
+ hardware.
+
+config CAVIUM_OCTEON_REFERENCE_BOARD
+ bool "Support for the Cavium Networks Octeon reference board"
+ select CEVT_R4K
+ select 64BIT_PHYS_ADDR
+ select DMA_COHERENT
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select SYS_HAS_EARLY_PRINTK
+ select CPU_CAVIUM_OCTEON
+ select SWAP_IO_SPACE
+ help
+ This option supports all of the Octeon reference boards from Cavium
+ Networks. It builds a kernel that dynamically determines the Octeon
+ CPU type and supports all known board reference implementations.
+ Some of the supported boards are:
+ EBT3000
+ EBH3000
+ EBH3100
+ Thunder
+ Kodama
+ Hikari
+ Say Y here for most Octeon reference boards.
+
endchoice
source "arch/mips/alchemy/Kconfig"
@@ -607,6 +645,7 @@ source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
source "arch/mips/vr41xx/Kconfig"
+source "arch/mips/cavium-octeon/Kconfig"
endmenu
@@ -682,7 +721,11 @@ config CEVT_DS1287
config CEVT_GT641XX
bool
+config CEVT_R4K_LIB
+ bool
+
config CEVT_R4K
+ select CEVT_R4K_LIB
bool
config CEVT_SB1250
@@ -697,7 +740,11 @@ config CSRC_BCM1480
config CSRC_IOASIC
bool
+config CSRC_R4K_LIB
+ bool
+
config CSRC_R4K
+ select CSRC_R4K_LIB
bool
config CSRC_SB1250
@@ -835,6 +882,9 @@ config IRQ_GT641XX
config IRQ_GIC
bool
+config IRQ_CPU_OCTEON
+ bool
+
config MIPS_BOARDS_GEN
bool
@@ -924,7 +974,7 @@ config BOOT_ELF32
config MIPS_L1_CACHE_SHIFT
int
default "4" if MACH_DECSTATION || MIKROTIK_RB532
- default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM
+ default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
default "4" if PMC_MSP4200_EVAL
default "5"
@@ -1185,6 +1235,23 @@ config CPU_SB1
select CPU_SUPPORTS_HIGHMEM
select WEAK_ORDERING
+config CPU_CAVIUM_OCTEON
+ bool "Cavium Octeon processor"
+ select IRQ_CPU
+ select IRQ_CPU_OCTEON
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select SYS_SUPPORTS_SMP
+ select NR_CPUS_DEFAULT_16
+ select WEAK_ORDERING
+ select WEAK_REORDERING_BEYOND_LLSC
+ select CPU_SUPPORTS_HIGHMEM
+ help
+ The Cavium Octeon processor is a highly integrated chip containing
+ many Ethernet hardware blocks for networking tasks. The processor
+ can have up to 16 MIPS64v2 cores and 8 integrated Gigabit Ethernet ports.
+ Full details can be found at http://www.caviumnetworks.com.
+
endchoice
config SYS_HAS_CPU_LOONGSON2
@@ -1285,7 +1352,7 @@ config CPU_MIPSR1
config CPU_MIPSR2
bool
- default y if CPU_MIPS32_R2 || CPU_MIPS64_R2
+ default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
config SYS_SUPPORTS_32BIT_KERNEL
bool
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 28c55f608913..21b00e95daef 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -144,6 +144,10 @@ cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap
cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
-Wa,--trap
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
+ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
+endif
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@@ -184,84 +188,84 @@ cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00
#
# AMD Alchemy Pb1000 eval board
#
-libs-$(CONFIG_MIPS_PB1000) += arch/mips/alchemy/pb1000/
+core-$(CONFIG_MIPS_PB1000) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1000) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1000) += 0xffffffff80100000
#
# AMD Alchemy Pb1100 eval board
#
-libs-$(CONFIG_MIPS_PB1100) += arch/mips/alchemy/pb1100/
+core-$(CONFIG_MIPS_PB1100) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1100) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000
#
# AMD Alchemy Pb1500 eval board
#
-libs-$(CONFIG_MIPS_PB1500) += arch/mips/alchemy/pb1500/
+core-$(CONFIG_MIPS_PB1500) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1500) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000
#
# AMD Alchemy Pb1550 eval board
#
-libs-$(CONFIG_MIPS_PB1550) += arch/mips/alchemy/pb1550/
+core-$(CONFIG_MIPS_PB1550) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1550) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000
#
# AMD Alchemy Pb1200 eval board
#
-libs-$(CONFIG_MIPS_PB1200) += arch/mips/alchemy/pb1200/
+core-$(CONFIG_MIPS_PB1200) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1200) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1200) += 0xffffffff80100000
#
# AMD Alchemy Db1000 eval board
#
-libs-$(CONFIG_MIPS_DB1000) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1000) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000
#
# AMD Alchemy Db1100 eval board
#
-libs-$(CONFIG_MIPS_DB1100) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1100) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1100) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1100) += 0xffffffff80100000
#
# AMD Alchemy Db1500 eval board
#
-libs-$(CONFIG_MIPS_DB1500) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1500) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1500) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1500) += 0xffffffff80100000
#
# AMD Alchemy Db1550 eval board
#
-libs-$(CONFIG_MIPS_DB1550) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_DB1550) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1550) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1550) += 0xffffffff80100000
#
# AMD Alchemy Db1200 eval board
#
-libs-$(CONFIG_MIPS_DB1200) += arch/mips/alchemy/pb1200/
+core-$(CONFIG_MIPS_DB1200) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1200) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1200) += 0xffffffff80100000
#
# AMD Alchemy Bosporus eval board
#
-libs-$(CONFIG_MIPS_BOSPORUS) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_BOSPORUS) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_BOSPORUS) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_BOSPORUS) += 0xffffffff80100000
#
# AMD Alchemy Mirage eval board
#
-libs-$(CONFIG_MIPS_MIRAGE) += arch/mips/alchemy/db1x00/
+core-$(CONFIG_MIPS_MIRAGE) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_MIRAGE) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_MIRAGE) += 0xffffffff80100000
@@ -586,6 +590,18 @@ core-$(CONFIG_TOSHIBA_RBTX4927) += arch/mips/txx9/rbtx4927/
core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/txx9/rbtx4938/
core-$(CONFIG_TOSHIBA_RBTX4939) += arch/mips/txx9/rbtx4939/
+#
+# Cavium Octeon
+#
+core-$(CONFIG_CPU_CAVIUM_OCTEON) += arch/mips/cavium-octeon/
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
+core-$(CONFIG_CPU_CAVIUM_OCTEON) += arch/mips/cavium-octeon/executive/
+ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff84100000
+else
+load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff81100000
+endif
+
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index e4a057d80ab6..7f8ef13d0014 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -128,9 +128,10 @@ config SOC_AU1200
config SOC_AU1X00
bool
select 64BIT_PHYS_ADDR
- select CEVT_R4K
- select CSRC_R4K
+ select CEVT_R4K_LIB
+ select CSRC_R4K_LIB
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_APM_EMULATION
+ select GENERIC_HARDIRQS_NO__DO_IRQ
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index df48fd65bbf3..d50d4764eafe 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -6,8 +6,8 @@
#
obj-y += prom.o irq.o puts.o time.o reset.o \
- au1xxx_irqmap.o clocks.o platform.o power.o setup.o \
- sleeper.o cputable.o dma.o dbdma.o gpio.o
+ clocks.o platform.o power.o setup.o \
+ sleeper.o dma.o dbdma.o gpio.o
obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/mips/alchemy/common/au1xxx_irqmap.c b/arch/mips/alchemy/common/au1xxx_irqmap.c
deleted file mode 100644
index c7ca1596394c..000000000000
--- a/arch/mips/alchemy/common/au1xxx_irqmap.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx processor specific IRQ tables
- *
- * Copyright 2004 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <au1000.h>
-
-/* The IC0 interrupt table. This is processor, rather than
- * board dependent, so no reason to keep this info in the board
- * dependent files.
- *
- * Careful if you change match 2 request!
- * The interrupt handler is called directly from the low level dispatch code.
- */
-struct au1xxx_irqmap __initdata au1xxx_ic0_map[] = {
-
-#if defined(CONFIG_SOC_AU1000)
- { AU1000_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_UART2_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
- { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1500)
-
- { AU1500_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
- { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1500_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1500_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1100)
-
- { AU1100_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1100_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1100_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1100_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
- { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1100_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- /* { AU1000_GPIO215_208_INT, INTC_INT_HIGH_LEVEL, 0 }, */
- { AU1100_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
-
-#elif defined(CONFIG_SOC_AU1550)
-
- { AU1550_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_CRYPTO_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_PCI_RST_INT, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PSC2_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_PSC3_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
- { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1550_NAND_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1550_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1550_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
- { AU1550_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1550_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
-
-#elif defined(CONFIG_SOC_AU1200)
-
- { AU1200_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_SWT_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1200_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_MAE_BE_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_MAE_FE_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_AES_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_CAMERA_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
- { AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1200_NAND_INT, INTC_INT_RISE_EDGE, 0 },
- { AU1200_USB_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
- { AU1200_MAE_BOTH_INT, INTC_INT_HIGH_LEVEL, 0 },
-
-#else
-#error "Error: Unknown Alchemy SOC"
-#endif
-
-};
-
-int __initdata au1xxx_ic0_nr_irqs = ARRAY_SIZE(au1xxx_ic0_map);
diff --git a/arch/mips/alchemy/common/clocks.c b/arch/mips/alchemy/common/clocks.c
index 043429d17c5f..d8991854530e 100644
--- a/arch/mips/alchemy/common/clocks.c
+++ b/arch/mips/alchemy/common/clocks.c
@@ -27,12 +27,21 @@
*/
#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
+/*
+ * I haven't found anyone who doesn't use a 12 MHz source clock,
+ * but just in case...
+ */
+#define AU1000_SRC_CLK 12000000
+
static unsigned int au1x00_clock; /* Hz */
-static unsigned int lcd_clock; /* KHz */
static unsigned long uart_baud_base;
+static DEFINE_SPINLOCK(time_lock);
+
/*
* Set the au1000_clock
*/
@@ -63,31 +72,45 @@ void set_au1x00_uart_baud_base(unsigned long new_baud_base)
}
/*
- * Calculate the Au1x00's LCD clock based on the current
- * cpu clock and the system bus clock, and try to keep it
- * below 40 MHz (the Pb1000 board can lock-up if the LCD
- * clock is over 40 MHz).
+ * We read the real processor speed from the PLL. This is important
+ * because it is more accurate than computing it from the 32 KHz
+ * counter, if it exists. If we don't have an accurate processor
+ * speed, all of the peripherals that derive their clocks based on
+ * this advertised speed will introduce error and sometimes not work
+ * properly. This function is further convoluted to still allow configurations
+ * to do that in case they have really, really old silicon with a
+ * write-only PLL register. -- Dan
*/
-void set_au1x00_lcd_clock(void)
+unsigned long au1xxx_calc_clock(void)
{
- unsigned int static_cfg0;
- unsigned int sys_busclk = (get_au1x00_speed() / 1000) /
- ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2);
+ unsigned long cpu_speed;
+ unsigned long flags;
- static_cfg0 = au_readl(MEM_STCFG0);
+ spin_lock_irqsave(&time_lock, flags);
- if (static_cfg0 & (1 << 11))
- lcd_clock = sys_busclk / 5; /* note: BCLK switching fails with D5 */
+ /*
+ * On early Au1000, sys_cpupll was write-only. Since these
+ * silicon versions of Au1000 are not sold by AMD, we don't bend
+ * over backwards trying to determine the frequency.
+ */
+ if (au1xxx_cpu_has_pll_wo())
+#ifdef CONFIG_SOC_AU1000_FREQUENCY
+ cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
+#else
+ cpu_speed = 396000000;
+#endif
else
- lcd_clock = sys_busclk / 4;
+ cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
- if (lcd_clock > 50000) /* Epson MAX */
- printk(KERN_WARNING "warning: LCD clock too high (%u KHz)\n",
- lcd_clock);
-}
+ /* On Alchemy, the CPU:counter ratio is 1:1 */
+ mips_hpt_frequency = cpu_speed;
+ /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
+ set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
+ & 0x03) + 2) * 16));
-unsigned int get_au1x00_lcd_clock(void)
-{
- return lcd_clock;
+ spin_unlock_irqrestore(&time_lock, flags);
+
+ set_au1x00_speed(cpu_speed);
+
+ return cpu_speed;
}
-EXPORT_SYMBOL(get_au1x00_lcd_clock);
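
For clarity, here is a minimal userspace sketch of the arithmetic the new au1xxx_calc_clock() performs, assuming the 12 MHz source clock defined above; the register values are invented examples rather than values read from hardware.

/* Illustrative only: mirrors the math in au1xxx_calc_clock() with
 * made-up register values instead of au_readl() accesses.
 */
#include <stdio.h>

int main(void)
{
	unsigned long sys_cpupll = 0x21;	/* example: PLL multiplier 33 */
	unsigned long powerctrl  = 0x01;	/* example SYS_POWERCTRL[1:0] */
	unsigned long src_clk    = 12000000;	/* AU1000_SRC_CLK */

	unsigned long cpu_speed = (sys_cpupll & 0x3f) * src_clk;
	/* Baudrate = CPU / (SD * 2 * CLKDIV * 16), so the baud base is: */
	unsigned long baud_base = cpu_speed / (2 * ((powerctrl & 0x03) + 2) * 16);

	printf("cpu %lu Hz, uart baud base %lu\n", cpu_speed, baud_base);
	return 0;
}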
diff --git a/arch/mips/alchemy/common/cputable.c b/arch/mips/alchemy/common/cputable.c
deleted file mode 100644
index ba6430bc2d03..000000000000
--- a/arch/mips/alchemy/common/cputable.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * arch/mips/au1000/common/cputable.c
- *
- * Copyright (C) 2004 Dan Malek (dan@embeddededge.com)
- * Copied from PowerPC and updated for Alchemy Au1xxx processors.
- *
- * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct cpu_spec *cur_cpu_spec[NR_CPUS];
-
-/* With some thought, we can probably use the mask to reduce the
- * size of the table.
- */
-struct cpu_spec cpu_specs[] = {
- { 0xffffffff, 0x00030100, "Au1000 DA", 1, 0, 1 },
- { 0xffffffff, 0x00030201, "Au1000 HA", 1, 0, 1 },
- { 0xffffffff, 0x00030202, "Au1000 HB", 1, 0, 1 },
- { 0xffffffff, 0x00030203, "Au1000 HC", 1, 1, 0 },
- { 0xffffffff, 0x00030204, "Au1000 HD", 1, 1, 0 },
- { 0xffffffff, 0x01030200, "Au1500 AB", 1, 1, 0 },
- { 0xffffffff, 0x01030201, "Au1500 AC", 0, 1, 0 },
- { 0xffffffff, 0x01030202, "Au1500 AD", 0, 1, 0 },
- { 0xffffffff, 0x02030200, "Au1100 AB", 1, 1, 0 },
- { 0xffffffff, 0x02030201, "Au1100 BA", 1, 1, 0 },
- { 0xffffffff, 0x02030202, "Au1100 BC", 1, 1, 0 },
- { 0xffffffff, 0x02030203, "Au1100 BD", 0, 1, 0 },
- { 0xffffffff, 0x02030204, "Au1100 BE", 0, 1, 0 },
- { 0xffffffff, 0x03030200, "Au1550 AA", 0, 1, 0 },
- { 0xffffffff, 0x04030200, "Au1200 AB", 0, 0, 0 },
- { 0xffffffff, 0x04030201, "Au1200 AC", 1, 0, 0 },
- { 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0, 0 }
-};
-
-void set_cpuspec(void)
-{
- struct cpu_spec *sp;
- u32 prid;
-
- prid = read_c0_prid();
- sp = cpu_specs;
- while ((prid & sp->prid_mask) != sp->prid_value)
- sp++;
- cur_cpu_spec[0] = sp;
-}
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 601ee9180ee4..3ab6d80d150d 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -174,6 +174,11 @@ static dbdev_tab_t dbdev_tab[] = {
#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
+#ifdef CONFIG_PM
+static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8];
+#endif
+
+
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
static dbdev_tab_t *find_dbdev_id(u32 id)
@@ -975,4 +980,64 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
return nbytes;
}
+#ifdef CONFIG_PM
+void au1xxx_dbdma_suspend(void)
+{
+ int i;
+ u32 addr;
+
+ addr = DDMA_GLOBAL_BASE;
+ au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
+ au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
+ au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
+ au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
+
+ /* save channel configurations */
+ for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+ au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
+ au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
+ au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
+ au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
+ au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
+ au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
+ au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
+
+ /* halt channel */
+ au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
+ au_sync();
+ while (!(au_readl(addr + 0x14) & 1))
+ au_sync();
+
+ addr += 0x100; /* next channel base */
+ }
+ /* disable channel interrupts */
+ au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
+ au_sync();
+}
+
+void au1xxx_dbdma_resume(void)
+{
+ int i;
+ u32 addr;
+
+ addr = DDMA_GLOBAL_BASE;
+ au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
+ au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
+ au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
+ au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
+
+ /* restore channel configurations */
+ for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
+ au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
+ au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
+ au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
+ au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
+ au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
+ au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
+ au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
+ au_sync();
+ addr += 0x100; /* next channel base */
+ }
+}
+#endif /* CONFIG_PM */
#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
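
A hedged sketch of how these hooks are meant to be used around a sleep cycle; the wrapper function and the low-level sleep call are placeholders, not part of this patch (the real callers added by this patch are wired into save_core_regs()/restore_core_regs() in power.c below).

/* Illustrative only: bracket a suspend with the new DBDMA PM hooks.
 * enter_low_power_state() is a hypothetical placeholder for the real
 * sleep routine; the actual callers live in
 * arch/mips/alchemy/common/power.c.
 */
#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
static void example_sleep_cycle(void)
{
	au1xxx_dbdma_suspend();		/* halt channels, save their registers */
	enter_low_power_state();	/* hypothetical sleep entry point */
	au1xxx_dbdma_resume();		/* rewrite the saved register images */
}
#endif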
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 40c6ceceb5f9..c88c821b4c36 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -24,6 +24,7 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -36,15 +37,172 @@
#include <asm/mach-pb1x00/pb1000.h>
#endif
-#define EXT_INTC0_REQ0 2 /* IP 2 */
-#define EXT_INTC0_REQ1 3 /* IP 3 */
-#define EXT_INTC1_REQ0 4 /* IP 4 */
-#define EXT_INTC1_REQ1 5 /* IP 5 */
-#define MIPS_TIMER_IP 7 /* IP 7 */
-
-void (*board_init_irq)(void) __initdata = NULL;
+static int au1x_ic_settype(unsigned int irq, unsigned int flow_type);
+
+/* per-processor fixed function irqs */
+struct au1xxx_irqmap au1xxx_ic0_map[] __initdata = {
+
+#if defined(CONFIG_SOC_AU1000)
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1500)
+
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1100)
+
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+
+#elif defined(CONFIG_SOC_AU1550)
+
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+
+#elif defined(CONFIG_SOC_AU1200)
+
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+
+#else
+#error "Error: Unknown Alchemy SOC"
+#endif
+};
-static DEFINE_SPINLOCK(irq_lock);
#ifdef CONFIG_PM
@@ -130,67 +288,47 @@ void restore_au1xxx_intctl(void)
#endif /* CONFIG_PM */
-inline void local_enable_irq(unsigned int irq_nr)
+static void au1x_ic0_unmask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
- if (bit >= 32) {
- au_writel(1 << (bit - 32), IC1_MASKSET);
- au_writel(1 << (bit - 32), IC1_WAKESET);
- } else {
- au_writel(1 << bit, IC0_MASKSET);
- au_writel(1 << bit, IC0_WAKESET);
- }
+ au_writel(1 << bit, IC0_MASKSET);
+ au_writel(1 << bit, IC0_WAKESET);
au_sync();
}
-
-inline void local_disable_irq(unsigned int irq_nr)
+static void au1x_ic1_unmask(unsigned int irq_nr)
{
- unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
+ unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
+ au_writel(1 << bit, IC1_MASKSET);
+ au_writel(1 << bit, IC1_WAKESET);
- if (bit >= 32) {
- au_writel(1 << (bit - 32), IC1_MASKCLR);
- au_writel(1 << (bit - 32), IC1_WAKECLR);
- } else {
- au_writel(1 << bit, IC0_MASKCLR);
- au_writel(1 << bit, IC0_WAKECLR);
- }
+/* Very hacky: does the PB1000 CPLD auto-disable this interrupt?
+ * Nowhere in the current kernel sources is it disabled. --mlau
+ */
+#if defined(CONFIG_MIPS_PB1000)
+ if (irq_nr == AU1000_GPIO_15)
+ au_writel(0x4000, PB1000_MDR); /* enable int */
+#endif
au_sync();
}
-
-static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr)
+static void au1x_ic0_mask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
- if (bit >= 32) {
- au_writel(1 << (bit - 32), IC1_RISINGCLR);
- au_writel(1 << (bit - 32), IC1_MASKCLR);
- } else {
- au_writel(1 << bit, IC0_RISINGCLR);
- au_writel(1 << bit, IC0_MASKCLR);
- }
+ au_writel(1 << bit, IC0_MASKCLR);
+ au_writel(1 << bit, IC0_WAKECLR);
au_sync();
}
-
-static inline void mask_and_ack_fall_edge_irq(unsigned int irq_nr)
+static void au1x_ic1_mask(unsigned int irq_nr)
{
- unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
- if (bit >= 32) {
- au_writel(1 << (bit - 32), IC1_FALLINGCLR);
- au_writel(1 << (bit - 32), IC1_MASKCLR);
- } else {
- au_writel(1 << bit, IC0_FALLINGCLR);
- au_writel(1 << bit, IC0_MASKCLR);
- }
+ unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
+ au_writel(1 << bit, IC1_MASKCLR);
+ au_writel(1 << bit, IC1_WAKECLR);
au_sync();
}
-
-static inline void mask_and_ack_either_edge_irq(unsigned int irq_nr)
+static void au1x_ic0_ack(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
@@ -198,349 +336,229 @@ static inline void mask_and_ack_either_edge_irq(unsigned int irq_nr)
* This may assume that we don't get interrupts from
* both edges at once, or if we do, that we don't care.
*/
- if (bit >= 32) {
- au_writel(1 << (bit - 32), IC1_FALLINGCLR);
- au_writel(1 << (bit - 32), IC1_RISINGCLR);
- au_writel(1 << (bit - 32), IC1_MASKCLR);
- } else {
- au_writel(1 << bit, IC0_FALLINGCLR);
- au_writel(1 << bit, IC0_RISINGCLR);
- au_writel(1 << bit, IC0_MASKCLR);
- }
+ au_writel(1 << bit, IC0_FALLINGCLR);
+ au_writel(1 << bit, IC0_RISINGCLR);
au_sync();
}
-static inline void mask_and_ack_level_irq(unsigned int irq_nr)
+static void au1x_ic1_ack(unsigned int irq_nr)
{
- local_disable_irq(irq_nr);
- au_sync();
-#if defined(CONFIG_MIPS_PB1000)
- if (irq_nr == AU1000_GPIO_15) {
- au_writel(0x8000, PB1000_MDR); /* ack int */
- au_sync();
- }
-#endif
-}
-
-static void end_irq(unsigned int irq_nr)
-{
- if (!(irq_desc[irq_nr].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- local_enable_irq(irq_nr);
+ unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
-#if defined(CONFIG_MIPS_PB1000)
- if (irq_nr == AU1000_GPIO_15) {
- au_writel(0x4000, PB1000_MDR); /* enable int */
- au_sync();
- }
-#endif
+ /*
+ * This may assume that we don't get interrupts from
+ * both edges at once, or if we do, that we don't care.
+ */
+ au_writel(1 << bit, IC1_FALLINGCLR);
+ au_writel(1 << bit, IC1_RISINGCLR);
+ au_sync();
}
-unsigned long save_local_and_disable(int controller)
+static int au1x_ic1_setwake(unsigned int irq, unsigned int on)
{
- int i;
- unsigned long flags, mask;
-
- spin_lock_irqsave(&irq_lock, flags);
- if (controller) {
- mask = au_readl(IC1_MASKSET);
- for (i = 32; i < 64; i++)
- local_disable_irq(i);
- } else {
- mask = au_readl(IC0_MASKSET);
- for (i = 0; i < 32; i++)
- local_disable_irq(i);
- }
- spin_unlock_irqrestore(&irq_lock, flags);
-
- return mask;
-}
+ unsigned int bit = irq - AU1000_INTC1_INT_BASE;
+ unsigned long wakemsk, flags;
-void restore_local_and_enable(int controller, unsigned long mask)
-{
- int i;
- unsigned long flags, new_mask;
-
- spin_lock_irqsave(&irq_lock, flags);
- for (i = 0; i < 32; i++)
- if (mask & (1 << i)) {
- if (controller)
- local_enable_irq(i + 32);
- else
- local_enable_irq(i);
- }
+ /* only GPIO 0-7 can act as wakeup source: */
+ if ((irq < AU1000_GPIO_0) || (irq > AU1000_GPIO_7))
+ return -EINVAL;
- if (controller)
- new_mask = au_readl(IC1_MASKSET);
+ local_irq_save(flags);
+ wakemsk = au_readl(SYS_WAKEMSK);
+ if (on)
+ wakemsk |= 1 << bit;
else
- new_mask = au_readl(IC0_MASKSET);
+ wakemsk &= ~(1 << bit);
+ au_writel(wakemsk, SYS_WAKEMSK);
+ au_sync();
+ local_irq_restore(flags);
- spin_unlock_irqrestore(&irq_lock, flags);
+ return 0;
}
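
For context, a hedged sketch of how a driver would reach au1x_ic1_setwake() through the generic IRQ API once the .set_wake hook below is installed; the handler, the IRQ choice and the function names are illustrative assumptions.

/* Illustrative only: mark a GPIO interrupt as a wakeup source.
 * enable_irq_wake() ends up in au1x_ic1_setwake(), which accepts
 * only AU1000_GPIO_0..AU1000_GPIO_7.
 */
static irqreturn_t example_handler(int irq, void *dev_id);	/* hypothetical ISR */

static int example_arm_wakeup(void)
{
	int ret;

	ret = request_irq(AU1000_GPIO_3, example_handler, 0, "wakebtn", NULL);
	if (ret)
		return ret;

	ret = enable_irq_wake(AU1000_GPIO_3);	/* -EINVAL outside GPIO 0..7 */
	if (ret)
		free_irq(AU1000_GPIO_3, NULL);

	return ret;
}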
-
-static struct irq_chip rise_edge_irq_type = {
- .name = "Au1000 Rise Edge",
- .ack = mask_and_ack_rise_edge_irq,
- .mask = local_disable_irq,
- .mask_ack = mask_and_ack_rise_edge_irq,
- .unmask = local_enable_irq,
- .end = end_irq,
-};
-
-static struct irq_chip fall_edge_irq_type = {
- .name = "Au1000 Fall Edge",
- .ack = mask_and_ack_fall_edge_irq,
- .mask = local_disable_irq,
- .mask_ack = mask_and_ack_fall_edge_irq,
- .unmask = local_enable_irq,
- .end = end_irq,
-};
-
-static struct irq_chip either_edge_irq_type = {
- .name = "Au1000 Rise or Fall Edge",
- .ack = mask_and_ack_either_edge_irq,
- .mask = local_disable_irq,
- .mask_ack = mask_and_ack_either_edge_irq,
- .unmask = local_enable_irq,
- .end = end_irq,
+/*
+ * irq_chips for both ICs; this way the mask handlers can be
+ * as short as possible.
+ *
+ * NOTE: the ->ack() callback is used by the handle_edge_irq
+ * flowhandler only, the ->mask_ack() one by handle_level_irq,
+ * so no need for an irq_chip for each type of irq (level/edge).
+ */
+static struct irq_chip au1x_ic0_chip = {
+ .name = "Alchemy-IC0",
+ .ack = au1x_ic0_ack, /* edge */
+ .mask = au1x_ic0_mask,
+ .mask_ack = au1x_ic0_mask, /* level */
+ .unmask = au1x_ic0_unmask,
+ .set_type = au1x_ic_settype,
};
-static struct irq_chip level_irq_type = {
- .name = "Au1000 Level",
- .ack = mask_and_ack_level_irq,
- .mask = local_disable_irq,
- .mask_ack = mask_and_ack_level_irq,
- .unmask = local_enable_irq,
- .end = end_irq,
+static struct irq_chip au1x_ic1_chip = {
+ .name = "Alchemy-IC1",
+ .ack = au1x_ic1_ack, /* edge */
+ .mask = au1x_ic1_mask,
+ .mask_ack = au1x_ic1_mask, /* level */
+ .unmask = au1x_ic1_unmask,
+ .set_type = au1x_ic_settype,
+ .set_wake = au1x_ic1_setwake,
};
-static void __init setup_local_irq(unsigned int irq_nr, int type, int int_req)
+static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
{
- unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
-
- if (irq_nr > AU1000_MAX_INTR)
- return;
-
- /* Config2[n], Config1[n], Config0[n] */
- if (bit >= 32) {
- switch (type) {
- case INTC_INT_RISE_EDGE: /* 0:0:1 */
- au_writel(1 << (bit - 32), IC1_CFG2CLR);
- au_writel(1 << (bit - 32), IC1_CFG1CLR);
- au_writel(1 << (bit - 32), IC1_CFG0SET);
- set_irq_chip(irq_nr, &rise_edge_irq_type);
- break;
- case INTC_INT_FALL_EDGE: /* 0:1:0 */
- au_writel(1 << (bit - 32), IC1_CFG2CLR);
- au_writel(1 << (bit - 32), IC1_CFG1SET);
- au_writel(1 << (bit - 32), IC1_CFG0CLR);
- set_irq_chip(irq_nr, &fall_edge_irq_type);
- break;
- case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
- au_writel(1 << (bit - 32), IC1_CFG2CLR);
- au_writel(1 << (bit - 32), IC1_CFG1SET);
- au_writel(1 << (bit - 32), IC1_CFG0SET);
- set_irq_chip(irq_nr, &either_edge_irq_type);
- break;
- case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
- au_writel(1 << (bit - 32), IC1_CFG2SET);
- au_writel(1 << (bit - 32), IC1_CFG1CLR);
- au_writel(1 << (bit - 32), IC1_CFG0SET);
- set_irq_chip(irq_nr, &level_irq_type);
- break;
- case INTC_INT_LOW_LEVEL: /* 1:1:0 */
- au_writel(1 << (bit - 32), IC1_CFG2SET);
- au_writel(1 << (bit - 32), IC1_CFG1SET);
- au_writel(1 << (bit - 32), IC1_CFG0CLR);
- set_irq_chip(irq_nr, &level_irq_type);
- break;
- case INTC_INT_DISABLED: /* 0:0:0 */
- au_writel(1 << (bit - 32), IC1_CFG0CLR);
- au_writel(1 << (bit - 32), IC1_CFG1CLR);
- au_writel(1 << (bit - 32), IC1_CFG2CLR);
- break;
- default: /* disable the interrupt */
- printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
- type, irq_nr);
- au_writel(1 << (bit - 32), IC1_CFG0CLR);
- au_writel(1 << (bit - 32), IC1_CFG1CLR);
- au_writel(1 << (bit - 32), IC1_CFG2CLR);
- return;
- }
- if (int_req) /* assign to interrupt request 1 */
- au_writel(1 << (bit - 32), IC1_ASSIGNCLR);
- else /* assign to interrupt request 0 */
- au_writel(1 << (bit - 32), IC1_ASSIGNSET);
- au_writel(1 << (bit - 32), IC1_SRCSET);
- au_writel(1 << (bit - 32), IC1_MASKCLR);
- au_writel(1 << (bit - 32), IC1_WAKECLR);
+ struct irq_chip *chip;
+ unsigned long icr[6];
+ unsigned int bit, ic;
+ int ret;
+
+ if (irq >= AU1000_INTC1_INT_BASE) {
+ bit = irq - AU1000_INTC1_INT_BASE;
+ chip = &au1x_ic1_chip;
+ ic = 1;
} else {
- switch (type) {
- case INTC_INT_RISE_EDGE: /* 0:0:1 */
- au_writel(1 << bit, IC0_CFG2CLR);
- au_writel(1 << bit, IC0_CFG1CLR);
- au_writel(1 << bit, IC0_CFG0SET);
- set_irq_chip(irq_nr, &rise_edge_irq_type);
- break;
- case INTC_INT_FALL_EDGE: /* 0:1:0 */
- au_writel(1 << bit, IC0_CFG2CLR);
- au_writel(1 << bit, IC0_CFG1SET);
- au_writel(1 << bit, IC0_CFG0CLR);
- set_irq_chip(irq_nr, &fall_edge_irq_type);
- break;
- case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
- au_writel(1 << bit, IC0_CFG2CLR);
- au_writel(1 << bit, IC0_CFG1SET);
- au_writel(1 << bit, IC0_CFG0SET);
- set_irq_chip(irq_nr, &either_edge_irq_type);
- break;
- case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
- au_writel(1 << bit, IC0_CFG2SET);
- au_writel(1 << bit, IC0_CFG1CLR);
- au_writel(1 << bit, IC0_CFG0SET);
- set_irq_chip(irq_nr, &level_irq_type);
- break;
- case INTC_INT_LOW_LEVEL: /* 1:1:0 */
- au_writel(1 << bit, IC0_CFG2SET);
- au_writel(1 << bit, IC0_CFG1SET);
- au_writel(1 << bit, IC0_CFG0CLR);
- set_irq_chip(irq_nr, &level_irq_type);
- break;
- case INTC_INT_DISABLED: /* 0:0:0 */
- au_writel(1 << bit, IC0_CFG0CLR);
- au_writel(1 << bit, IC0_CFG1CLR);
- au_writel(1 << bit, IC0_CFG2CLR);
- break;
- default: /* disable the interrupt */
- printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
- type, irq_nr);
- au_writel(1 << bit, IC0_CFG0CLR);
- au_writel(1 << bit, IC0_CFG1CLR);
- au_writel(1 << bit, IC0_CFG2CLR);
- return;
- }
- if (int_req) /* assign to interrupt request 1 */
- au_writel(1 << bit, IC0_ASSIGNCLR);
- else /* assign to interrupt request 0 */
- au_writel(1 << bit, IC0_ASSIGNSET);
- au_writel(1 << bit, IC0_SRCSET);
- au_writel(1 << bit, IC0_MASKCLR);
- au_writel(1 << bit, IC0_WAKECLR);
+ bit = irq - AU1000_INTC0_INT_BASE;
+ chip = &au1x_ic0_chip;
+ ic = 0;
+ }
+
+ if (bit > 31)
+ return -EINVAL;
+
+ icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET;
+ icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET;
+ icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET;
+ icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR;
+ icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR;
+ icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR;
+
+ ret = 0;
+
+ switch (flow_type) { /* cfgregs 2:1:0 */
+ case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
+ au_writel(1 << bit, icr[5]);
+ au_writel(1 << bit, icr[4]);
+ au_writel(1 << bit, icr[0]);
+ set_irq_chip_and_handler_name(irq, chip,
+ handle_edge_irq, "riseedge");
+ break;
+ case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
+ au_writel(1 << bit, icr[5]);
+ au_writel(1 << bit, icr[1]);
+ au_writel(1 << bit, icr[3]);
+ set_irq_chip_and_handler_name(irq, chip,
+ handle_edge_irq, "falledge");
+ break;
+ case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
+ au_writel(1 << bit, icr[5]);
+ au_writel(1 << bit, icr[1]);
+ au_writel(1 << bit, icr[0]);
+ set_irq_chip_and_handler_name(irq, chip,
+ handle_edge_irq, "bothedge");
+ break;
+ case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
+ au_writel(1 << bit, icr[2]);
+ au_writel(1 << bit, icr[4]);
+ au_writel(1 << bit, icr[0]);
+ set_irq_chip_and_handler_name(irq, chip,
+ handle_level_irq, "hilevel");
+ break;
+ case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
+ au_writel(1 << bit, icr[2]);
+ au_writel(1 << bit, icr[1]);
+ au_writel(1 << bit, icr[3]);
+ set_irq_chip_and_handler_name(irq, chip,
+ handle_level_irq, "lowlevel");
+ break;
+ case IRQ_TYPE_NONE: /* 0:0:0 */
+ au_writel(1 << bit, icr[5]);
+ au_writel(1 << bit, icr[4]);
+ au_writel(1 << bit, icr[3]);
+ /* set at least the chip so we can call set_irq_type() on it */
+ set_irq_chip(irq, chip);
+ break;
+ default:
+ ret = -EINVAL;
}
au_sync();
-}
-/*
- * Interrupts are nested. Even if an interrupt handler is registered
- * as "fast", we might get another interrupt before we return from
- * intcX_reqX_irqdispatch().
- */
+ return ret;
+}
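
Since every source now starts out as IRQ_TYPE_NONE (see arch_init_irq() below), boards and drivers select the trigger at runtime; a minimal hedged sketch, with an arbitrary GPIO as the example.

/* Illustrative only: pick a trigger for an external interrupt line.
 * set_irq_type() calls into au1x_ic_settype(), which programs the
 * CFG0/1/2 bits and installs the matching edge/level flow handler.
 */
static void __init example_config_ext_irq(void)
{
	set_irq_type(AU1000_GPIO_15, IRQ_TYPE_LEVEL_LOW);	/* active-low pin */
}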
-static void intc0_req0_irqdispatch(void)
+asmlinkage void plat_irq_dispatch(void)
{
- static unsigned long intc0_req0;
- unsigned int bit;
-
- intc0_req0 |= au_readl(IC0_REQ0INT);
+ unsigned int pending = read_c0_status() & read_c0_cause();
+ unsigned long s, off, bit;
- if (!intc0_req0)
+ if (pending & CAUSEF_IP7) {
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
return;
-
+ } else if (pending & CAUSEF_IP2) {
+ s = IC0_REQ0INT;
+ off = AU1000_INTC0_INT_BASE;
+ } else if (pending & CAUSEF_IP3) {
+ s = IC0_REQ1INT;
+ off = AU1000_INTC0_INT_BASE;
+ } else if (pending & CAUSEF_IP4) {
+ s = IC1_REQ0INT;
+ off = AU1000_INTC1_INT_BASE;
+ } else if (pending & CAUSEF_IP5) {
+ s = IC1_REQ1INT;
+ off = AU1000_INTC1_INT_BASE;
+ } else
+ goto spurious;
+
+ bit = 0;
+ s = au_readl(s);
+ if (unlikely(!s)) {
+spurious:
+ spurious_interrupt();
+ return;
+ }
#ifdef AU1000_USB_DEV_REQ_INT
/*
* Because of the tight timing of SETUP token to reply
* transactions, the USB device-side packet complete
* interrupt needs the highest priority.
*/
- if ((intc0_req0 & (1 << AU1000_USB_DEV_REQ_INT))) {
- intc0_req0 &= ~(1 << AU1000_USB_DEV_REQ_INT);
+ bit = 1 << (AU1000_USB_DEV_REQ_INT - AU1000_INTC0_INT_BASE);
+ if ((pending & CAUSEF_IP2) && (s & bit)) {
do_IRQ(AU1000_USB_DEV_REQ_INT);
return;
}
#endif
- bit = __ffs(intc0_req0);
- intc0_req0 &= ~(1 << bit);
- do_IRQ(AU1000_INTC0_INT_BASE + bit);
+ do_IRQ(__ffs(s) + off);
}
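
The dispatcher above serves the lowest pending bit first; a small standalone sketch of that decode, using __builtin_ctzl as a stand-in for the kernel's __ffs() and an invented pending word.

/* Illustrative only: given a 32-bit request-register snapshot, the
 * interrupt serviced is base + index of the lowest set bit.
 */
#include <stdio.h>

int main(void)
{
	unsigned long base = 32;		/* e.g. AU1000_INTC1_INT_BASE */
	unsigned long pending = 0x00000410;	/* example: bits 4 and 10 set */

	if (pending)
		printf("dispatch irq %lu\n", base + __builtin_ctzl(pending));
	else
		printf("spurious interrupt\n");

	return 0;
}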
-
-static void intc0_req1_irqdispatch(void)
-{
- static unsigned long intc0_req1;
- unsigned int bit;
-
- intc0_req1 |= au_readl(IC0_REQ1INT);
-
- if (!intc0_req1)
- return;
-
- bit = __ffs(intc0_req1);
- intc0_req1 &= ~(1 << bit);
- do_IRQ(AU1000_INTC0_INT_BASE + bit);
-}
-
-
-/*
- * Interrupt Controller 1:
- * interrupts 32 - 63
- */
-static void intc1_req0_irqdispatch(void)
+/* setup edge/level and assign request 0/1 */
+void __init au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count)
{
- static unsigned long intc1_req0;
- unsigned int bit;
-
- intc1_req0 |= au_readl(IC1_REQ0INT);
-
- if (!intc1_req0)
- return;
-
- bit = __ffs(intc1_req0);
- intc1_req0 &= ~(1 << bit);
- do_IRQ(AU1000_INTC1_INT_BASE + bit);
-}
-
-
-static void intc1_req1_irqdispatch(void)
-{
- static unsigned long intc1_req1;
- unsigned int bit;
-
- intc1_req1 |= au_readl(IC1_REQ1INT);
-
- if (!intc1_req1)
- return;
-
- bit = __ffs(intc1_req1);
- intc1_req1 &= ~(1 << bit);
- do_IRQ(AU1000_INTC1_INT_BASE + bit);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause();
+ unsigned int bit, irq_nr;
+
+ while (count--) {
+ irq_nr = map[count].im_irq;
+
+ if (((irq_nr < AU1000_INTC0_INT_BASE) ||
+ (irq_nr >= AU1000_INTC0_INT_BASE + 32)) &&
+ ((irq_nr < AU1000_INTC1_INT_BASE) ||
+ (irq_nr >= AU1000_INTC1_INT_BASE + 32)))
+ continue;
+
+ if (irq_nr >= AU1000_INTC1_INT_BASE) {
+ bit = irq_nr - AU1000_INTC1_INT_BASE;
+ if (map[count].im_request)
+ au_writel(1 << bit, IC1_ASSIGNCLR);
+ } else {
+ bit = irq_nr - AU1000_INTC0_INT_BASE;
+ if (map[count].im_request)
+ au_writel(1 << bit, IC0_ASSIGNCLR);
+ }
- if (pending & CAUSEF_IP7)
- do_IRQ(MIPS_CPU_IRQ_BASE + 7);
- else if (pending & CAUSEF_IP2)
- intc0_req0_irqdispatch();
- else if (pending & CAUSEF_IP3)
- intc0_req1_irqdispatch();
- else if (pending & CAUSEF_IP4)
- intc1_req0_irqdispatch();
- else if (pending & CAUSEF_IP5)
- intc1_req1_irqdispatch();
- else
- spurious_interrupt();
+ au1x_ic_settype(irq_nr, map[count].im_type);
+ }
}
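
A hedged sketch of the board-side counterpart: a small irqmap handed to the new au1xxx_setup_irqmap() from the board_init_irq() hook that arch_init_irq() now calls; the table entries are invented for illustration.

/* Illustrative only: a board file describes its external interrupts
 * in an au1xxx_irqmap table; au1xxx_setup_irqmap() assigns request
 * 0/1 and applies each trigger type via au1x_ic_settype().
 */
static struct au1xxx_irqmap example_board_irqmap[] __initdata = {
	{ AU1000_GPIO_0, IRQ_TYPE_EDGE_FALLING, 0 },	/* e.g. push button */
	{ AU1000_GPIO_3, IRQ_TYPE_LEVEL_LOW,    0 },	/* e.g. ethernet PHY */
};

void __init board_init_irq(void)
{
	au1xxx_setup_irqmap(example_board_irqmap,
			    ARRAY_SIZE(example_board_irqmap));
}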
void __init arch_init_irq(void)
{
int i;
- struct au1xxx_irqmap *imp;
- extern struct au1xxx_irqmap au1xxx_irq_map[];
- extern struct au1xxx_irqmap au1xxx_ic0_map[];
- extern int au1xxx_nr_irqs;
- extern int au1xxx_ic0_nr_irqs;
/*
* Initialize interrupt controllers to a safe state.
@@ -569,28 +587,25 @@ void __init arch_init_irq(void)
mips_cpu_irq_init();
- /*
- * Initialize IC0, which is fixed per processor.
+ /* register all 64 possible IC0+IC1 irq sources as type "none".
+ * Use set_irq_type() to set edge/level behaviour at runtime.
*/
- imp = au1xxx_ic0_map;
- for (i = 0; i < au1xxx_ic0_nr_irqs; i++) {
- setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
- imp++;
- }
+ for (i = AU1000_INTC0_INT_BASE;
+ (i < AU1000_INTC0_INT_BASE + 32); i++)
+ au1x_ic_settype(i, IRQ_TYPE_NONE);
+
+ for (i = AU1000_INTC1_INT_BASE;
+ (i < AU1000_INTC1_INT_BASE + 32); i++)
+ au1x_ic_settype(i, IRQ_TYPE_NONE);
/*
- * Now set up the irq mapping for the board.
+ * Initialize IC0, which is fixed per processor.
*/
- imp = au1xxx_irq_map;
- for (i = 0; i < au1xxx_nr_irqs; i++) {
- setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
- imp++;
- }
-
- set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4);
+ au1xxx_setup_irqmap(au1xxx_ic0_map, ARRAY_SIZE(au1xxx_ic0_map));
- /* Board specific IRQ initialization.
+ /* Boards can register additional (GPIO-based) IRQs.
*/
- if (board_init_irq)
- board_init_irq();
+ board_init_irq();
+
+ set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3);
}
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index bd854a6d1d89..6ab7b42aa1be 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -35,25 +35,12 @@
#include <linux/jiffies.h>
#include <asm/uaccess.h>
-#include <asm/cacheflush.h>
#include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_PM
-
-#define DEBUG 1
-#ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
-#else
-#define DPRINTK(fmt, args...)
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
#endif
-static void au1000_calibrate_delay(void);
-
-extern unsigned long save_local_and_disable(int controller);
-extern void restore_local_and_enable(int controller, unsigned long mask);
-extern void local_enable_irq(unsigned int irq_nr);
-
-static DEFINE_SPINLOCK(pm_lock);
+#ifdef CONFIG_PM
/*
* We need to save/restore a bunch of core registers that are
@@ -65,29 +52,16 @@ static DEFINE_SPINLOCK(pm_lock);
* We only have to save/restore registers that aren't otherwise
* done as part of a driver pm_* function.
*/
-static unsigned int sleep_aux_pll_cntrl;
-static unsigned int sleep_cpu_pll_cntrl;
-static unsigned int sleep_pin_function;
-static unsigned int sleep_uart0_inten;
-static unsigned int sleep_uart0_fifoctl;
-static unsigned int sleep_uart0_linectl;
-static unsigned int sleep_uart0_clkdiv;
-static unsigned int sleep_uart0_enable;
-static unsigned int sleep_usbhost_enable;
-static unsigned int sleep_usbdev_enable;
-static unsigned int sleep_static_memctlr[4][3];
+static unsigned int sleep_uart0_inten;
+static unsigned int sleep_uart0_fifoctl;
+static unsigned int sleep_uart0_linectl;
+static unsigned int sleep_uart0_clkdiv;
+static unsigned int sleep_uart0_enable;
+static unsigned int sleep_usb[2];
+static unsigned int sleep_sys_clocks[5];
+static unsigned int sleep_sys_pinfunc;
+static unsigned int sleep_static_memctlr[4][3];
-/*
- * Define this to cause the value you write to /proc/sys/pm/sleep to
- * set the TOY timer for the amount of time you want to sleep.
- * This is done mainly for testing, but may be useful in other cases.
- * The value is number of 32KHz ticks to sleep.
- */
-#define SLEEP_TEST_TIMEOUT 1
-#ifdef SLEEP_TEST_TIMEOUT
-static int sleep_ticks;
-void wakeup_counter0_set(int ticks);
-#endif
static void save_core_regs(void)
{
@@ -105,31 +79,45 @@ static void save_core_regs(void)
sleep_uart0_linectl = au_readl(UART0_ADDR + UART_LCR);
sleep_uart0_clkdiv = au_readl(UART0_ADDR + UART_CLK);
sleep_uart0_enable = au_readl(UART0_ADDR + UART_MOD_CNTRL);
+ au_sync();
+#ifndef CONFIG_SOC_AU1200
/* Shutdown USB host/device. */
- sleep_usbhost_enable = au_readl(USB_HOST_CONFIG);
+ sleep_usb[0] = au_readl(USB_HOST_CONFIG);
/* There appears to be some undocumented reset register.... */
- au_writel(0, 0xb0100004); au_sync();
- au_writel(0, USB_HOST_CONFIG); au_sync();
+ au_writel(0, 0xb0100004);
+ au_sync();
+ au_writel(0, USB_HOST_CONFIG);
+ au_sync();
- sleep_usbdev_enable = au_readl(USBD_ENABLE);
- au_writel(0, USBD_ENABLE); au_sync();
+ sleep_usb[1] = au_readl(USBD_ENABLE);
+ au_writel(0, USBD_ENABLE);
+ au_sync();
+
+#else /* AU1200 */
+
+ /* enable access to OTG mmio so we can save OTG CAP/MUX.
+ * FIXME: write an OTG driver and move this stuff there!
+ */
+ au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
+ au_sync();
+ sleep_usb[0] = au_readl(0xb4020020); /* OTG_CAP */
+ sleep_usb[1] = au_readl(0xb4020024); /* OTG_MUX */
+#endif
/* Save interrupt controller state. */
save_au1xxx_intctl();
/* Clocks and PLLs. */
- sleep_aux_pll_cntrl = au_readl(SYS_AUXPLL);
+ sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
+ sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
+ sleep_sys_clocks[2] = au_readl(SYS_CLKSRC);
+ sleep_sys_clocks[3] = au_readl(SYS_CPUPLL);
+ sleep_sys_clocks[4] = au_readl(SYS_AUXPLL);
- /*
- * We don't really need to do this one, but unless we
- * write it again it won't have a valid value if we
- * happen to read it.
- */
- sleep_cpu_pll_cntrl = au_readl(SYS_CPUPLL);
-
- sleep_pin_function = au_readl(SYS_PINFUNC);
+ /* pin mux config */
+ sleep_sys_pinfunc = au_readl(SYS_PINFUNC);
/* Save the static memory controller configuration. */
sleep_static_memctlr[0][0] = au_readl(MEM_STCFG0);
@@ -144,16 +132,45 @@ static void save_core_regs(void)
sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
+
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+ au1xxx_dbdma_suspend();
+#endif
}
static void restore_core_regs(void)
{
- extern void restore_au1xxx_intctl(void);
- extern void wakeup_counter0_adjust(void);
+ /* restore clock configuration. Writing CPUPLL last will
+ * stall a bit and stabilize other clocks (unless this is
+ * one of those Au1000s with a write-only PLL, where we don't
+ * have a valid value)
+ */
+ au_writel(sleep_sys_clocks[0], SYS_FREQCTRL0);
+ au_writel(sleep_sys_clocks[1], SYS_FREQCTRL1);
+ au_writel(sleep_sys_clocks[2], SYS_CLKSRC);
+ au_writel(sleep_sys_clocks[4], SYS_AUXPLL);
+ if (!au1xxx_cpu_has_pll_wo())
+ au_writel(sleep_sys_clocks[3], SYS_CPUPLL);
+ au_sync();
- au_writel(sleep_aux_pll_cntrl, SYS_AUXPLL); au_sync();
- au_writel(sleep_cpu_pll_cntrl, SYS_CPUPLL); au_sync();
- au_writel(sleep_pin_function, SYS_PINFUNC); au_sync();
+ au_writel(sleep_sys_pinfunc, SYS_PINFUNC);
+ au_sync();
+
+#ifndef CONFIG_SOC_AU1200
+ au_writel(sleep_usb[0], USB_HOST_CONFIG);
+ au_writel(sleep_usb[1], USBD_ENABLE);
+ au_sync();
+#else
+ /* enable access to OTG memory */
+ au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
+ au_sync();
+
+ /* restore OTG caps and port mux. */
+ au_writel(sleep_usb[0], 0xb4020020 + 0); /* OTG_CAP */
+ au_sync();
+ au_writel(sleep_usb[1], 0xb4020020 + 4); /* OTG_MUX */
+ au_sync();
+#endif
/* Restore the static memory controller configuration. */
au_writel(sleep_static_memctlr[0][0], MEM_STCFG0);
@@ -184,282 +201,17 @@ static void restore_core_regs(void)
}
restore_au1xxx_intctl();
- wakeup_counter0_adjust();
-}
-
-unsigned long suspend_mode;
-void wakeup_from_suspend(void)
-{
- suspend_mode = 0;
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+ au1xxx_dbdma_resume();
+#endif
}
-int au_sleep(void)
+void au_sleep(void)
{
- unsigned long wakeup, flags;
- extern void save_and_sleep(void);
-
- spin_lock_irqsave(&pm_lock, flags);
-
save_core_regs();
-
- flush_cache_all();
-
- /**
- ** The code below is all system dependent and we should probably
- ** have a function call out of here to set this up. You need
- ** to configure the GPIO or timer interrupts that will bring
- ** you out of sleep.
- ** For testing, the TOY counter wakeup is useful.
- **/
-#if 0
- au_writel(au_readl(SYS_PINSTATERD) & ~(1 << 11), SYS_PINSTATERD);
-
- /* GPIO 6 can cause a wake up event */
- wakeup = au_readl(SYS_WAKEMSK);
- wakeup &= ~(1 << 8); /* turn off match20 wakeup */
- wakeup |= 1 << 6; /* turn on GPIO 6 wakeup */
-#else
- /* For testing, allow match20 to wake us up. */
-#ifdef SLEEP_TEST_TIMEOUT
- wakeup_counter0_set(sleep_ticks);
-#endif
- wakeup = 1 << 8; /* turn on match20 wakeup */
- wakeup = 0;
-#endif
- au_writel(1, SYS_WAKESRC); /* clear cause */
- au_sync();
- au_writel(wakeup, SYS_WAKEMSK);
- au_sync();
-
- save_and_sleep();
-
- /*
- * After a wakeup, the cpu vectors back to 0x1fc00000, so
- * it's up to the boot code to get us back here.
- */
+ au1xxx_save_and_sleep();
restore_core_regs();
- spin_unlock_irqrestore(&pm_lock, flags);
- return 0;
-}
-
-static int pm_do_sleep(ctl_table *ctl, int write, struct file *file,
- void __user *buffer, size_t *len, loff_t *ppos)
-{
-#ifdef SLEEP_TEST_TIMEOUT
-#define TMPBUFLEN2 16
- char buf[TMPBUFLEN2], *p;
-#endif
-
- if (!write)
- *len = 0;
- else {
-#ifdef SLEEP_TEST_TIMEOUT
- if (*len > TMPBUFLEN2 - 1)
- return -EFAULT;
- if (copy_from_user(buf, buffer, *len))
- return -EFAULT;
- buf[*len] = 0;
- p = buf;
- sleep_ticks = simple_strtoul(p, &p, 0);
-#endif
-
- au_sleep();
- }
- return 0;
-}
-
-static int pm_do_freq(ctl_table *ctl, int write, struct file *file,
- void __user *buffer, size_t *len, loff_t *ppos)
-{
- int retval = 0, i;
- unsigned long val, pll;
-#define TMPBUFLEN 64
-#define MAX_CPU_FREQ 396
- char buf[TMPBUFLEN], *p;
- unsigned long flags, intc0_mask, intc1_mask;
- unsigned long old_baud_base, old_cpu_freq, old_clk, old_refresh;
- unsigned long new_baud_base, new_cpu_freq, new_clk, new_refresh;
- unsigned long baud_rate;
-
- spin_lock_irqsave(&pm_lock, flags);
- if (!write)
- *len = 0;
- else {
- /* Parse the new frequency */
- if (*len > TMPBUFLEN - 1) {
- spin_unlock_irqrestore(&pm_lock, flags);
- return -EFAULT;
- }
- if (copy_from_user(buf, buffer, *len)) {
- spin_unlock_irqrestore(&pm_lock, flags);
- return -EFAULT;
- }
- buf[*len] = 0;
- p = buf;
- val = simple_strtoul(p, &p, 0);
- if (val > MAX_CPU_FREQ) {
- spin_unlock_irqrestore(&pm_lock, flags);
- return -EFAULT;
- }
-
- pll = val / 12;
- if ((pll > 33) || (pll < 7)) { /* 396 MHz max, 84 MHz min */
- /* Revisit this for higher speed CPUs */
- spin_unlock_irqrestore(&pm_lock, flags);
- return -EFAULT;
- }
-
- old_baud_base = get_au1x00_uart_baud_base();
- old_cpu_freq = get_au1x00_speed();
-
- new_cpu_freq = pll * 12 * 1000000;
- new_baud_base = (new_cpu_freq / (2 * ((int)(au_readl(SYS_POWERCTRL)
- & 0x03) + 2) * 16));
- set_au1x00_speed(new_cpu_freq);
- set_au1x00_uart_baud_base(new_baud_base);
-
- old_refresh = au_readl(MEM_SDREFCFG) & 0x1ffffff;
- new_refresh = ((old_refresh * new_cpu_freq) / old_cpu_freq) |
- (au_readl(MEM_SDREFCFG) & ~0x1ffffff);
-
- au_writel(pll, SYS_CPUPLL);
- au_sync_delay(1);
- au_writel(new_refresh, MEM_SDREFCFG);
- au_sync_delay(1);
-
- for (i = 0; i < 4; i++)
- if (au_readl(UART_BASE + UART_MOD_CNTRL +
- i * 0x00100000) == 3) {
- old_clk = au_readl(UART_BASE + UART_CLK +
- i * 0x00100000);
- baud_rate = old_baud_base / old_clk;
- /*
- * We won't get an exact baud rate and the error
- * could be significant enough that our new
- * calculation will result in a clock that will
- * give us a baud rate that's too far off from
- * what we really want.
- */
- if (baud_rate > 100000)
- baud_rate = 115200;
- else if (baud_rate > 50000)
- baud_rate = 57600;
- else if (baud_rate > 30000)
- baud_rate = 38400;
- else if (baud_rate > 17000)
- baud_rate = 19200;
- else
- baud_rate = 9600;
- new_clk = new_baud_base / baud_rate;
- au_writel(new_clk, UART_BASE + UART_CLK +
- i * 0x00100000);
- au_sync_delay(10);
- }
- }
-
- /*
- * We don't want _any_ interrupts other than match20. Otherwise our
- * au1000_calibrate_delay() calculation will be off, potentially a lot.
- */
- intc0_mask = save_local_and_disable(0);
- intc1_mask = save_local_and_disable(1);
- local_enable_irq(AU1000_TOY_MATCH2_INT);
- spin_unlock_irqrestore(&pm_lock, flags);
- au1000_calibrate_delay();
- restore_local_and_enable(0, intc0_mask);
- restore_local_and_enable(1, intc1_mask);
-
- return retval;
}
-
-static struct ctl_table pm_table[] = {
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sleep",
- .data = NULL,
- .maxlen = 0,
- .mode = 0600,
- .proc_handler = &pm_do_sleep
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "freq",
- .data = NULL,
- .maxlen = 0,
- .mode = 0600,
- .proc_handler = &pm_do_freq
- },
- {}
-};
-
-static struct ctl_table pm_dir_table[] = {
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "pm",
- .mode = 0555,
- .child = pm_table
- },
- {}
-};
-
-/*
- * Initialize power interface
- */
-static int __init pm_init(void)
-{
- register_sysctl_table(pm_dir_table);
- return 0;
-}
-
-__initcall(pm_init);
-
-/*
- * This is right out of init/main.c
- */
-
-/*
- * This is the number of bits of precision for the loops_per_jiffy.
- * Each bit takes on average 1.5/HZ seconds. This (like the original)
- * is a little better than 1%.
- */
-#define LPS_PREC 8
-
-static void au1000_calibrate_delay(void)
-{
- unsigned long ticks, loopbit;
- int lps_precision = LPS_PREC;
-
- loops_per_jiffy = 1 << 12;
-
- while (loops_per_jiffy <<= 1) {
- /* Wait for "start of" clock tick */
- ticks = jiffies;
- while (ticks == jiffies)
- /* nothing */ ;
- /* Go ... */
- ticks = jiffies;
- __delay(loops_per_jiffy);
- ticks = jiffies - ticks;
- if (ticks)
- break;
- }
-
- /*
- * Do a binary approximation to get loops_per_jiffy set to be equal
- * one clock (up to lps_precision bits)
- */
- loops_per_jiffy >>= 1;
- loopbit = loops_per_jiffy;
- while (lps_precision-- && (loopbit >>= 1)) {
- loops_per_jiffy |= loopbit;
- ticks = jiffies;
- while (ticks == jiffies);
- ticks = jiffies;
- __delay(loops_per_jiffy);
- if (jiffies != ticks) /* longer than 1 tick */
- loops_per_jiffy &= ~loopbit;
- }
-}
#endif /* CONFIG_PM */
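With the sysctl interface gone, au_sleep() no longer programs any wakeup sources itself; a board-level suspend hook is now expected to set up SYS_WAKEMSK/SYS_WAKESRC before calling it. A minimal sketch of such a caller, assuming only the accessors already used in this patch (au_readl/au_writel/au_sync); example_enter_sleep() is a made-up name:

/* Minimal sketch, not part of this patch: program one wakeup source and
 * enter sleep.  The register sequence mirrors the devboards/pm.c code
 * added later in this series.
 */
#include <asm/mach-au1x00/au1000.h>

void au_sleep(void);	/* from alchemy/common/power.c */

static void example_enter_sleep(unsigned long wakemsk)
{
	au_writel(0, SYS_WAKEMSK);		/* mask all wakeup sources */
	au_sync();
	au_writel(0, SYS_WAKESRC);		/* clear stale wakeup causes */
	au_sync();

	au_writel(wakemsk, SYS_WAKEMSK);	/* e.g. SYS_WAKEMSK_GPIO(0) */
	au_sync();

	au_sleep();	/* save state, put SDRAM into self refresh, sleep */
}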
diff --git a/arch/mips/alchemy/common/reset.c b/arch/mips/alchemy/common/reset.c
index d555429c8d6f..0191c936cb5e 100644
--- a/arch/mips/alchemy/common/reset.c
+++ b/arch/mips/alchemy/common/reset.c
@@ -31,8 +31,6 @@
#include <asm/mach-au1x00/au1000.h>
-extern int au_sleep(void);
-
void au1000_restart(char *command)
{
/* Set all integrated peripherals to disabled states */
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 1ac6b06f42a3..3f036b3d400e 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -35,7 +35,6 @@
#include <asm/time.h>
#include <au1000.h>
-#include <prom.h>
extern void __init board_setup(void);
extern void au1000_restart(char *);
@@ -45,80 +44,34 @@ extern void set_cpuspec(void);
void __init plat_mem_setup(void)
{
- struct cpu_spec *sp;
- char *argptr;
- unsigned long prid, cpufreq, bclk;
+ unsigned long est_freq;
- set_cpuspec();
- sp = cur_cpu_spec[0];
+ /* determine core clock */
+ est_freq = au1xxx_calc_clock();
+ est_freq += 5000; /* round */
+ est_freq -= est_freq % 10000;
+ printk(KERN_INFO "(PRId %08x) @ %lu.%02lu MHz\n", read_c0_prid(),
+ est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
- board_setup(); /* board specific setup */
-
- prid = read_c0_prid();
- if (sp->cpu_pll_wo)
-#ifdef CONFIG_SOC_AU1000_FREQUENCY
- cpufreq = CONFIG_SOC_AU1000_FREQUENCY / 1000000;
-#else
- cpufreq = 396;
-#endif
- else
- cpufreq = (au_readl(SYS_CPUPLL) & 0x3F) * 12;
- printk(KERN_INFO "(PRID %08lx) @ %ld MHz\n", prid, cpufreq);
+ _machine_restart = au1000_restart;
+ _machine_halt = au1000_halt;
+ pm_power_off = au1000_power_off;
- if (sp->cpu_bclk) {
- /* Enable BCLK switching */
- bclk = au_readl(SYS_POWERCTRL);
- au_writel(bclk | 0x60, SYS_POWERCTRL);
- printk(KERN_INFO "BCLK switching enabled!\n");
- }
+ board_setup(); /* board specific setup */
- if (sp->cpu_od)
+ if (au1xxx_cpu_needs_config_od())
/* Various early Au1xx0 errata corrected by this */
set_c0_config(1 << 19); /* Set Config[OD] */
else
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
- argptr = prom_getcmdline();
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
- argptr = strstr(argptr, "console=");
- if (argptr == NULL) {
- argptr = prom_getcmdline();
- strcat(argptr, " console=ttyS0,115200");
- }
-#endif
-
-#ifdef CONFIG_FB_AU1100
- argptr = strstr(argptr, "video=");
- if (argptr == NULL) {
- argptr = prom_getcmdline();
- /* default panel */
- /*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
- }
-#endif
-
-#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
- /* au1000 does not support vra, au1500 and au1100 do */
- strcat(argptr, " au1000_audio=vra");
- argptr = prom_getcmdline();
-#endif
- _machine_restart = au1000_restart;
- _machine_halt = au1000_halt;
- pm_power_off = au1000_power_off;
-
/* IO/MEM resources. */
set_io_port_base(0);
ioport_resource.start = IOPORT_RESOURCE_START;
ioport_resource.end = IOPORT_RESOURCE_END;
iomem_resource.start = IOMEM_RESOURCE_START;
iomem_resource.end = IOMEM_RESOURCE_END;
-
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_E0S);
- au_writel(SYS_CNTRL_E0 | SYS_CNTRL_EN0, SYS_COUNTER_CNTRL);
- au_sync();
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S);
- au_writel(0, SYS_TOYTRIM);
}
#if defined(CONFIG_64BIT_PHYS_ADDR)
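The two lines that massage est_freq above simply round the measured core clock to the nearest 10 kHz before it is printed. A small stand-alone illustration (ordinary user-space C; the helper name is mine):

/* Sketch of the rounding used above: adding half a step (5000) and then
 * truncating to the step (10000) rounds est_freq to the nearest 10 kHz.
 */
#include <stdio.h>

static unsigned long round_to_10khz(unsigned long hz)
{
	hz += 5000;		/* half of the 10 kHz step */
	hz -= hz % 10000;	/* truncate to a 10 kHz multiple */
	return hz;
}

int main(void)
{
	/* 396,123,456 Hz -> 396,120,000 Hz; 396,127,000 Hz -> 396,130,000 Hz */
	printf("%lu %lu\n", round_to_10khz(396123456UL),
	       round_to_10khz(396127000UL));
	return 0;
}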
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index 3006e270c8bc..4f4b16741d12 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -15,16 +15,17 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
+ .extern __flush_cache_all
+
.text
- .set macro
- .set noat
+ .set noreorder
+ .set noat
.align 5
/* Save all of the processor general registers and go to sleep.
* A wakeup condition will get us back here to restore the registers.
*/
-LEAF(save_and_sleep)
-
+LEAF(au1xxx_save_and_sleep)
subu sp, PT_SIZE
sw $1, PT_R1(sp)
sw $2, PT_R2(sp)
@@ -33,14 +34,6 @@ LEAF(save_and_sleep)
sw $5, PT_R5(sp)
sw $6, PT_R6(sp)
sw $7, PT_R7(sp)
- sw $8, PT_R8(sp)
- sw $9, PT_R9(sp)
- sw $10, PT_R10(sp)
- sw $11, PT_R11(sp)
- sw $12, PT_R12(sp)
- sw $13, PT_R13(sp)
- sw $14, PT_R14(sp)
- sw $15, PT_R15(sp)
sw $16, PT_R16(sp)
sw $17, PT_R17(sp)
sw $18, PT_R18(sp)
@@ -49,12 +42,9 @@ LEAF(save_and_sleep)
sw $21, PT_R21(sp)
sw $22, PT_R22(sp)
sw $23, PT_R23(sp)
- sw $24, PT_R24(sp)
- sw $25, PT_R25(sp)
sw $26, PT_R26(sp)
sw $27, PT_R27(sp)
sw $28, PT_R28(sp)
- sw $29, PT_R29(sp)
sw $30, PT_R30(sp)
sw $31, PT_R31(sp)
mfc0 k0, CP0_STATUS
@@ -66,20 +56,26 @@ LEAF(save_and_sleep)
mfc0 k0, CP0_CONFIG
sw k0, 0x14(sp)
+ /* flush caches to make sure context is in memory */
+ la t1, __flush_cache_all
+ lw t0, 0(t1)
+ jalr t0
+ nop
+
/* Now set up the scratch registers so the boot rom will
* return to this point upon wakeup.
+ * sys_scratch0 : SP
+ * sys_scratch1 : RA
*/
- la k0, 1f
- lui k1, 0xb190
- ori k1, 0x18
- sw sp, 0(k1)
- ori k1, 0x1c
- sw k0, 0(k1)
+ lui t3, 0xb190 /* sys_xxx */
+ sw sp, 0x0018(t3)
+ la k0, 3f /* resume path */
+ sw k0, 0x001c(t3)
-/* Put SDRAM into self refresh. Preload instructions into cache,
- * issue a precharge, then auto refresh, then sleep commands to it.
- */
- la t0, sdsleep
+ /* Put SDRAM into self refresh: Preload instructions into cache,
+ * issue a precharge, auto/self refresh, then sleep commands to it.
+ */
+ la t0, 1f
.set mips3
cache 0x14, 0(t0)
cache 0x14, 32(t0)
@@ -87,24 +83,57 @@ LEAF(save_and_sleep)
cache 0x14, 96(t0)
.set mips0
-sdsleep:
- lui k0, 0xb400
- sw zero, 0x001c(k0) /* Precharge */
- sw zero, 0x0020(k0) /* Auto refresh */
- sw zero, 0x0030(k0) /* SDRAM sleep */
+1: lui a0, 0xb400 /* mem_xxx */
+#if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100) || \
+ defined(CONFIG_SOC_AU1500)
+ sw zero, 0x001c(a0) /* Precharge */
+ sync
+ sw zero, 0x0020(a0) /* Auto Refresh */
+ sync
+ sw zero, 0x0030(a0) /* Sleep */
+ sync
+#endif
+
+#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
+ sw zero, 0x08c0(a0) /* Precharge */
sync
+ sw zero, 0x08d0(a0) /* Self Refresh */
+ sync
+
+ /* wait for sdram to enter self-refresh mode */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
+ and t2, t1, t0
+ beq t2, zero, 2b
+ nop
- lui k1, 0xb190
- sw zero, 0x0078(k1) /* get ready to sleep */
+ /* disable SDRAM clocks */
+ lui t0, 0xcfff
+ ori t0, t0, 0xffff
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
sync
- sw zero, 0x007c(k1) /* Put processor to sleep */
+#endif
+
+ /* put power supply and processor to sleep */
+ sw zero, 0x0078(t3) /* sys_slppwr */
+ sync
+ sw zero, 0x007c(t3) /* sys_sleep */
sync
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
/* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
-1: nop
- lw k0, 0x20(sp)
+3: lw k0, 0x20(sp)
mtc0 k0, CP0_STATUS
lw k0, 0x1c(sp)
mtc0 k0, CP0_CONTEXT
@@ -113,10 +142,11 @@ sdsleep:
lw k0, 0x14(sp)
mtc0 k0, CP0_CONFIG
- /* We need to catch the ealry Alchemy SOCs with
+ /* We need to catch the early Alchemy SOCs with
* the write-only Config[OD] bit and set it back to one...
*/
jal au1x00_fixup_config_od
+ nop
lw $1, PT_R1(sp)
lw $2, PT_R2(sp)
lw $3, PT_R3(sp)
@@ -124,14 +154,6 @@ sdsleep:
lw $5, PT_R5(sp)
lw $6, PT_R6(sp)
lw $7, PT_R7(sp)
- lw $8, PT_R8(sp)
- lw $9, PT_R9(sp)
- lw $10, PT_R10(sp)
- lw $11, PT_R11(sp)
- lw $12, PT_R12(sp)
- lw $13, PT_R13(sp)
- lw $14, PT_R14(sp)
- lw $15, PT_R15(sp)
lw $16, PT_R16(sp)
lw $17, PT_R17(sp)
lw $18, PT_R18(sp)
@@ -140,15 +162,11 @@ sdsleep:
lw $21, PT_R21(sp)
lw $22, PT_R22(sp)
lw $23, PT_R23(sp)
- lw $24, PT_R24(sp)
- lw $25, PT_R25(sp)
lw $26, PT_R26(sp)
lw $27, PT_R27(sp)
lw $28, PT_R28(sp)
- lw $29, PT_R29(sp)
lw $30, PT_R30(sp)
lw $31, PT_R31(sp)
- addiu sp, PT_SIZE
-
jr ra
-END(save_and_sleep)
+ addiu sp, PT_SIZE
+END(au1xxx_save_and_sleep)
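For reference, the scratch-register handshake set up before entering self refresh (sys_scratch0 = saved SP, sys_scratch1 = resume address, at KSEG1 0xB1900018/0xB190001C) is what lets the boot ROM hand control back to label 3 after a wakeup. An illustrative C equivalent of the two stores, with the addresses hard-coded exactly as in the assembly; stash_resume_state() is a made-up helper name:

/* Illustration only: C equivalent of the scratch-register stores above. */
#define AU1000_SYS_SCRATCH0	0xB1900018UL
#define AU1000_SYS_SCRATCH1	0xB190001CUL

static inline void stash_resume_state(unsigned long sp, unsigned long resume_pc)
{
	*(volatile unsigned long *)AU1000_SYS_SCRATCH0 = sp;	/* SP for wakeup */
	*(volatile unsigned long *)AU1000_SYS_SCRATCH1 = resume_pc; /* resume PC */
}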
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 563d9390a872..32880146cbc1 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -1,5 +1,7 @@
/*
+ * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
*
+ * Previous incarnations were:
* Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
* Copied and modified Carsten Langgaard's time.c
*
@@ -23,244 +25,141 @@
*
* ########################################################################
*
- * Setting up the clock on the MIPS boards.
- *
- * We provide the clock interrupt processing and the timer offset compute
- * functions. If CONFIG_PM is selected, we also ensure the 32KHz timer is
- * available. -- Dan
+ * Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
+ * databooks). Firmware/Board init code must enable the counters in the
+ * counter control register, otherwise the CP0 counter clocksource/event
+ * will be installed instead (and use of 'wait' instruction is prohibited).
*/
-#include <linux/types.h>
-#include <linux/init.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <asm/mipsregs.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
-static int no_au1xxx_32khz;
-extern int allow_au1k_wait; /* default off for CP0 Counter */
-
-#ifdef CONFIG_PM
-#if HZ < 100 || HZ > 1000
-#error "unsupported HZ value! Must be in [100,1000]"
-#endif
-#define MATCH20_INC (328 * 100 / HZ) /* magic number 328 is for HZ=100... */
-static unsigned long last_pc0, last_match20;
-#endif
+/* 32kHz clock enabled and detected */
+#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
-static DEFINE_SPINLOCK(time_lock);
-
-unsigned long wtimer;
+extern int allow_au1k_wait; /* default off for CP0 Counter */
-#ifdef CONFIG_PM
-static irqreturn_t counter0_irq(int irq, void *dev_id)
+static cycle_t au1x_counter1_read(void)
{
- unsigned long pc0;
- int time_elapsed;
- static int jiffie_drift;
-
- if (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) {
- /* should never happen! */
- printk(KERN_WARNING "counter 0 w status error\n");
- return IRQ_NONE;
- }
-
- pc0 = au_readl(SYS_TOYREAD);
- if (pc0 < last_match20)
- /* counter overflowed */
- time_elapsed = (0xffffffff - last_match20) + pc0;
- else
- time_elapsed = pc0 - last_match20;
-
- while (time_elapsed > 0) {
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- time_elapsed -= MATCH20_INC;
- last_match20 += MATCH20_INC;
- jiffie_drift++;
- }
-
- last_pc0 = pc0;
- au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
- au_sync();
-
- /*
- * Our counter ticks at 10.009765625 ms/tick, we we're running
- * almost 10 uS too slow per tick.
- */
-
- if (jiffie_drift >= 999) {
- jiffie_drift -= 999;
- do_timer(1); /* increment jiffies by one */
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- }
-
- return IRQ_HANDLED;
+ return au_readl(SYS_RTCREAD);
}
-struct irqaction counter0_action = {
- .handler = counter0_irq,
- .flags = IRQF_DISABLED,
- .name = "alchemy-toy",
- .dev_id = NULL,
+static struct clocksource au1x_counter1_clocksource = {
+ .name = "alchemy-counter1",
+ .read = au1x_counter1_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .rating = 100,
};
-/* When we wakeup from sleep, we have to "catch up" on all of the
- * timer ticks we have missed.
- */
-void wakeup_counter0_adjust(void)
+static int au1x_rtcmatch2_set_next_event(unsigned long delta,
+ struct clock_event_device *cd)
{
- unsigned long pc0;
- int time_elapsed;
-
- pc0 = au_readl(SYS_TOYREAD);
- if (pc0 < last_match20)
- /* counter overflowed */
- time_elapsed = (0xffffffff - last_match20) + pc0;
- else
- time_elapsed = pc0 - last_match20;
-
- while (time_elapsed > 0) {
- time_elapsed -= MATCH20_INC;
- last_match20 += MATCH20_INC;
- }
-
- last_pc0 = pc0;
- au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
+ delta += au_readl(SYS_RTCREAD);
+ /* wait for register access */
+ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
+ ;
+ au_writel(delta, SYS_RTCMATCH2);
au_sync();
+ return 0;
}
-/* This is just for debugging to set the timer for a sleep delay. */
-void wakeup_counter0_set(int ticks)
+static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *cd)
{
- unsigned long pc0;
-
- pc0 = au_readl(SYS_TOYREAD);
- last_pc0 = pc0;
- au_writel(last_match20 + (MATCH20_INC * ticks), SYS_TOYMATCH2);
- au_sync();
}
-#endif
-/*
- * I haven't found anyone that doesn't use a 12 MHz source clock,
- * but just in case.....
- */
-#define AU1000_SRC_CLK 12000000
-
-/*
- * We read the real processor speed from the PLL. This is important
- * because it is more accurate than computing it from the 32 KHz
- * counter, if it exists. If we don't have an accurate processor
- * speed, all of the peripherals that derive their clocks based on
- * this advertised speed will introduce error and sometimes not work
- * properly. This function is futher convoluted to still allow configurations
- * to do that in case they have really, really old silicon with a
- * write-only PLL register, that we need the 32 KHz when power management
- * "wait" is enabled, and we need to detect if the 32 KHz isn't present
- * but requested......got it? :-) -- Dan
- */
-unsigned long calc_clock(void)
+static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
{
- unsigned long cpu_speed;
- unsigned long flags;
- unsigned long counter;
-
- spin_lock_irqsave(&time_lock, flags);
-
- /* Power management cares if we don't have a 32 KHz counter. */
- no_au1xxx_32khz = 0;
- counter = au_readl(SYS_COUNTER_CNTRL);
- if (counter & SYS_CNTRL_E0) {
- int trim_divide = 16;
-
- au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);
-
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
- /* RTC now ticks at 32.768/16 kHz */
- au_writel(trim_divide - 1, SYS_RTCTRIM);
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
+ struct clock_event_device *cd = dev_id;
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
- au_writel(0, SYS_TOYWRITE);
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
- } else
- no_au1xxx_32khz = 1;
+static struct clock_event_device au1x_rtcmatch2_clockdev = {
+ .name = "rtcmatch2",
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 100,
+ .irq = AU1000_RTC_MATCH2_INT,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
+ .set_mode = au1x_rtcmatch2_set_mode,
+ .cpumask = CPU_MASK_ALL,
+};
- /*
- * On early Au1000, sys_cpupll was write-only. Since these
- * silicon versions of Au1000 are not sold by AMD, we don't bend
- * over backwards trying to determine the frequency.
- */
- if (cur_cpu_spec[0]->cpu_pll_wo)
-#ifdef CONFIG_SOC_AU1000_FREQUENCY
- cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
-#else
- cpu_speed = 396000000;
-#endif
- else
- cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
- /* On Alchemy CPU:counter ratio is 1:1 */
- mips_hpt_frequency = cpu_speed;
- /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
- set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
- & 0x03) + 2) * 16));
- spin_unlock_irqrestore(&time_lock, flags);
- return cpu_speed;
-}
+static struct irqaction au1x_rtcmatch2_irqaction = {
+ .handler = au1x_rtcmatch2_irq,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .name = "timer",
+ .dev_id = &au1x_rtcmatch2_clockdev,
+};
void __init plat_time_init(void)
{
- unsigned int est_freq = calc_clock();
-
- est_freq += 5000; /* round */
- est_freq -= est_freq%10000;
- printk(KERN_INFO "CPU frequency %u.%02u MHz\n",
- est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
- set_au1x00_speed(est_freq);
- set_au1x00_lcd_clock(); /* program the LCD clock */
+ struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
+ unsigned long t;
+
+ /* Check if firmware (YAMON, ...) has enabled 32kHz and clock
+ * has been detected. If so, install the counter1 clocksource and
+ * rtcmatch2 clockevent; otherwise don't bother. Note that both
+ * bits being set is by no means a definite guarantee that the
+ * counters actually work (the 32S bit seems to be stuck set to 1
+ * once a single clock edge is detected, hence the timeouts).
+ */
+ if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
+ goto cntr_err;
-#ifdef CONFIG_PM
/*
- * setup counter 0, since it keeps ticking after a
- * 'wait' instruction has been executed. The CP0 timer and
- * counter 1 do NOT continue running after 'wait'
- *
- * It's too early to call request_irq() here, so we handle
- * counter 0 interrupt as a special irq and it doesn't show
- * up under /proc/interrupts.
- *
- * Check to ensure we really have a 32 KHz oscillator before
- * we do this.
+ * setup counter 1 (RTC) to tick at full speed
*/
- if (no_au1xxx_32khz)
- printk(KERN_WARNING "WARNING: no 32KHz clock found.\n");
- else {
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
- au_writel(0, SYS_TOYWRITE);
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
-
- au_writel(au_readl(SYS_WAKEMSK) | (1 << 8), SYS_WAKEMSK);
- au_writel(~0, SYS_WAKESRC);
- au_sync();
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
- /* Setup match20 to interrupt once every HZ */
- last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
- au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
- au_sync();
- while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
- setup_irq(AU1000_TOY_MATCH2_INT, &counter0_action);
+ au_writel(0, SYS_RTCTRIM); /* 32.768 kHz */
+ au_sync();
- /* We can use the real 'wait' instruction. */
- allow_au1k_wait = 1;
- }
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+ au_writel(0, SYS_RTCWRITE);
+ au_sync();
-#endif
+ t = 0xffffff;
+ while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+ asm volatile ("nop");
+ if (!t)
+ goto cntr_err;
+
+ /* register counter1 clocksource and event device */
+ clocksource_set_clock(&au1x_counter1_clocksource, 32768);
+ clocksource_register(&au1x_counter1_clocksource);
+
+ cd->shift = 32;
+ cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
+ cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(8, cd); /* ~0.25ms */
+ clockevents_register_device(cd);
+ setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);
+
+ printk(KERN_INFO "Alchemy clocksource installed\n");
+
+ /* can now use 'wait' */
+ allow_au1k_wait = 1;
+ return;
+
+cntr_err:
+ /* counters unusable, use C0 counter */
+ r4k_clockevent_init();
+ init_r4k_clocksource();
+ allow_au1k_wait = 0;
}
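The clockevent numbers above are easy to sanity-check: with shift = 32 and a 32.768 kHz counter, div_sc() yields mult = (32768 << 32) / 10^9, and the 8-tick minimum delta corresponds to 8 / 32768 s, roughly 244 us, which is where the "~0.25ms" comment comes from. A stand-alone check (plain C, assuming the usual ns * mult >> shift convention):

/* Back-of-the-envelope check of the clockevent parameters above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t rate = 32768;		/* counter1 tick rate in Hz */
	const uint64_t nsec_per_sec = 1000000000ULL;
	const unsigned shift = 32;

	/* what div_sc(32768, NSEC_PER_SEC, 32) computes */
	uint64_t mult = (rate << shift) / nsec_per_sec;

	/* smallest programmable delta: 8 ticks, as in clockevent_delta2ns(8, cd) */
	double min_delta_us = 8.0 * 1000000.0 / (double)rate;

	printf("mult=%llu, min delta ~= %.1f us\n",
	       (unsigned long long)mult, min_delta_us);	/* ~244.1 us */
	return 0;
}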
diff --git a/arch/mips/alchemy/db1x00/init.c b/arch/mips/alchemy/db1x00/init.c
deleted file mode 100644
index 847413514964..000000000000
--- a/arch/mips/alchemy/db1x00/init.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * PB1000 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
-#ifdef CONFIG_MIPS_BOSPORUS
- return "Alchemy Bosporus Gateway Reference";
-#else
- return "Alchemy Db1x00";
-#endif
-}
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
-
- prom_init_cmdline();
-
- memsize_str = prom_getenv("memsize");
- if (!memsize_str)
- memsize = 0x04000000;
- else
- strict_strtol(memsize_str, 0, &memsize);
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
new file mode 100644
index 000000000000..730f9f2b30e8
--- /dev/null
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -0,0 +1,18 @@
+#
+# Alchemy Develboards
+#
+
+obj-y += prom.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_MIPS_PB1000) += pb1000/
+obj-$(CONFIG_MIPS_PB1100) += pb1100/
+obj-$(CONFIG_MIPS_PB1200) += pb1200/
+obj-$(CONFIG_MIPS_PB1500) += pb1500/
+obj-$(CONFIG_MIPS_PB1550) += pb1550/
+obj-$(CONFIG_MIPS_DB1000) += db1x00/
+obj-$(CONFIG_MIPS_DB1100) += db1x00/
+obj-$(CONFIG_MIPS_DB1200) += pb1200/
+obj-$(CONFIG_MIPS_DB1500) += db1x00/
+obj-$(CONFIG_MIPS_DB1550) += db1x00/
+obj-$(CONFIG_MIPS_BOSPORUS) += db1x00/
+obj-$(CONFIG_MIPS_MIRAGE) += db1x00/
diff --git a/arch/mips/alchemy/db1x00/Makefile b/arch/mips/alchemy/devboards/db1x00/Makefile
index 274db3b55d82..432241ab8677 100644
--- a/arch/mips/alchemy/db1x00/Makefile
+++ b/arch/mips/alchemy/devboards/db1x00/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor DBAu1xx0 boards.
#
-lib-y := init.o board_setup.o irqmap.o
+obj-y := board_setup.o irqmap.o
diff --git a/arch/mips/alchemy/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
index 9e5ccbbfcedd..a75ffbf99f25 100644
--- a/arch/mips/alchemy/db1x00/board_setup.c
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -32,8 +32,20 @@
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/db1x00.h>
+#include <prom.h>
+
+
static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+const char *get_system_type(void)
+{
+#ifdef CONFIG_MIPS_BOSPORUS
+ return "Alchemy Bosporus Gateway Reference";
+#else
+ return "Alchemy Db1x00";
+#endif
+}
+
void board_reset(void)
{
/* Hit BCSR.SW_RESET[RESET] */
@@ -43,6 +55,31 @@ void board_reset(void)
void __init board_setup(void)
{
u32 pin_func = 0;
+ char *argptr;
+
+ argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
+#ifdef CONFIG_FB_AU1100
+ argptr = strstr(argptr, "video=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ /* default panel */
+ /*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
+ }
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+ /* au1000 does not support vra, au1500 and au1100 do */
+ strcat(argptr, " au1000_audio=vra");
+ argptr = prom_getcmdline();
+#endif
/* Not valid for Au1550 */
#if defined(CONFIG_IRDA) && \
diff --git a/arch/mips/alchemy/db1x00/irqmap.c b/arch/mips/alchemy/devboards/db1x00/irqmap.c
index 94c090e8bf7a..0b09025087c6 100644
--- a/arch/mips/alchemy/db1x00/irqmap.c
+++ b/arch/mips/alchemy/devboards/db1x00/irqmap.c
@@ -27,6 +27,7 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
@@ -66,21 +67,24 @@ struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
#ifndef CONFIG_MIPS_MIRAGE
#ifdef CONFIG_MIPS_DB1550
- { AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
- { AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
+ { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
+ { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
#else
- { AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 Fully_Interted# */
- { AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 STSCHG# */
- { AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
+ { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 Fully_Inserted# */
+ { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 STSCHG# */
+ { AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
- { AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 Fully_Interted# */
- { AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 STSCHG# */
- { AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
+ { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 Fully_Inserted# */
+ { AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 STSCHG# */
+ { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
#endif
#else
- { AU1000_GPIO_7, INTC_INT_RISE_EDGE, 0 }, /* touchscreen pen down */
+ { AU1000_GPIO_7, IRQF_TRIGGER_RISING, 0 }, /* touchscreen pen down */
#endif
};
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
diff --git a/arch/mips/alchemy/pb1000/Makefile b/arch/mips/alchemy/devboards/pb1000/Makefile
index 99bbec0ca41b..97c6615ba2bb 100644
--- a/arch/mips/alchemy/pb1000/Makefile
+++ b/arch/mips/alchemy/devboards/pb1000/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1000 board.
#
-lib-y := init.o board_setup.o irqmap.o
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index 25df167a95b3..aed2fdecc709 100644
--- a/arch/mips/alchemy/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -23,22 +23,48 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/delay.h>
-
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1000.h>
+#include <prom.h>
+
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+ { AU1000_GPIO_15, IRQF_TRIGGER_LOW, 0 },
+};
+
+
+const char *get_system_type(void)
+{
+ return "Alchemy Pb1000";
+}
void board_reset(void)
{
}
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
void __init board_setup(void)
{
u32 pin_func, static_cfg0;
u32 sys_freqctrl, sys_clksrc;
u32 prid = read_c0_prid();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char *argptr = prom_getcmdline();
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
/* Set AUX clock to 12 MHz * 8 = 96 MHz */
au_writel(8, SYS_AUXPLL);
au_writel(0, SYS_PINSTATERD);
diff --git a/arch/mips/alchemy/pb1100/Makefile b/arch/mips/alchemy/devboards/pb1100/Makefile
index 793e97c49e46..c586dd7e91dc 100644
--- a/arch/mips/alchemy/pb1100/Makefile
+++ b/arch/mips/alchemy/devboards/pb1100/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1100 board.
#
-lib-y := init.o board_setup.o irqmap.o
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index c0bfd59a7a36..4df57fae15d4 100644
--- a/arch/mips/alchemy/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -25,19 +25,66 @@
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1100.h>
+#include <prom.h>
+
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+ { AU1000_GPIO_9, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card Fully_Inserted# */
+ { AU1000_GPIO_10, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card STSCHG# */
+ { AU1000_GPIO_11, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card IRQ# */
+ { AU1000_GPIO_13, IRQF_TRIGGER_LOW, 0 }, /* DC_IRQ# */
+};
+
+
+const char *get_system_type(void)
+{
+ return "Alchemy Pb1100";
+}
+
void board_reset(void)
{
/* Hit BCSR.RST_VDDI[SOFT_RESET] */
au_writel(0x00000000, PB1100_RST_VDDI);
}
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
void __init board_setup(void)
{
volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
+ char *argptr;
+
+ argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
+#ifdef CONFIG_FB_AU1100
+ argptr = strstr(argptr, "video=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ /* default panel */
+ /*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
+ }
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+ /* au1000 does not support vra, au1500 and au1100 do */
+ strcat(argptr, " au1000_audio=vra");
+ argptr = prom_getcmdline();
+#endif
/* Set AUX clock to 12 MHz * 8 = 96 MHz */
au_writel(8, SYS_AUXPLL);
diff --git a/arch/mips/alchemy/pb1200/Makefile b/arch/mips/alchemy/devboards/pb1200/Makefile
index d678adf7ce85..c8c3a99fb68a 100644
--- a/arch/mips/alchemy/pb1200/Makefile
+++ b/arch/mips/alchemy/devboards/pb1200/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Alchemy Semiconductor Pb1200/DBAu1200 boards.
#
-lib-y := init.o board_setup.o irqmap.o
-obj-y += platform.o
+obj-y := board_setup.o irqmap.o platform.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 6cb2115059ad..94e6b7e7753d 100644
--- a/arch/mips/alchemy/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -30,8 +30,11 @@
#include <prom.h>
#include <au1xxx.h>
-extern void _board_init_irq(void);
-extern void (*board_init_irq)(void);
+
+const char *get_system_type(void)
+{
+ return "Alchemy Pb1200";
+}
void board_reset(void)
{
@@ -41,7 +44,19 @@ void board_reset(void)
void __init board_setup(void)
{
- char *argptr = NULL;
+ char *argptr;
+
+ argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+#ifdef CONFIG_FB_AU1200
+ strcat(argptr, " video=au1200fb:panel:bs");
+#endif
#if 0
{
@@ -99,16 +114,6 @@ void __init board_setup(void)
}
#endif
-#ifdef CONFIG_FB_AU1200
- argptr = prom_getcmdline();
-#ifdef CONFIG_MIPS_PB1200
- strcat(argptr, " video=au1200fb:panel:bs");
-#endif
-#ifdef CONFIG_MIPS_DB1200
- strcat(argptr, " video=au1200fb:panel:bs");
-#endif
-#endif
-
/*
* The Pb1200 development board uses external MUX for PSC0 to
* support SMB/SPI. bcsr->resets bit 12: 0=SMB 1=SPI
@@ -124,9 +129,6 @@ void __init board_setup(void)
#ifdef CONFIG_MIPS_DB1200
printk(KERN_INFO "AMD Alchemy Db1200 Board\n");
#endif
-
- /* Setup Pb1200 External Interrupt Controller */
- board_init_irq = _board_init_irq;
}
int board_au1200fb_panel(void)
diff --git a/arch/mips/alchemy/pb1200/irqmap.c b/arch/mips/alchemy/devboards/pb1200/irqmap.c
index 2a505ad8715b..fe47498da280 100644
--- a/arch/mips/alchemy/pb1200/irqmap.c
+++ b/arch/mips/alchemy/devboards/pb1200/irqmap.c
@@ -40,91 +40,65 @@
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
/* This is external interrupt cascade */
- { AU1000_GPIO_7, INTC_INT_LOW_LEVEL, 0 },
+ { AU1000_GPIO_7, IRQF_TRIGGER_LOW, 0 },
};
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
/*
* Support for External interrupts on the Pb1200 Development platform.
*/
-static volatile int pb1200_cascade_en;
-irqreturn_t pb1200_cascade_handler(int irq, void *dev_id)
+static void pb1200_cascade_handler(unsigned int irq, struct irq_desc *d)
{
unsigned short bisr = bcsr->int_status;
- int extirq_nr = 0;
-
- /* Clear all the edge interrupts. This has no effect on level. */
- bcsr->int_status = bisr;
- for ( ; bisr; bisr &= bisr - 1) {
- extirq_nr = PB1200_INT_BEGIN + __ffs(bisr);
- /* Ack and dispatch IRQ */
- do_IRQ(extirq_nr);
- }
-
- return IRQ_RETVAL(1);
-}
-inline void pb1200_enable_irq(unsigned int irq_nr)
-{
- bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
- bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
+ for ( ; bisr; bisr &= bisr - 1)
+ generic_handle_irq(PB1200_INT_BEGIN + __ffs(bisr));
}
-inline void pb1200_disable_irq(unsigned int irq_nr)
+/* NOTE: both the enable and mask bits must be cleared, otherwise the
+ * CPLD generates tons of spurious interrupts (at least on the DB1200).
+ */
+static void pb1200_mask_irq(unsigned int irq_nr)
{
bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
+ au_sync();
}
-static unsigned int pb1200_setup_cascade(void)
-{
- return request_irq(AU1000_GPIO_7, &pb1200_cascade_handler,
- 0, "Pb1200 Cascade", &pb1200_cascade_handler);
-}
-
-static unsigned int pb1200_startup_irq(unsigned int irq)
+static void pb1200_maskack_irq(unsigned int irq_nr)
{
- if (++pb1200_cascade_en == 1) {
- int res;
-
- res = pb1200_setup_cascade();
- if (res)
- return res;
- }
-
- pb1200_enable_irq(irq);
-
- return 0;
+ bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
+ bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
+ bcsr->int_status = 1 << (irq_nr - PB1200_INT_BEGIN); /* ack */
+ au_sync();
}
-static void pb1200_shutdown_irq(unsigned int irq)
+static void pb1200_unmask_irq(unsigned int irq_nr)
{
- pb1200_disable_irq(irq);
- if (--pb1200_cascade_en == 0)
- free_irq(AU1000_GPIO_7, &pb1200_cascade_handler);
+ bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
+ bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
+ au_sync();
}
-static struct irq_chip external_irq_type = {
+static struct irq_chip pb1200_cpld_irq_type = {
#ifdef CONFIG_MIPS_PB1200
.name = "Pb1200 Ext",
#endif
#ifdef CONFIG_MIPS_DB1200
.name = "Db1200 Ext",
#endif
- .startup = pb1200_startup_irq,
- .shutdown = pb1200_shutdown_irq,
- .ack = pb1200_disable_irq,
- .mask = pb1200_disable_irq,
- .mask_ack = pb1200_disable_irq,
- .unmask = pb1200_enable_irq,
+ .mask = pb1200_mask_irq,
+ .mask_ack = pb1200_maskack_irq,
+ .unmask = pb1200_unmask_irq,
};
-void _board_init_irq(void)
+void __init board_init_irq(void)
{
unsigned int irq;
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+
#ifdef CONFIG_MIPS_PB1200
/* We have a problem with CPLD rev 3. */
if (((bcsr->whoami & BCSR_WHOAMI_CPLD) >> 4) <= 3) {
@@ -146,15 +120,15 @@ void _board_init_irq(void)
panic("Game over. Your score is 0.");
}
#endif
+ /* mask & disable & ack all */
+ bcsr->intclr_mask = 0xffff;
+ bcsr->intclr = 0xffff;
+ bcsr->int_status = 0xffff;
+ au_sync();
- for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++) {
- set_irq_chip_and_handler(irq, &external_irq_type,
- handle_level_irq);
- pb1200_disable_irq(irq);
- }
+ for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++)
+ set_irq_chip_and_handler_name(irq, &pb1200_cpld_irq_type,
+ handle_level_irq, "level");
- /*
- * GPIO_7 can not be hooked here, so it is hooked upon first
- * request of any source attached to the cascade.
- */
+ set_irq_chained_handler(AU1000_GPIO_7, pb1200_cascade_handler);
}
diff --git a/arch/mips/alchemy/pb1200/platform.c b/arch/mips/alchemy/devboards/pb1200/platform.c
index 95303297c534..95303297c534 100644
--- a/arch/mips/alchemy/pb1200/platform.c
+++ b/arch/mips/alchemy/devboards/pb1200/platform.c
diff --git a/arch/mips/alchemy/pb1500/Makefile b/arch/mips/alchemy/devboards/pb1500/Makefile
index 602f38df20bb..173b419a7479 100644
--- a/arch/mips/alchemy/pb1500/Makefile
+++ b/arch/mips/alchemy/devboards/pb1500/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1500 board.
#
-lib-y := init.o board_setup.o irqmap.o
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index 035771c6e5b8..fed3b093156a 100644
--- a/arch/mips/alchemy/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -25,20 +25,64 @@
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1500.h>
+#include <prom.h>
+
+
+char irq_tab_alchemy[][5] __initdata = {
+ [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */
+ [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
+};
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+ { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+ { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
+};
+
+
+const char *get_system_type(void)
+{
+ return "Alchemy Pb1500";
+}
+
void board_reset(void)
{
/* Hit BCSR.RST_VDDI[SOFT_RESET] */
au_writel(0x00000000, PB1500_RST_VDDI);
}
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
void __init board_setup(void)
{
u32 pin_func;
u32 sys_freqctrl, sys_clksrc;
+ char *argptr;
+
+ argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
+#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
+ /* au1000 does not support vra, au1500 and au1100 do */
+ strcat(argptr, " au1000_audio=vra");
+ argptr = prom_getcmdline();
+#endif
sys_clksrc = sys_freqctrl = pin_func = 0;
/* Set AUX clock to 12 MHz * 8 = 96 MHz */
diff --git a/arch/mips/alchemy/pb1550/Makefile b/arch/mips/alchemy/devboards/pb1550/Makefile
index 7d8beca87fa5..cff95bcdb2ca 100644
--- a/arch/mips/alchemy/pb1550/Makefile
+++ b/arch/mips/alchemy/devboards/pb1550/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1550 board.
#
-lib-y := init.o board_setup.o irqmap.o
+obj-y := board_setup.o
diff --git a/arch/mips/alchemy/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index 0ed76b64b6ab..b6e9e7d247a3 100644
--- a/arch/mips/alchemy/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -28,20 +28,54 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1550.h>
+#include <prom.h>
+
+
+char irq_tab_alchemy[][5] __initdata = {
+ [12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
+ [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
+};
+
+struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
+ { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
+ { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
+};
+
+const char *get_system_type(void)
+{
+ return "Alchemy Pb1550";
+}
+
void board_reset(void)
{
/* Hit BCSR.SYSTEM[RESET] */
au_writew(au_readw(0xAF00001C) & ~BCSR_SYSTEM_RESET, 0xAF00001C);
}
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
+
void __init board_setup(void)
{
u32 pin_func;
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char *argptr;
+ argptr = prom_getcmdline();
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
/*
* Enable PSC1 SYNC for AC'97. Normaly done in audio driver,
* but it is board specific code, so put it here.
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
new file mode 100644
index 000000000000..d5eb9c325ed0
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -0,0 +1,229 @@
+/*
+ * Alchemy Development Board example suspend userspace interface.
+ *
+ * (c) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
+ */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/suspend.h>
+#include <linux/sysfs.h>
+#include <asm/mach-au1x00/au1000.h>
+
+/*
+ * Generic suspend userspace interface for Alchemy development boards.
+ * This code exports a few sysfs nodes under /sys/power/db1x/ which
+ * can be used by userspace to en/disable all au1x-provided wakeup
+ * sources and configure the timeout after which the TOYMATCH2 irq
+ * is to trigger a wakeup.
+ */
+
+
+static unsigned long db1x_pm_sleep_secs;
+static unsigned long db1x_pm_wakemsk;
+static unsigned long db1x_pm_last_wakesrc;
+
+static int db1x_pm_enter(suspend_state_t state)
+{
+ /* enable GPIO based wakeup */
+ au_writel(1, SYS_PININPUTEN);
+
+ /* clear and setup wake cause and source */
+ au_writel(0, SYS_WAKEMSK);
+ au_sync();
+ au_writel(0, SYS_WAKESRC);
+ au_sync();
+
+ au_writel(db1x_pm_wakemsk, SYS_WAKEMSK);
+ au_sync();
+
+ /* setup 1Hz-timer-based wakeup: wait for reg access */
+ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
+ asm volatile ("nop");
+
+ au_writel(au_readl(SYS_TOYREAD) + db1x_pm_sleep_secs, SYS_TOYMATCH2);
+ au_sync();
+
+ /* wait for value to really hit the register */
+ while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
+ asm volatile ("nop");
+
+ /* ...and now the sandman can come! */
+ au_sleep();
+
+ return 0;
+}
+
+static int db1x_pm_begin(suspend_state_t state)
+{
+ if (!db1x_pm_wakemsk) {
+ printk(KERN_ERR "db1x: no wakeup source activated!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void db1x_pm_end(void)
+{
+ /* read and store the wakeup source, then clear the register. To
+ * be able to clear it, WAKEMSK must be cleared first.
+ */
+ db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
+
+ au_writel(0, SYS_WAKEMSK);
+ au_writel(0, SYS_WAKESRC);
+ au_sync();
+
+}
+
+static struct platform_suspend_ops db1x_pm_ops = {
+ .valid = suspend_valid_only_mem,
+ .begin = db1x_pm_begin,
+ .enter = db1x_pm_enter,
+ .end = db1x_pm_end,
+};
+
+#define ATTRCMP(x) (0 == strcmp(attr->attr.name, #x))
+
+static ssize_t db1x_pmattr_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ int idx;
+
+ if (ATTRCMP(timer_timeout))
+ return sprintf(buf, "%lu\n", db1x_pm_sleep_secs);
+
+ else if (ATTRCMP(timer))
+ return sprintf(buf, "%u\n",
+ !!(db1x_pm_wakemsk & SYS_WAKEMSK_M2));
+
+ else if (ATTRCMP(wakesrc))
+ return sprintf(buf, "%lu\n", db1x_pm_last_wakesrc);
+
+ else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+ ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+ ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
+ idx = (attr->attr.name)[4] - '0';
+ return sprintf(buf, "%d\n",
+ !!(db1x_pm_wakemsk & SYS_WAKEMSK_GPIO(idx)));
+
+ } else if (ATTRCMP(wakemsk)) {
+ return sprintf(buf, "%08lx\n", db1x_pm_wakemsk);
+ }
+
+ return -ENOENT;
+}
+
+static ssize_t db1x_pmattr_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *instr,
+ size_t bytes)
+{
+ unsigned long l;
+ int tmp;
+
+ if (ATTRCMP(timer_timeout)) {
+ tmp = strict_strtoul(instr, 0, &l);
+ if (tmp)
+ return tmp;
+
+ db1x_pm_sleep_secs = l;
+
+ } else if (ATTRCMP(timer)) {
+ if (instr[0] != '0')
+ db1x_pm_wakemsk |= SYS_WAKEMSK_M2;
+ else
+ db1x_pm_wakemsk &= ~SYS_WAKEMSK_M2;
+
+ } else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
+ ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
+ ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
+ tmp = (attr->attr.name)[4] - '0';
+ if (instr[0] != '0') {
+ db1x_pm_wakemsk |= SYS_WAKEMSK_GPIO(tmp);
+ } else {
+ db1x_pm_wakemsk &= ~SYS_WAKEMSK_GPIO(tmp);
+ }
+
+ } else if (ATTRCMP(wakemsk)) {
+ tmp = strict_strtoul(instr, 0, &l);
+ if (tmp)
+ return tmp;
+
+ db1x_pm_wakemsk = l & 0x0000003f;
+
+ } else
+ bytes = -ENOENT;
+
+ return bytes;
+}
+
+#define ATTR(x) \
+ static struct kobj_attribute x##_attribute = \
+ __ATTR(x, 0664, db1x_pmattr_show, \
+ db1x_pmattr_store);
+
+ATTR(gpio0) /* GPIO-based wakeup enable */
+ATTR(gpio1)
+ATTR(gpio2)
+ATTR(gpio3)
+ATTR(gpio4)
+ATTR(gpio5)
+ATTR(gpio6)
+ATTR(gpio7)
+ATTR(timer) /* TOYMATCH2-based wakeup enable */
+ATTR(timer_timeout) /* timer-based wakeup timeout value, in seconds */
+ATTR(wakesrc) /* contents of SYS_WAKESRC after last wakeup */
+ATTR(wakemsk) /* direct access to SYS_WAKEMSK */
+
+#define ATTR_LIST(x) & x ## _attribute.attr
+static struct attribute *db1x_pmattrs[] = {
+ ATTR_LIST(gpio0),
+ ATTR_LIST(gpio1),
+ ATTR_LIST(gpio2),
+ ATTR_LIST(gpio3),
+ ATTR_LIST(gpio4),
+ ATTR_LIST(gpio5),
+ ATTR_LIST(gpio6),
+ ATTR_LIST(gpio7),
+ ATTR_LIST(timer),
+ ATTR_LIST(timer_timeout),
+ ATTR_LIST(wakesrc),
+ ATTR_LIST(wakemsk),
+ NULL, /* terminator */
+};
+
+static struct attribute_group db1x_pmattr_group = {
+ .name = "db1x",
+ .attrs = db1x_pmattrs,
+};
+
+/*
+ * Initialize suspend interface
+ */
+static int __init pm_init(void)
+{
+ /* init TOY to tick at 1Hz if not already done. No need to wait
+ * for confirmation since there's plenty of time from here to
+ * the next suspend cycle.
+ */
+ if (au_readl(SYS_TOYTRIM) != 32767) {
+ au_writel(32767, SYS_TOYTRIM);
+ au_sync();
+ }
+
+ db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
+
+ au_writel(0, SYS_WAKESRC);
+ au_sync();
+ au_writel(0, SYS_WAKEMSK);
+ au_sync();
+
+ suspend_set_ops(&db1x_pm_ops);
+
+ return sysfs_create_group(power_kobj, &db1x_pmattr_group);
+}
+
+late_initcall(pm_init);
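Assuming the standard /sys/power/state interface, the new nodes can be driven from userspace like this (hedged sketch, not part of the patch; the db1x paths follow from the attribute group registered on power_kobj):

/* Userspace sketch: arm the 1 Hz timer wakeup, then suspend to RAM. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_str("/sys/power/db1x/timer_timeout", "30");	/* wake after ~30 s */
	write_str("/sys/power/db1x/timer", "1");		/* enable TOYMATCH2 wakeup */
	write_str("/sys/power/state", "mem");			/* suspend to RAM */
	return 0;
}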
diff --git a/arch/mips/alchemy/pb1550/init.c b/arch/mips/alchemy/devboards/prom.c
index e1055a13a1a0..0042bd6b1d7d 100644
--- a/arch/mips/alchemy/pb1550/init.c
+++ b/arch/mips/alchemy/devboards/prom.c
@@ -1,9 +1,9 @@
/*
+ * Common code used by all Alchemy develboards.
*
- * BRIEF MODULE DESCRIPTION
- * Pb1550 board setup
+ * Extracted from files which had this to say:
*
- * Copyright 2001, 2008 MontaVista Software Inc.
+ * Copyright 2000, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -29,15 +29,19 @@
#include <linux/init.h>
#include <linux/kernel.h>
-
#include <asm/bootinfo.h>
-
+#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
-const char *get_system_type(void)
-{
- return "Alchemy Pb1550";
-}
+#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_DB1000) || \
+ defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_DB1100) || \
+ defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_DB1500) || \
+ defined(CONFIG_MIPS_BOSPORUS) || defined(CONFIG_MIPS_MIRAGE)
+#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x04000000
+
+#else /* Au1550/Au1200-based develboards */
+#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x08000000
+#endif
void __init prom_init(void)
{
@@ -51,8 +55,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
- memsize = 0x08000000;
+ memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
else
- strict_strtol(memsize_str, 0, &memsize);
+ strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 3f8079186cf2..8ed1ae12bc55 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -32,6 +32,8 @@
#include <asm/mach-au1x00/au1000.h>
+#include <prom.h>
+
extern int (*board_pci_idsel)(unsigned int devsel, int assert);
int mtx1_pci_idsel(unsigned int devsel, int assert);
@@ -43,6 +45,16 @@ void board_reset(void)
void __init board_setup(void)
{
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char *argptr;
+ argptr = prom_getcmdline();
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* Enable USB power switch */
au_writel(au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR);
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c
index 3bae13c28954..5e871c8d9e96 100644
--- a/arch/mips/alchemy/mtx-1/init.c
+++ b/arch/mips/alchemy/mtx-1/init.c
@@ -55,6 +55,6 @@ void __init prom_init(void)
if (!memsize_str)
memsize = 0x04000000;
else
- strict_strtol(memsize_str, 0, &memsize);
+ strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/mtx-1/irqmap.c b/arch/mips/alchemy/mtx-1/irqmap.c
index f2bf02951e9c..f1ab12ab3433 100644
--- a/arch/mips/alchemy/mtx-1/irqmap.c
+++ b/arch/mips/alchemy/mtx-1/irqmap.c
@@ -27,7 +27,7 @@
*/
#include <linux/init.h>
-
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
char irq_tab_alchemy[][5] __initdata = {
@@ -42,11 +42,15 @@ char irq_tab_alchemy[][5] __initdata = {
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
- { AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
+ { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+ { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
};
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
+
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
diff --git a/arch/mips/alchemy/pb1000/init.c b/arch/mips/alchemy/pb1000/init.c
deleted file mode 100644
index 8a9c7d57208d..000000000000
--- a/arch/mips/alchemy/pb1000/init.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Pb1000 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
- return "Alchemy Pb1000";
-}
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = (int)fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
-
- prom_init_cmdline();
- memsize_str = prom_getenv("memsize");
- if (!memsize_str)
- memsize = 0x04000000;
- else
- strict_strtol(memsize_str, 0, &memsize);
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1000/irqmap.c b/arch/mips/alchemy/pb1000/irqmap.c
deleted file mode 100644
index b3d56b0af321..000000000000
--- a/arch/mips/alchemy/pb1000/irqmap.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_15, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1100/init.c b/arch/mips/alchemy/pb1100/init.c
deleted file mode 100644
index 7c6792308bc5..000000000000
--- a/arch/mips/alchemy/pb1100/init.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Pb1100 board setup
- *
- * Copyright 2002, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
- return "Alchemy Pb1100";
-}
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg3;
-
- prom_init_cmdline();
-
- memsize_str = prom_getenv("memsize");
- if (!memsize_str)
- memsize = 0x04000000;
- else
- strict_strtol(memsize_str, 0, &memsize);
-
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1100/irqmap.c b/arch/mips/alchemy/pb1100/irqmap.c
deleted file mode 100644
index 9b7dd8b41283..000000000000
--- a/arch/mips/alchemy/pb1100/irqmap.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xx0 IRQ map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_9, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card Fully_Inserted# */
- { AU1000_GPIO_10, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card STSCHG# */
- { AU1000_GPIO_11, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card IRQ# */
- { AU1000_GPIO_13, INTC_INT_LOW_LEVEL, 0 }, /* DC_IRQ# */
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1200/init.c b/arch/mips/alchemy/pb1200/init.c
deleted file mode 100644
index e9b2a0fd48ae..000000000000
--- a/arch/mips/alchemy/pb1200/init.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PB1200 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
- return "Alchemy Pb1200";
-}
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = (int)fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
-
- prom_init_cmdline();
- memsize_str = prom_getenv("memsize");
- if (!memsize_str)
- memsize = 0x08000000;
- else
- strict_strtol(memsize_str, 0, &memsize);
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1500/init.c b/arch/mips/alchemy/pb1500/init.c
deleted file mode 100644
index 3b6e395cf952..000000000000
--- a/arch/mips/alchemy/pb1500/init.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Pb1500 board setup
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-
-#include <prom.h>
-
-const char *get_system_type(void)
-{
- return "Alchemy Pb1500";
-}
-
-void __init prom_init(void)
-{
- unsigned char *memsize_str;
- unsigned long memsize;
-
- prom_argc = (int)fw_arg0;
- prom_argv = (char **)fw_arg1;
- prom_envp = (char **)fw_arg2;
-
- prom_init_cmdline();
- memsize_str = prom_getenv("memsize");
- if (!memsize_str)
- memsize = 0x04000000;
- else
- strict_strtol(memsize_str, 0, &memsize);
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/alchemy/pb1500/irqmap.c b/arch/mips/alchemy/pb1500/irqmap.c
deleted file mode 100644
index 39c4682766a8..000000000000
--- a/arch/mips/alchemy/pb1500/irqmap.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-char irq_tab_alchemy[][5] __initdata = {
- [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
- { AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/pb1550/irqmap.c b/arch/mips/alchemy/pb1550/irqmap.c
deleted file mode 100644
index a02a4d1fa899..000000000000
--- a/arch/mips/alchemy/pb1550/irqmap.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xx0 IRQ map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-char irq_tab_alchemy[][5] __initdata = {
- [12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
-};
-
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index 4c587acac5c3..a2634fabc50d 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -28,6 +28,8 @@
#include <asm/mach-au1x00/au1000.h>
+#include <prom.h>
+
void board_reset(void)
{
/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
@@ -38,6 +40,16 @@ void __init board_setup(void)
{
u32 pin_func;
+#ifdef CONFIG_SERIAL_8250_CONSOLE
+ char *argptr;
+ argptr = prom_getcmdline();
+ argptr = strstr(argptr, "console=");
+ if (argptr == NULL) {
+ argptr = prom_getcmdline();
+ strcat(argptr, " console=ttyS0,115200");
+ }
+#endif
+
/* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */
pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
pin_func |= SYS_PF_UR3;
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c
index 7516434760a1..456fa142c093 100644
--- a/arch/mips/alchemy/xxs1500/init.c
+++ b/arch/mips/alchemy/xxs1500/init.c
@@ -53,6 +53,6 @@ void __init prom_init(void)
if (!memsize_str)
memsize = 0x04000000;
else
- strict_strtol(memsize_str, 0, &memsize);
+ strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/xxs1500/irqmap.c b/arch/mips/alchemy/xxs1500/irqmap.c
index edf06ed11870..0f0f3012e5fd 100644
--- a/arch/mips/alchemy/xxs1500/irqmap.c
+++ b/arch/mips/alchemy/xxs1500/irqmap.c
@@ -27,23 +27,26 @@
*/
#include <linux/init.h>
-
+#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
- { AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
- { AU1500_GPIO_207, INTC_INT_LOW_LEVEL, 0 },
+ { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
+ { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
+ { AU1500_GPIO_207, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 },
- { AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* CF interrupt */
- { AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 },
+ { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
+ { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
+ { AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 },
+ { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 },
+ { AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* CF interrupt */
+ { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 },
};
-int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
+void __init board_init_irq(void)
+{
+ au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+}
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
new file mode 100644
index 000000000000..094c17e38e16
--- /dev/null
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -0,0 +1,85 @@
+config CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ bool "Enable Octeon specific options"
+ depends on CPU_CAVIUM_OCTEON
+ default "y"
+
+config CAVIUM_OCTEON_2ND_KERNEL
+ bool "Build the kernel to be used as a 2nd kernel on the same chip"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "n"
+ help
+ This option configures this kernel to be linked at a different
+ address and use the 2nd uart for output. This allows a kernel built
+ with this option to be run at the same time as one built without this
+ option.
+
+config CAVIUM_OCTEON_HW_FIX_UNALIGNED
+ bool "Enable hardware fixups of unaligned loads and stores"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "y"
+ help
+ Configure the Octeon hardware to automatically fix unaligned loads
+ and stores. Normally unaligned accesses are fixed using a kernel
+ exception handler. This option enables the hardware automatic fixups,
+ which require only an extra 3 cycles. Disable this option if you
+ are running code that relies on address exceptions on unaligned
+ accesses.
+
+config CAVIUM_OCTEON_CVMSEG_SIZE
+ int "Number of L1 cache lines reserved for CVMSEG memory"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ range 0 54
+ default 1
+ help
+ CVMSEG LM is a segment that accesses portions of the dcache as a
+ local memory; the larger CVMSEG is, the smaller the cache is.
+ This selects the size of CVMSEG LM, which is in cache blocks. The
+ legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is
+ between zero and 6192 bytes).
+
+config CAVIUM_OCTEON_LOCK_L2
+ bool "Lock often used kernel code in the L2"
+ depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
+ default "y"
+ help
+ Enable locking parts of the kernel into the L2 cache.
+
+config CAVIUM_OCTEON_LOCK_L2_TLB
+ bool "Lock the TLB handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level TLB fast path into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ bool "Lock the exception handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level exception handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ bool "Lock the interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the low level interrupt handler into L2.
+
+config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ bool "Lock the 2nd level interrupt handler in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the 2nd level interrupt handler in L2.
+
+config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ bool "Lock memcpy() in L2"
+ depends on CAVIUM_OCTEON_LOCK_L2
+ default "y"
+ help
+ Lock the kernel's implementation of memcpy() into L2.
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ select SPARSEMEM_STATIC
+ depends on CPU_CAVIUM_OCTEON
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
new file mode 100644
index 000000000000..1c2a7faf5881
--- /dev/null
+++ b/arch/mips/cavium-octeon/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o
+obj-y += dma-octeon.o flash_setup.o
+obj-y += octeon-memcpy.o
+
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
new file mode 100644
index 000000000000..70fd92c31657
--- /dev/null
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ipd-defs.h>
+
+/*
+ * Set the current core's cvmcount counter to the value of the
+ * IPD_CLK_COUNT. We do this on all cores as they are brought
+ * on-line. This allows for a read from a local cpu register to
+ * access a synchronized counter.
+ *
+ */
+void octeon_init_cvmcount(void)
+{
+ unsigned long flags;
+ unsigned loops = 2;
+
+ /* Clobber loops so GCC will not unroll the following while loop. */
+ asm("" : "+r" (loops));
+
+ local_irq_save(flags);
+ /*
+ * Loop several times so we are executing from the cache,
+ * which should give more deterministic timing.
+ */
+ while (loops--)
+ write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
+ local_irq_restore(flags);
+}
+
+static cycle_t octeon_cvmcount_read(void)
+{
+ return read_c0_cvmcount();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "OCTEON_CVMCOUNT",
+ .read = octeon_cvmcount_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init plat_time_init(void)
+{
+ clocksource_mips.rating = 300;
+ clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
+ clocksource_register(&clocksource_mips);
+}
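A note on what the clocksource registered above buys us: the generic timekeeping code converts raw cvmcount deltas to nanoseconds with a mult/shift pair, which is what clocksource_set_clock() derives from mips_hpt_frequency. The following standalone sketch is illustrative only (clk_to_ns() and the 500 MHz figure are assumptions, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* ns = (cycles * mult) >> shift, with mult chosen so that one second's
 * worth of cycles maps back to NSEC_PER_SEC. */
static uint64_t clk_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	const uint32_t shift = 22;              /* assumed shift value */
	const uint64_t hz = 500000000ULL;       /* assumed 500 MHz IPD clock */
	uint32_t mult = (uint32_t)((1000000000ULL << shift) / hz);

	/* 500 cycles at 500 MHz is 1000 ns. */
	printf("%llu ns\n", (unsigned long long)clk_to_ns(500, mult, shift));
	return 0;
}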
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
new file mode 100644
index 000000000000..01b1ef94b361
--- /dev/null
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
+ * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
+ * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
+ * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
+ * IP32 changes by Ilya.
+ * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
+ * the kernels original.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+
+#include <dma-coherence.h>
+
+dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+{
+ /* Without PCI/PCIe this function can be called for Octeon internal
+ devices such as USB. These devices all support 64bit addressing */
+ mb();
+ return virt_to_phys(ptr);
+}
+
+void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+{
+ /* Without PCI/PCIe this function can be called for Octeon internal
+ * devices such as USB. These devices all support 64bit addressing */
+ return;
+}
diff --git a/arch/mips/cavium-octeon/executive/Makefile b/arch/mips/cavium-octeon/executive/Makefile
new file mode 100644
index 000000000000..80d6cb26766b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the Cavium Octeon specific kernel interface routines
+# under Linux.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2005-2008 Cavium Networks
+#
+
+obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o
+
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
new file mode 100644
index 000000000000..4f5a08b37ccd
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -0,0 +1,586 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate only memory allocator. Used to allocate memory at
+ * application start time.
+ */
+
+#include <linux/kernel.h>
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-bootmem.h>
+
+/*#define DEBUG */
+
+
+static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
+
+/* See header file for descriptions of functions */
+
+/*
+ * Wrapper functions are provided for reading/writing the size and
+ * next block values, as these may not be directly addressable (in 32
+ * bit applications, for instance). Offsets of data elements in the
+ * bootmem list must match cvmx_bootmem_block_header_t.
+ */
+#define NEXT_OFFSET 0
+#define SIZE_OFFSET 8
+
+static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
+{
+ cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
+}
+
+static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
+{
+ cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
+}
+
+static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
+}
+
+static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
+{
+ return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
+}
+
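For orientation, the NEXT_OFFSET/SIZE_OFFSET accessors above read and write a two-word header kept at the start of every free block. A hypothetical C view of that layout (the real definition is cvmx_bootmem_block_header_t in the Cavium headers, which is not part of this patch):

#include <stdint.h>

/* Layout implied by NEXT_OFFSET (0) and SIZE_OFFSET (8): each free block
 * begins with the physical address of the next free block, then its size. */
struct bootmem_block_header {
	uint64_t next_block_addr;	/* 0 terminates the free list */
	uint64_t size;			/* size of this free block in bytes */
};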
+void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr)
+{
+ int64_t address;
+ address =
+ cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
+
+ if (address > 0)
+ return cvmx_phys_to_ptr(address);
+ else
+ return NULL;
+}
+
+void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+ uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, address,
+ address + size);
+}
+
+void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
+{
+ return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
+}
+
+int cvmx_bootmem_free_named(char *name)
+{
+ return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+ return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+
+void cvmx_bootmem_lock(void)
+{
+ cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+void cvmx_bootmem_unlock(void)
+{
+ cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
+}
+
+int cvmx_bootmem_init(void *mem_desc_ptr)
+{
+ /* Here we set the global pointer to the bootmem descriptor
+ * block. This pointer will be used directly, so we will set
+ * it up to be directly usable by the application. It is set
+ * up as follows for the various runtime/ABI combinations:
+ *
+ * Linux 64 bit: Set XKPHYS bit
+ * Linux 32 bit: use mmap to create mapping, use virtual address
+ * CVMX 64 bit: use physical address directly
+ * CVMX 32 bit: use physical address directly
+ *
+ * Note that the CVMX environment assumes the use of 1-1 TLB
+ * mappings so that the physical addresses can be used
+ * directly
+ */
+ if (!cvmx_bootmem_desc) {
+#if defined(CVMX_ABI_64)
+ /* Set XKPHYS bit */
+ cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
+#else
+ cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
+#endif
+ }
+
+ return 0;
+}
+
+/*
+ * The cvmx_bootmem_phy* functions below return 64 bit physical
+ * addresses, and expose more features than the cvmx_bootmem functions
+ * above. These are required for full memory space access in 32 bit
+ * applications, as well as for using some advanced features. Most
+ * applications should not need to use these.
+ */
+
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+ uint64_t address_max, uint64_t alignment,
+ uint32_t flags)
+{
+
+ uint64_t head_addr;
+ uint64_t ent_addr;
+ /* points to previous list entry, NULL current entry is head of list */
+ uint64_t prev_addr = 0;
+ uint64_t new_ent_addr = 0;
+ uint64_t desired_min_addr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
+ "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
+ (unsigned long long)req_size,
+ (unsigned long long)address_min,
+ (unsigned long long)address_max,
+ (unsigned long long)alignment);
+#endif
+
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ goto error_out;
+ }
+
+ /*
+ * Do a variety of checks to validate the arguments. The
+ * allocator code will later assume that these checks have
+ * been made. We validate that the requested constraints are
+ * not self-contradictory before we look through the list of
+ * available memory.
+ */
+
+ /* 0 is not a valid req_size for this allocator */
+ if (!req_size)
+ goto error_out;
+
+ /* Round req_size up to mult of minimum alignment bytes */
+ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
+ ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
+
+ /*
+ * Convert !0 address_min and 0 address_max to the special case of
+ * a range that specifies an exact memory block to allocate. Do
+ * this before other checks and adjustments so that this
+ * transformation will be validated.
+ */
+ if (address_min && !address_max)
+ address_max = address_min + req_size;
+ else if (!address_min && !address_max)
+ address_max = ~0ull; /* If no limits given, use max limits */
+
+
+ /*
+ * Enforce minimum alignment (this also keeps the minimum free block
+ * req_size the same as the alignment req_size).
+ */
+ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
+ alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
+
+ /*
+ * Adjust address minimum based on requested alignment (round
+ * up to meet alignment). Do this here so we can reject
+ * impossible requests up front. (NOP for address_min == 0)
+ */
+ if (alignment)
+ address_min = __ALIGN_MASK(address_min, (alignment - 1));
+
+ /*
+ * Reject inconsistent args. We have adjusted these, so this
+ * may fail due to our internal changes even if this check
+ * would pass for the values the user supplied.
+ */
+ if (req_size > address_max - address_min)
+ goto error_out;
+
+ /* Walk through the list entries - first fit found is returned */
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ head_addr = cvmx_bootmem_desc->head_addr;
+ ent_addr = head_addr;
+ for (; ent_addr;
+ prev_addr = ent_addr,
+ ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
+ uint64_t usable_base, usable_max;
+ uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
+
+ if (cvmx_bootmem_phy_get_next(ent_addr)
+ && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
+ cvmx_dprintf("Internal bootmem_alloc() error: ent: "
+ "0x%llx, next: 0x%llx\n",
+ (unsigned long long)ent_addr,
+ (unsigned long long)
+ cvmx_bootmem_phy_get_next(ent_addr));
+ goto error_out;
+ }
+
+ /*
+ * Determine if this is an entry that can satisfy the
+ * request: check that the entry is large enough to
+ * satisfy the request.
+ */
+ usable_base =
+ __ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
+ usable_max = min(address_max, ent_addr + ent_size);
+ /*
+ * We should be able to allocate block at address
+ * usable_base.
+ */
+
+ desired_min_addr = usable_base;
+ /*
+ * Determine if request can be satisfied from the
+ * current entry.
+ */
+ if (!((ent_addr + ent_size) > usable_base
+ && ent_addr < address_max
+ && req_size <= usable_max - usable_base))
+ continue;
+ /*
+ * We have found an entry that has room to satisfy the
+ * request, so allocate it from this entry. If end
+ * CVMX_BOOTMEM_FLAG_END_ALLOC set, then allocate from
+ * the end of this block rather than the beginning.
+ */
+ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
+ desired_min_addr = usable_max - req_size;
+ /*
+ * Align desired address down to required
+ * alignment.
+ */
+ desired_min_addr &= ~(alignment - 1);
+ }
+
+ /* Match at start of entry */
+ if (desired_min_addr == ent_addr) {
+ if (req_size < ent_size) {
+ /*
+ * big enough to create a new block
+ * from top portion of block.
+ */
+ new_ent_addr = ent_addr + req_size;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ ent_size -
+ req_size);
+
+ /*
+ * Adjust next pointer as following
+ * code uses this.
+ */
+ cvmx_bootmem_phy_set_next(ent_addr,
+ new_ent_addr);
+ }
+
+ /*
+ * adjust prev ptr or head to remove this
+ * entry from list.
+ */
+ if (prev_addr)
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(ent_addr));
+ else
+ /*
+ * head of list being returned, so
+ * update head ptr.
+ */
+ cvmx_bootmem_desc->head_addr =
+ cvmx_bootmem_phy_get_next(ent_addr);
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return desired_min_addr;
+ }
+ /*
+ * The block being returned doesn't start at the beginning
+ * of this entry, so split the entry: shrink it to cover
+ * just the low portion and create a new free-list block
+ * from the high portion, so that the new block starts at
+ * the desired address. The next loop iteration then
+ * allocates from that new block.
+ */
+ new_ent_addr = desired_min_addr;
+ cvmx_bootmem_phy_set_next(new_ent_addr,
+ cvmx_bootmem_phy_get_next
+ (ent_addr));
+ cvmx_bootmem_phy_set_size(new_ent_addr,
+ cvmx_bootmem_phy_get_size
+ (ent_addr) -
+ (desired_min_addr -
+ ent_addr));
+ cvmx_bootmem_phy_set_size(ent_addr,
+ desired_min_addr - ent_addr);
+ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
+ /* Loop again to handle actual alloc from new block */
+ }
+error_out:
+ /* We didn't find anything, so return error */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return -1;
+}
+
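As a reading aid for cvmx_bootmem_phy_alloc() above, here is a heavily simplified, standalone first-fit walk over the same kind of singly linked free list. It is a sketch only: it uses ordinary pointers instead of physical addresses and omits the alignment, address-range, end-alloc and locking handling of the real code.

#include <stddef.h>
#include <stdint.h>

struct free_block {
	struct free_block *next;
	uint64_t size;
};

/* First fit: return the first block large enough for req_size, splitting
 * the unused tail off as a new free block; NULL if nothing fits. */
static void *simple_first_fit(struct free_block **head, uint64_t req_size)
{
	struct free_block **prev = head;
	struct free_block *cur;

	for (cur = *head; cur; prev = &cur->next, cur = cur->next) {
		if (cur->size < req_size)
			continue;
		if (cur->size >= req_size + sizeof(struct free_block)) {
			/* Split: the tail of this block stays free. */
			struct free_block *rest =
				(struct free_block *)((char *)cur + req_size);
			rest->next = cur->next;
			rest->size = cur->size - req_size;
			*prev = rest;
		} else {
			/* Close enough to an exact fit: unlink it whole. */
			*prev = cur->next;
		}
		return cur;
	}
	return NULL;
}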
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
+{
+ uint64_t cur_addr;
+ uint64_t prev_addr = 0; /* zero is invalid */
+ int retval = 0;
+
+#ifdef DEBUG
+ cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
+ (unsigned long long)phy_addr, (unsigned long long)size);
+#endif
+ if (cvmx_bootmem_desc->major_version > 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+
+ /* 0 is not a valid size for this allocator */
+ if (!size)
+ return 0;
+
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+ cur_addr = cvmx_bootmem_desc->head_addr;
+ if (cur_addr == 0 || phy_addr < cur_addr) {
+ /* add at front of list - special case with changing head ptr */
+ if (cur_addr && phy_addr + size > cur_addr)
+ goto bootmem_free_done; /* error, overlapping section */
+ else if (phy_addr + size == cur_addr) {
+ /* Add to front of existing first block */
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+
+ } else {
+ /* New block before first block. OK if cur_addr is 0 */
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_desc->head_addr = phy_addr;
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* Find place in list to add block */
+ while (cur_addr && phy_addr > cur_addr) {
+ prev_addr = cur_addr;
+ cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
+ }
+
+ if (!cur_addr) {
+ /*
+ * We have reached the end of the list, add on to end,
+ * checking to see if we need to combine with last
+ * block
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ } else {
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, 0);
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else {
+ /*
+ * insert between prev and cur nodes, checking for
+ * merge with either/both.
+ */
+ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
+ phy_addr) {
+ /* Merge with previous */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size
+ (prev_addr) + size);
+ if (phy_addr + size == cur_addr) {
+ /* Also merge with current */
+ cvmx_bootmem_phy_set_size(prev_addr,
+ cvmx_bootmem_phy_get_size(cur_addr) +
+ cvmx_bootmem_phy_get_size(prev_addr));
+ cvmx_bootmem_phy_set_next(prev_addr,
+ cvmx_bootmem_phy_get_next(cur_addr));
+ }
+ retval = 1;
+ goto bootmem_free_done;
+ } else if (phy_addr + size == cur_addr) {
+ /* Merge with current */
+ cvmx_bootmem_phy_set_size(phy_addr,
+ cvmx_bootmem_phy_get_size
+ (cur_addr) + size);
+ cvmx_bootmem_phy_set_next(phy_addr,
+ cvmx_bootmem_phy_get_next
+ (cur_addr));
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+ retval = 1;
+ goto bootmem_free_done;
+ }
+
+ /* It is a standalone block, add in between prev and cur */
+ cvmx_bootmem_phy_set_size(phy_addr, size);
+ cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
+ cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
+
+ }
+ retval = 1;
+
+bootmem_free_done:
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+ return retval;
+
+}
+
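The free path above reduces to two adjacency tests that decide whether the freed range is merged with the previous block, the next block, both, or inserted standalone. A minimal sketch of those tests (the names are illustrative, not SDK API):

#include <stdbool.h>
#include <stdint.h>

/* Freed range starts exactly where the previous free block ends, so it
 * can be absorbed into that block by growing its size. */
static bool merges_with_prev(uint64_t prev_addr, uint64_t prev_size,
			     uint64_t freed_addr)
{
	return prev_addr + prev_size == freed_addr;
}

/* Freed range ends exactly where the next free block starts, so the next
 * block can be absorbed into the freed range. */
static bool merges_with_next(uint64_t freed_addr, uint64_t freed_size,
			     uint64_t next_addr)
{
	return freed_addr + freed_size == next_addr;
}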
+struct cvmx_bootmem_named_block_desc *
+ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
+{
+ unsigned int i;
+ struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
+
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
+#endif
+ /*
+ * Lock the structure to make sure that it is not being
+ * changed while we are examining it.
+ */
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_lock();
+
+ /* Use XKPHYS for 64 bit linux */
+ named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
+ cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
+
+#ifdef DEBUG
+ cvmx_dprintf
+ ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
+ named_block_array_ptr);
+#endif
+ if (cvmx_bootmem_desc->major_version == 3) {
+ for (i = 0;
+ i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
+ if ((name && named_block_array_ptr[i].size
+ && !strncmp(name, named_block_array_ptr[i].name,
+ cvmx_bootmem_desc->named_block_name_len
+ - 1))
+ || (!name && !named_block_array_ptr[i].size)) {
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return &(named_block_array_ptr[i]);
+ }
+ }
+ } else {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
+ "version: %d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ }
+ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
+ cvmx_bootmem_unlock();
+
+ return NULL;
+}
+
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+{
+ struct cvmx_bootmem_named_block_desc *named_block_ptr;
+
+ if (cvmx_bootmem_desc->major_version != 3) {
+ cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
+ "%d.%d at addr: %p\n",
+ (int)cvmx_bootmem_desc->major_version,
+ (int)cvmx_bootmem_desc->minor_version,
+ cvmx_bootmem_desc);
+ return 0;
+ }
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
+#endif
+
+ /*
+ * Take lock here, as name lookup/block free/name free need to
+ * be atomic.
+ */
+ cvmx_bootmem_lock();
+
+ named_block_ptr =
+ cvmx_bootmem_phy_named_block_find(name,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ if (named_block_ptr) {
+#ifdef DEBUG
+ cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
+ "%s, base: 0x%llx, size: 0x%llx\n",
+ name,
+ (unsigned long long)named_block_ptr->base_addr,
+ (unsigned long long)named_block_ptr->size);
+#endif
+ __cvmx_bootmem_phy_free(named_block_ptr->base_addr,
+ named_block_ptr->size,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+ named_block_ptr->size = 0;
+ /* Set size to zero to indicate block not used. */
+ }
+
+ cvmx_bootmem_unlock();
+ return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
new file mode 100644
index 000000000000..6abe56f1e097
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -0,0 +1,734 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Implementation of the Level 2 Cache (L2C) control, measurement, and
+ * debugging facilities.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-l2c.h>
+#include <asm/octeon/cvmx-spinlock.h>
+
+/*
+ * This spinlock is used internally to ensure that only one core is
+ * performing certain L2 operations at a time.
+ *
+ * NOTE: This only protects calls from within a single application -
+ * if multiple applications or operating systems are running, then it
+ * is up to the user program to coordinate between them.
+ */
+static cvmx_spinlock_t cvmx_l2c_spinlock;
+
+static inline int l2_size_half(void)
+{
+ uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
+ return !!(val & (1ull << 34));
+}
+
+int cvmx_l2c_get_core_way_partition(uint32_t core)
+{
+ uint32_t field;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ /*
+ * Use the lower two bits of the coreNumber to determine the
+ * bit offset of the UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /*
+ * Return the UMSK[] field from the appropriate L2C_SPAR
+ * register based on the coreNumber.
+ */
+
+ switch (core & 0xC) {
+ case 0x0:
+ return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
+ field;
+ case 0x4:
+ return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
+ field;
+ case 0x8:
+ return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
+ field;
+ case 0xC:
+ return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
+ field;
+ }
+ return 0;
+}
+
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
+{
+ uint32_t field;
+ uint32_t valid_mask;
+
+ valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error. */
+ if (mask == valid_mask)
+ return -1;
+
+ /* Validate the core number */
+ if (core >= cvmx_octeon_num_cores())
+ return -1;
+
+ /* Check to make sure current mask & new mask don't block all ways */
+ if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
+ valid_mask)
+ return -1;
+
+ /* Use the lower two bits of core to determine the bit offset of the
+ * UMSK[] field in the L2C_SPAR register.
+ */
+ field = (core & 0x3) * 8;
+
+ /* Assign the new mask setting to the UMSK[] field in the appropriate
+ * L2C_SPAR register based on the core_num.
+ *
+ */
+ switch (core & 0xC) {
+ case 0x0:
+ cvmx_write_csr(CVMX_L2C_SPAR0,
+ (cvmx_read_csr(CVMX_L2C_SPAR0) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0x4:
+ cvmx_write_csr(CVMX_L2C_SPAR1,
+ (cvmx_read_csr(CVMX_L2C_SPAR1) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0x8:
+ cvmx_write_csr(CVMX_L2C_SPAR2,
+ (cvmx_read_csr(CVMX_L2C_SPAR2) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ case 0xC:
+ cvmx_write_csr(CVMX_L2C_SPAR3,
+ (cvmx_read_csr(CVMX_L2C_SPAR3) &
+ ~(0xFF << field)) | mask << field);
+ break;
+ }
+ return 0;
+}
+
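The two way-partition helpers above pack one 8-bit UMSK field per core into the four L2C_SPAR registers: core & 0xC selects the register and (core & 0x3) * 8 gives the bit offset within it. A standalone sketch of that mapping (spar_index/bit_offset are illustrative names):

#include <stdint.h>
#include <stdio.h>

/* Which L2C_SPARn register, and which bits in it, hold a core's UMSK. */
static void umsk_location(uint32_t core, uint32_t *spar_index,
			  uint32_t *bit_offset)
{
	*spar_index = (core & 0xC) >> 2;	/* L2C_SPAR0..L2C_SPAR3 */
	*bit_offset = (core & 0x3) * 8;		/* 8 bits per core */
}

int main(void)
{
	uint32_t reg, off;

	umsk_location(9, &reg, &off);	/* core 9 -> L2C_SPAR2, bits 15:8 */
	printf("core 9: L2C_SPAR%u, bits %u..%u\n", reg, off, off + 7);
	return 0;
}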
+int cvmx_l2c_set_hw_way_partition(uint32_t mask)
+{
+ uint32_t valid_mask;
+
+ valid_mask = 0xff;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ if (l2_size_half())
+ valid_mask = 0xf;
+ } else if (l2_size_half())
+ valid_mask = 0x3;
+
+ mask &= valid_mask;
+
+ /* A UMSK setting which blocks all L2C Ways is an error. */
+ if (mask == valid_mask)
+ return -1;
+ /* Check to make sure current mask & new mask don't block all ways */
+ if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
+ valid_mask)
+ return -1;
+
+ cvmx_write_csr(CVMX_L2C_SPAR4,
+ (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
+ return 0;
+}
+
+int cvmx_l2c_get_hw_way_partition(void)
+{
+ return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
+}
+
+void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
+ uint32_t clear_on_read)
+{
+ union cvmx_l2c_pfctl pfctl;
+
+ pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
+
+ switch (counter) {
+ case 0:
+ pfctl.s.cnt0sel = event;
+ pfctl.s.cnt0ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt0rdclr = clear_on_read;
+ break;
+ case 1:
+ pfctl.s.cnt1sel = event;
+ pfctl.s.cnt1ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt1rdclr = clear_on_read;
+ break;
+ case 2:
+ pfctl.s.cnt2sel = event;
+ pfctl.s.cnt2ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt2rdclr = clear_on_read;
+ break;
+ case 3:
+ default:
+ pfctl.s.cnt3sel = event;
+ pfctl.s.cnt3ena = 1;
+ if (!cvmx_octeon_is_pass1())
+ pfctl.s.cnt3rdclr = clear_on_read;
+ break;
+ }
+
+ cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
+}
+
+uint64_t cvmx_l2c_read_perf(uint32_t counter)
+{
+ switch (counter) {
+ case 0:
+ return cvmx_read_csr(CVMX_L2C_PFC0);
+ case 1:
+ return cvmx_read_csr(CVMX_L2C_PFC1);
+ case 2:
+ return cvmx_read_csr(CVMX_L2C_PFC2);
+ case 3:
+ default:
+ return cvmx_read_csr(CVMX_L2C_PFC3);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Helper function used to fault in cache lines for L2 cache locking
+ *
+ * @addr: Address of base of memory region to read into L2 cache
+ * @len: Length (in bytes) of region to fault in
+ */
+static void fault_in(uint64_t addr, int len)
+{
+ volatile char *ptr;
+ volatile char dummy;
+ /*
+ * Adjust addr and length so we get all cache lines even for
+ * small ranges spanning two cache lines
+ */
+ len += addr & CVMX_CACHE_LINE_MASK;
+ addr &= ~CVMX_CACHE_LINE_MASK;
+ ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+ /*
+ * Invalidate L1 cache to make sure all loads result in data
+ * being in L2.
+ */
+ CVMX_DCACHE_INVALIDATE;
+ while (len > 0) {
+ dummy += *ptr;
+ len -= CVMX_CACHE_LINE_SIZE;
+ ptr += CVMX_CACHE_LINE_SIZE;
+ }
+}
+
+int cvmx_l2c_lock_line(uint64_t addr)
+{
+ int retval = 0;
+ union cvmx_l2c_dbg l2cdbg;
+ union cvmx_l2c_lckbase lckbase;
+ union cvmx_l2c_lckoff lckoff;
+ union cvmx_l2t_err l2t_err;
+ l2cdbg.u64 = 0;
+ lckbase.u64 = 0;
+ lckoff.u64 = 0;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ /* Clear l2t error bits if set */
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ l2t_err.s.lckerr = 1;
+ l2t_err.s.lckerr2 = 1;
+ cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
+
+ addr &= ~CVMX_CACHE_LINE_MASK;
+
+ /* Set this core as debug core */
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
+ cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
+ cvmx_read_csr(CVMX_L2C_LCKOFF);
+
+ if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
+ int alias_shift =
+ CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
+ uint64_t addr_tmp =
+ addr ^ (addr & ((1 << alias_shift) - 1)) >>
+ CVMX_L2_SET_BITS;
+ lckbase.s.lck_base = addr_tmp >> 7;
+ } else {
+ lckbase.s.lck_base = addr >> 7;
+ }
+
+ lckbase.s.lck_ena = 1;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+
+ fault_in(addr, CVMX_CACHE_LINE_SIZE);
+
+ lckbase.s.lck_ena = 0;
+ cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
+ cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
+
+ /* Stop being debug core */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
+ if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
+ retval = 1; /* We were unable to lock the line */
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+
+ return retval;
+}
+
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
+{
+ int retval = 0;
+
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+
+ while (len) {
+ retval += cvmx_l2c_lock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return retval;
+}
+
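Both cvmx_l2c_lock_mem_region() above and cvmx_l2c_unlock_mem_region() below round the requested range out to whole cache lines before iterating. A standalone sketch of that rounding, assuming a 128-byte Octeon cache line (the real code uses CVMX_CACHE_LINE_SIZE/CVMX_CACHE_LINE_MASK):

#include <stdint.h>
#include <stdio.h>

#define LINE_SIZE 128ULL	/* assumed cache line size */
#define LINE_MASK (LINE_SIZE - 1)

/* Expand [start, start + len) so it begins and ends on line boundaries. */
static void round_to_lines(uint64_t *start, uint64_t *len)
{
	*len += *start & LINE_MASK;
	*start &= ~LINE_MASK;
	*len = (*len + LINE_MASK) & ~LINE_MASK;
}

int main(void)
{
	uint64_t start = 0x1050, len = 0x40;

	round_to_lines(&start, &len);	/* -> start 0x1000, len 0x100 */
	printf("start 0x%llx len 0x%llx\n",
	       (unsigned long long)start, (unsigned long long)len);
	return 0;
}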
+void cvmx_l2c_flush(void)
+{
+ uint64_t assoc, set;
+ uint64_t n_assoc, n_set;
+ union cvmx_l2c_dbg l2cdbg;
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+
+ l2cdbg.u64 = 0;
+ if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+ n_set = CVMX_L2_SETS;
+ n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
+ for (set = 0; set < n_set; set++) {
+ for (assoc = 0; assoc < n_assoc; assoc++) {
+ l2cdbg.s.set = assoc;
+ /* Enter debug mode, and make sure all other
+ * writes complete before we enter debug
+ * mode */
+ CVMX_SYNCW;
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+ (CVMX_MIPS_SPACE_XKPHYS,
+ set * CVMX_CACHE_LINE_SIZE), 0);
+ CVMX_SYNCW; /* Push STF out to L2 */
+ /* Exit debug mode */
+ CVMX_SYNC;
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ }
+ }
+
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+}
+
+int cvmx_l2c_unlock_line(uint64_t address)
+{
+ int assoc;
+ union cvmx_l2c_tag tag;
+ union cvmx_l2c_dbg l2cdbg;
+ uint32_t tag_addr;
+
+ uint32_t index = cvmx_l2c_address_to_index(address);
+
+ cvmx_spinlock_lock(&cvmx_l2c_spinlock);
+ /* Compute portion of address that is stored in tag */
+ tag_addr =
+ ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
+ ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
+ for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
+ tag = cvmx_get_l2c_tag(assoc, index);
+
+ if (tag.s.V && (tag.s.addr == tag_addr)) {
+ l2cdbg.u64 = 0;
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.set = assoc;
+ l2cdbg.s.finv = 1;
+
+ CVMX_SYNC;
+ /* Enter debug mode */
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
+ (CVMX_MIPS_SPACE_XKPHYS,
+ address), 0);
+ CVMX_SYNC;
+ /* Exit debug mode */
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return tag.s.L;
+ }
+ }
+ cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
+ return 0;
+}
+
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
+{
+ int num_unlocked = 0;
+ /* Round start/end to cache line boundaries */
+ len += start & CVMX_CACHE_LINE_MASK;
+ start &= ~CVMX_CACHE_LINE_MASK;
+ len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
+ while (len > 0) {
+ num_unlocked += cvmx_l2c_unlock_line(start);
+ start += CVMX_CACHE_LINE_SIZE;
+ len -= CVMX_CACHE_LINE_SIZE;
+ }
+
+ return num_unlocked;
+}
+
+/*
+ * Internal l2c tag types. These are converted to a generic structure
+ * that can be used on all chips.
+ */
+union __cvmx_l2c_tag {
+ uint64_t u64;
+ struct cvmx_l2c_tag_cn50xx {
+ uint64_t reserved:40;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:20; /* Phys mem addr (33..14) */
+ } cn50xx;
+ struct cvmx_l2c_tag_cn30xx {
+ uint64_t reserved:41;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:19; /* Phys mem addr (33..15) */
+ } cn30xx;
+ struct cvmx_l2c_tag_cn31xx {
+ uint64_t reserved:42;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:18; /* Phys mem addr (33..16) */
+ } cn31xx;
+ struct cvmx_l2c_tag_cn38xx {
+ uint64_t reserved:43;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:17; /* Phys mem addr (33..17) */
+ } cn38xx;
+ struct cvmx_l2c_tag_cn58xx {
+ uint64_t reserved:44;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:16; /* Phys mem addr (33..18) */
+ } cn58xx;
+ struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
+ struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
+};
+
+/**
+ * @INTERNAL
+ * Function to read an L2C tag. This code makes the current core
+ * the 'debug core' for the L2. This code must only be executed by
+ * 1 core at a time.
+ *
+ * @assoc: Association (way) of the tag to dump
+ * @index: Index of the cacheline
+ *
+ * Returns The Octeon model specific tag structure. This is
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
+ */
+static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
+{
+
+ uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
+ uint64_t core = cvmx_get_core_num();
+ union __cvmx_l2c_tag tag_val;
+ uint64_t dbg_addr = CVMX_L2C_DBG;
+ unsigned long flags;
+
+ union cvmx_l2c_dbg debug_val;
+ debug_val.u64 = 0;
+ /*
+ * For low core count parts, the core number is always small enough
+ * to stay in the correct field and not set any reserved bits.
+ */
+ debug_val.s.ppnum = core;
+ debug_val.s.l2t = 1;
+ debug_val.s.set = assoc;
+ /*
+ * Make sure core is quiet (no prefetches, etc.) before
+ * entering debug mode.
+ */
+ CVMX_SYNC;
+ /* Flush L1 to make sure debug load misses L1 */
+ CVMX_DCACHE_INVALIDATE;
+
+ local_irq_save(flags);
+
+ /*
+ * The following must be done in assembly as when in debug
+ * mode all data loads from L2 return special debug data, not
+ * normal memory contents. Also, interrupts must be
+ * disabled, since if an interrupt occurs while in debug mode
+ * the ISR will get debug data from all its memory reads
+ * instead of the contents of memory
+ */
+
+ asm volatile (".set push \n"
+ " .set mips64 \n"
+ " .set noreorder \n"
+ /* Enter debug mode, wait for store */
+ " sd %[dbg_val], 0(%[dbg_addr]) \n"
+ " ld $0, 0(%[dbg_addr]) \n"
+ /* Read L2C tag data */
+ " ld %[tag_val], 0(%[tag_addr]) \n"
+ /* Exit debug mode, wait for store */
+ " sd $0, 0(%[dbg_addr]) \n"
+ " ld $0, 0(%[dbg_addr]) \n"
+ /* Invalidate dcache to discard debug data */
+ " cache 9, 0($0) \n"
+ " .set pop" :
+ [tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
+ [dbg_val] "r"(debug_val.u64),
+ [tag_addr] "r"(debug_tag_addr) : "memory");
+
+ local_irq_restore(flags);
+ return tag_val;
+
+}
+
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
+{
+ union __cvmx_l2c_tag tmp_tag;
+ union cvmx_l2c_tag tag;
+ tag.u64 = 0;
+
+ if ((int)association >= cvmx_l2c_get_num_assoc()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag "
+ "association out of range\n");
+ return tag;
+ }
+ if ((int)index >= cvmx_l2c_get_num_sets()) {
+ cvmx_dprintf("ERROR: cvmx_l2c_get_tag "
+ "index out of range (arg: %d, max: %d)\n",
+ index, cvmx_l2c_get_num_sets());
+ return tag;
+ }
+ /* __read_l2_tag is intended for internal use only */
+ tmp_tag = __read_l2_tag(association, index);
+
+ /*
+ * Convert all tag structure types to generic version, as it
+ * can represent all models.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.addr = tmp_tag.cn58xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.addr = tmp_tag.cn38xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.addr = tmp_tag.cn31xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.addr = tmp_tag.cn30xx.addr;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.addr = tmp_tag.cn50xx.addr;
+ } else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ }
+
+ return tag;
+}
+
+uint32_t cvmx_l2c_address_to_index(uint64_t addr)
+{
+ uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
+ union cvmx_l2c_cfg l2c_cfg;
+ l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
+
+ if (l2c_cfg.s.idxalias) {
+ idx ^=
+ ((addr & CVMX_L2C_ALIAS_MASK) >>
+ CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
+ }
+ idx &= CVMX_L2C_IDX_MASK;
+ return idx;
+}
+
+int cvmx_l2c_get_cache_size_bytes(void)
+{
+ return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
+ CVMX_CACHE_LINE_SIZE;
+}
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ * Returns the number of set index bits.
+ */
+int cvmx_l2c_get_set_bits(void)
+{
+ int l2_set_bits;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
+ l2_set_bits = 11; /* 2048 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_set_bits = 10; /* 1024 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ l2_set_bits = 9; /* 512 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_set_bits = 8; /* 256 sets */
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ l2_set_bits = 7; /* 128 sets */
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_set_bits = 11; /* 2048 sets */
+ }
+ return l2_set_bits;
+
+}
+
+/* Return the number of sets in the L2 Cache */
+int cvmx_l2c_get_num_sets(void)
+{
+ return 1 << cvmx_l2c_get_set_bits();
+}
+
+/* Return the number of associations in the L2 Cache */
+int cvmx_l2c_get_num_assoc(void)
+{
+ int l2_assoc;
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN52XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN58XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
+ l2_assoc = 8;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
+ OCTEON_IS_MODEL(OCTEON_CN30XX))
+ l2_assoc = 4;
+ else {
+ cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
+ l2_assoc = 8;
+ }
+
+ /* Check to see if part of the cache is disabled */
+ if (cvmx_fuse_read(265))
+ l2_assoc = l2_assoc >> 2;
+ else if (cvmx_fuse_read(264))
+ l2_assoc = l2_assoc >> 1;
+
+ return l2_assoc;
+}
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc: Association (or way) to flush
+ * @index: Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
+{
+ union cvmx_l2c_dbg l2cdbg;
+
+ l2cdbg.u64 = 0;
+ l2cdbg.s.ppnum = cvmx_get_core_num();
+ l2cdbg.s.finv = 1;
+
+ l2cdbg.s.set = assoc;
+ /*
+ * Enter debug mode, and make sure all other writes complete
+ * before we enter debug mode.
+ */
+ asm volatile ("sync" : : : "memory");
+ cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
+ cvmx_read_csr(CVMX_L2C_DBG);
+
+ CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
+ /* Exit debug mode */
+ asm volatile ("sync" : : : "memory");
+ cvmx_write_csr(CVMX_L2C_DBG, 0);
+ cvmx_read_csr(CVMX_L2C_DBG);
+}
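The functions above form a small query/flush API for the L2. As a rough illustration (not part of the patch), the sketch below walks every set and way and flushes each line, using only functions defined in this file; since cvmx_l2c_flush_line() goes through the L2 debug port, only one core may run such a loop at a time.

/* Hypothetical helper, not in this patch: flush the entire L2 cache. */
static void example_l2c_flush_all(void)
{
	int sets = cvmx_l2c_get_num_sets();
	int ways = cvmx_l2c_get_num_assoc();
	int set, way;

	/* cvmx_l2c_flush_line() uses the L2 debug port, so only one
	 * core may execute this loop at a time. */
	for (set = 0; set < sets; set++)
		for (way = 0; way < ways; way++)
			cvmx_l2c_flush_line(way, set);
}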
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
new file mode 100644
index 000000000000..4812370706a1
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -0,0 +1,116 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board/application information obtained
+ * by the bootloader.
+ */
+
+#include <asm/octeon/cvmx.h>
+#include <asm/octeon/cvmx-spinlock.h>
+#include <asm/octeon/cvmx-sysinfo.h>
+
+/**
+ * This structure defines the private state maintained by sysinfo module.
+ *
+ */
+static struct {
+ struct cvmx_sysinfo sysinfo; /* system information */
+ cvmx_spinlock_t lock; /* mutex spinlock */
+
+} state = {
+ .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
+};
+
+
+/*
+ * Global variables that define the min/max of the memory region set
+ * up for 32 bit userspace access.
+ */
+uint64_t linux_mem32_min;
+uint64_t linux_mem32_max;
+uint64_t linux_mem32_wired;
+uint64_t linux_mem32_offset;
+
+/**
+ * This function returns the application information as obtained
+ * by the bootloader. This provides the core mask of the cores
+ * running the same application image, as well as the physical
+ * memory regions available to the core.
+ *
+ * Returns a pointer to the boot information structure.
+ *
+ */
+struct cvmx_sysinfo *cvmx_sysinfo_get(void)
+{
+ return &(state.sysinfo);
+}
+
+/**
+ * This function is used in non-simple executive environments (such as
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * are required to use simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @phy_mem_desc_ptr:
+ * Pointer to global physical memory descriptor
+ * (bootmem descriptor)
+ * @board_type:
+ * Octeon board type enumeration
+ *
+ * @board_rev_major:
+ * Board major revision
+ * @board_rev_minor:
+ * Board minor revision
+ * @cpu_clock_hz:
+ * CPU clock frequency in hertz
+ *
+ * Returns 0 on failure, 1 on success.
+ */
+int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
+ uint16_t board_type,
+ uint8_t board_rev_major,
+ uint8_t board_rev_minor,
+ uint32_t cpu_clock_hz)
+{
+
+ /* The sysinfo structure was already initialized */
+ if (state.sysinfo.board_type)
+ return 0;
+
+ memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
+ state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
+ state.sysinfo.board_type = board_type;
+ state.sysinfo.board_rev_major = board_rev_major;
+ state.sysinfo.board_rev_minor = board_rev_minor;
+ state.sysinfo.cpu_clock_hz = cpu_clock_hz;
+
+ return 1;
+}
+
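For context, a boot environment would typically seed this structure once and then read it back through cvmx_sysinfo_get(). The sketch below is only an illustration; the board type, revision, and clock values are placeholders, not values used by this patch.

/* Hypothetical early-boot call; the argument values are placeholders. */
static void example_seed_sysinfo(void *bootmem_desc)
{
	struct cvmx_sysinfo *si;

	if (!cvmx_sysinfo_minimal_initialize(bootmem_desc,
					     CVMX_BOARD_TYPE_SIM, /* board_type */
					     1, 0,		  /* rev 1.0 */
					     500000000))	  /* 500 MHz */
		return;	/* 0 means it was already initialized */

	si = cvmx_sysinfo_get();
	/* si->cpu_clock_hz, si->board_type, etc. are now valid. */
}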
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
new file mode 100644
index 000000000000..9afc3794ed1b
--- /dev/null
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -0,0 +1,358 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining functions for working with different Octeon
+ * models.
+ */
+#include <asm/octeon/octeon.h>
+
+/**
+ * Given the chip processor ID from COP0, this function returns a
+ * string representing the chip model number. The string is of the
+ * form CNXXXXpX.X-FREQ-SUFFIX.
+ * - XXXX = The chip model number
+ * - X.X = Chip pass number
+ * - FREQ = Current frequency in MHz
+ * - SUFFIX = NSP, EXP, SCP, SSP, or CP
+ *
+ * @chip_id: Chip ID
+ *
+ * Returns Model string
+ */
+const char *octeon_model_get_string(uint32_t chip_id)
+{
+ static char buffer[32];
+ return octeon_model_get_string_buffer(chip_id, buffer);
+}
+
+/*
+ * Version of octeon_model_get_string() that takes the buffer as an
+ * argument; when running early in u-boot from flash, static/global
+ * variables don't work.
+ */
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
+{
+ const char *family;
+ const char *core_model;
+ char pass[4];
+ int clock_mhz;
+ const char *suffix;
+ union cvmx_l2d_fus3 fus3;
+ int num_cores;
+ union cvmx_mio_fus_dat2 fus_dat2;
+ union cvmx_mio_fus_dat3 fus_dat3;
+ char fuse_model[10];
+ uint32_t fuse_data = 0;
+
+ fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
+ fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
+ fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
+
+ num_cores = cvmx_octeon_num_cores();
+
+ /* Make sure the non-existent devices look disabled */
+ switch ((chip_id >> 8) & 0xff) {
+ case 6: /* CN50XX */
+ case 2: /* CN30XX */
+ fus_dat3.s.nodfa_dte = 1;
+ fus_dat3.s.nozip = 1;
+ break;
+ case 4: /* CN57XX or CN56XX */
+ fus_dat3.s.nodfa_dte = 1;
+ break;
+ default:
+ break;
+ }
+
+ /* Make a guess at the suffix */
+ /* NSP = everything */
+ /* EXP = No crypto */
+ /* SCP = No DFA, No zip */
+ /* CP = No DFA, No crypto, No zip */
+ if (fus_dat3.s.nodfa_dte) {
+ if (fus_dat2.s.nocrypto)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ } else if (fus_dat2.s.nocrypto)
+ suffix = "EXP";
+ else
+ suffix = "NSP";
+
+ /*
+ * Assume pass number is encoded using <5:3><2:0>. Exceptions
+ * will be fixed later.
+ */
+ sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
+
+ /*
+ * Use the number of cores to determine the last 2 digits of
+ * the model number. There are some exceptions that are fixed
+ * later.
+ */
+ switch (num_cores) {
+ case 16:
+ core_model = "60";
+ break;
+ case 15:
+ core_model = "58";
+ break;
+ case 14:
+ core_model = "55";
+ break;
+ case 13:
+ core_model = "52";
+ break;
+ case 12:
+ core_model = "50";
+ break;
+ case 11:
+ core_model = "48";
+ break;
+ case 10:
+ core_model = "45";
+ break;
+ case 9:
+ core_model = "42";
+ break;
+ case 8:
+ core_model = "40";
+ break;
+ case 7:
+ core_model = "38";
+ break;
+ case 6:
+ core_model = "34";
+ break;
+ case 5:
+ core_model = "32";
+ break;
+ case 4:
+ core_model = "30";
+ break;
+ case 3:
+ core_model = "25";
+ break;
+ case 2:
+ core_model = "20";
+ break;
+ case 1:
+ core_model = "10";
+ break;
+ default:
+ core_model = "XX";
+ break;
+ }
+
+ /* Now figure out the family, the first two digits */
+ switch ((chip_id >> 8) & 0xff) {
+ case 0: /* CN38XX, CN37XX or CN36XX */
+ if (fus3.cn38xx.crip_512k) {
+ /*
+ * For some unknown reason, the 16 core one is
+ * called 37 instead of 36.
+ */
+ if (num_cores >= 16)
+ family = "37";
+ else
+ family = "36";
+ } else
+ family = "38";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.X");
+ break;
+ case 1:
+ strcpy(pass, "2.X");
+ break;
+ case 3:
+ strcpy(pass, "3.X");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 1: /* CN31XX or CN3020 */
+ if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
+ family = "30";
+ else
+ family = "31";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 2: /* CN3010 or CN3005 */
+ family = "30";
+ /* A chip with half cache is an 05 */
+ if (fus3.cn30xx.crip_64k)
+ core_model = "05";
+ /*
+ * This series of chips didn't follow the standard
+ * pass numbering.
+ */
+ switch (chip_id & 0xf) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 2:
+ strcpy(pass, "1.1");
+ break;
+ default:
+ strcpy(pass, "X.X");
+ break;
+ }
+ break;
+ case 3: /* CN58XX */
+ family = "58";
+ /* Special case. 4 core, no crypto */
+ if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
+ core_model = "29";
+
+ /* Pass 1 uses different encodings for pass numbers */
+ if ((chip_id & 0xFF) < 0x8) {
+ switch (chip_id & 0x3) {
+ case 0:
+ strcpy(pass, "1.0");
+ break;
+ case 1:
+ strcpy(pass, "1.1");
+ break;
+ case 3:
+ strcpy(pass, "1.2");
+ break;
+ default:
+ strcpy(pass, "1.X");
+ break;
+ }
+ }
+ break;
+ case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
+ if (fus_dat2.cn56xx.raid_en) {
+ if (fus3.cn56xx.crip_1024k)
+ family = "55";
+ else
+ family = "57";
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "SP";
+ else
+ suffix = "SSP";
+ } else {
+ if (fus_dat2.cn56xx.nocrypto)
+ suffix = "CP";
+ else {
+ suffix = "NSP";
+ if (fus_dat3.s.nozip)
+ suffix = "SCP";
+ }
+ if (fus3.cn56xx.crip_1024k)
+ family = "54";
+ else
+ family = "56";
+ }
+ break;
+ case 6: /* CN50XX */
+ family = "50";
+ break;
+ case 7: /* CN52XX */
+ if (fus3.cn52xx.crip_256k)
+ family = "51";
+ else
+ family = "52";
+ break;
+ default:
+ family = "XX";
+ core_model = "XX";
+ strcpy(pass, "X.X");
+ suffix = "XXX";
+ break;
+ }
+
+ clock_mhz = octeon_get_clock_rate() / 1000000;
+
+ if (family[0] != '3') {
+ /* Check for model in fuses, overrides normal decode */
+ /* This is _not_ valid for Octeon CN3XXX models */
+ fuse_data |= cvmx_fuse_read_byte(51);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(50);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(49);
+ fuse_data = fuse_data << 8;
+ fuse_data |= cvmx_fuse_read_byte(48);
+ if (fuse_data & 0x7ffff) {
+ int model = fuse_data & 0x3fff;
+ int suffix = (fuse_data >> 14) & 0x1f;
+ if (suffix && model) {
+ /*
+ * Have both the number and the suffix
+ * in fuses, so use both.
+ */
+ sprintf(fuse_model, "%d%c",
+ model, 'A' + suffix - 1);
+ core_model = "";
+ family = fuse_model;
+ } else if (suffix && !model) {
+ /*
+ * Only have suffix, so add suffix to
+ * 'normal' model number.
+ */
+ sprintf(fuse_model, "%s%c", core_model,
+ 'A' + suffix - 1);
+ core_model = fuse_model;
+ } else {
+ /*
+ * Don't have suffix, so just use
+ * model from fuses.
+ */
+ sprintf(fuse_model, "%d", model);
+ core_model = "";
+ family = fuse_model;
+ }
+ }
+ }
+ sprintf(buffer, "CN%s%sp%s-%d-%s",
+ family, core_model, pass, clock_mhz, suffix);
+ return buffer;
+}
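The usual caller passes the processor ID straight from COP0, as setup.c does later in this patch through octeon_board_type_string(). A minimal sketch, for illustration only:

/* Minimal sketch: print the decoded chip model at boot. */
static void example_print_model(void)
{
	pr_info("Octeon model: %s\n",
		octeon_model_get_string(read_c0_prid()));
}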
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
new file mode 100644
index 000000000000..553d36cbcc42
--- /dev/null
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -0,0 +1,84 @@
+/*
+ * Octeon Bootbus flash setup
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007, 2008 Cavium Networks
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/octeon/octeon.h>
+
+static struct map_info flash_map;
+static struct mtd_info *mymtd;
+#ifdef CONFIG_MTD_PARTITIONS
+static int nr_parts;
+static struct mtd_partition *parts;
+static const char *part_probe_types[] = {
+ "cmdlinepart",
+#ifdef CONFIG_MTD_REDBOOT_PARTS
+ "RedBoot",
+#endif
+ NULL
+};
+#endif
+
+/**
+ * Module/driver initialization.
+ *
+ * Returns Zero on success
+ */
+static int __init flash_init(void)
+{
+ /*
+ * Read the bootbus region 0 setup to determine the base
+ * address of the flash.
+ */
+ union cvmx_mio_boot_reg_cfgx region_cfg;
+ region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
+ if (region_cfg.s.en) {
+ /*
+ * The bootloader always takes the flash and sets its
+ * address so the entire flash fits below
+ * 0x1fc00000. This way the flash aliases to
+ * 0x1fc00000 for booting. Software can access the
+ * full flash at the true address, while core boot can
+ * access 4MB.
+ */
+ /* Use this name so old part lines work */
+ flash_map.name = "phys_mapped_flash";
+ flash_map.phys = region_cfg.s.base << 16;
+ flash_map.size = 0x1fc00000 - flash_map.phys;
+ flash_map.bankwidth = 1;
+ flash_map.virt = ioremap(flash_map.phys, flash_map.size);
+ pr_notice("Bootbus flash: Setting flash for %luMB flash at "
+ "0x%08lx\n", flash_map.size >> 20, flash_map.phys);
+ simple_map_init(&flash_map);
+ mymtd = do_map_probe("cfi_probe", &flash_map);
+ if (mymtd) {
+ mymtd->owner = THIS_MODULE;
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(mymtd,
+ part_probe_types,
+ &parts, 0);
+ if (nr_parts > 0)
+ add_mtd_partitions(mymtd, parts, nr_parts);
+ else
+ add_mtd_device(mymtd);
+#else
+ add_mtd_device(mymtd);
+#endif
+ } else {
+ pr_err("Failed to register MTD device for flash\n");
+ }
+ }
+ return 0;
+}
+
+late_initcall(flash_init);
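Because "cmdlinepart" is first in part_probe_types, partitions can also be described on the kernel command line, keyed on the map name registered above ("phys_mapped_flash"). The partition sizes and names below are placeholders, shown only to illustrate the syntax:

	mtdparts=phys_mapped_flash:4M(bootloader)ro,-(rootfs)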
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
new file mode 100644
index 000000000000..fc72984a5dae
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -0,0 +1,497 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/hardirq.h>
+
+#include <asm/octeon/octeon.h>
+
+DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
+DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
+DEFINE_SPINLOCK(octeon_irq_msi_lock);
+
+static void octeon_irq_core_ack(unsigned int irq)
+{
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ clear_c0_status(0x100 << bit);
+ /* The two user interrupts must be cleared manually. */
+ if (bit < 2)
+ clear_c0_cause(0x100 << bit);
+}
+
+static void octeon_irq_core_eoi(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * If an IRQ is being processed while we are disabling it the
+ * handler will attempt to unmask the interrupt after it has
+ * been disabled.
+ */
+ if (desc->status & IRQ_DISABLED)
+ return;
+
+ /* There is a race here. We should fix it. */
+
+ /*
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ set_c0_status(0x100 << bit);
+}
+
+static void octeon_irq_core_enable(unsigned int irq)
+{
+ unsigned long flags;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+
+ /*
+ * We need to disable interrupts to make sure our updates are
+ * atomic.
+ */
+ local_irq_save(flags);
+ set_c0_status(0x100 << bit);
+ local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable_local(unsigned int irq)
+{
+ unsigned long flags;
+ unsigned int bit = irq - OCTEON_IRQ_SW0;
+ /*
+ * We need to disable interrupts to make sure our updates are
+ * atomic.
+ */
+ local_irq_save(flags);
+ clear_c0_status(0x100 << bit);
+ local_irq_restore(flags);
+}
+
+static void octeon_irq_core_disable(unsigned int irq)
+{
+#ifdef CONFIG_SMP
+ on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
+ (void *) (long) irq, 1);
+#else
+ octeon_irq_core_disable_local(irq);
+#endif
+}
+
+static struct irq_chip octeon_irq_chip_core = {
+ .name = "Core",
+ .enable = octeon_irq_core_enable,
+ .disable = octeon_irq_core_disable,
+ .ack = octeon_irq_core_ack,
+ .eoi = octeon_irq_core_eoi,
+};
+
+
+static void octeon_irq_ciu0_ack(unsigned int irq)
+{
+ /*
+ * In order to avoid any locking accessing the CIU, we
+ * acknowledge CIU interrupts by disabling all of them. This
+ * way we can use a per core register and avoid any out of
+ * core locking requirements. This has the side effect that
+ * CIU interrupts can't be processed recursively.
+ *
+ * We don't need to disable IRQs to make these atomic since
+ * they are already disabled earlier in the low level
+ * interrupt code.
+ */
+ clear_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_eoi(unsigned int irq)
+{
+ /*
+ * Enable all CIU interrupts again. We don't need to disable
+ * IRQs to make these atomic since they are already disabled
+ * earlier in the low level interrupt code.
+ */
+ set_c0_status(0x100 << 2);
+}
+
+static void octeon_irq_ciu0_enable(unsigned int irq)
+{
+ int coreid = cvmx_get_core_num();
+ unsigned long flags;
+ uint64_t en0;
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+
+ /*
+ * A read lock is used here so that enables on different cores
+ * can proceed concurrently; each core only updates its own
+ * CIU enable register, so enables don't interfere with each
+ * other. During a disable the write lock keeps any enable
+ * from racing with the update of every core's register.
+ */
+ read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 |= 1ull << bit;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+}
+
+static void octeon_irq_ciu0_disable(unsigned int irq)
+{
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+ unsigned long flags;
+ uint64_t en0;
+#ifdef CONFIG_SMP
+ int cpu;
+ write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+ write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
+#else
+ int coreid = cvmx_get_core_num();
+ local_irq_save(flags);
+ en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ int cpu;
+ int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
+
+ write_lock(&octeon_irq_ciu0_rwlock);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ uint64_t en0 =
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+ if (cpumask_test_cpu(cpu, dest))
+ en0 |= 1ull << bit;
+ else
+ en0 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
+ write_unlock(&octeon_irq_ciu0_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu0 = {
+ .name = "CIU0",
+ .enable = octeon_irq_ciu0_enable,
+ .disable = octeon_irq_ciu0_disable,
+ .ack = octeon_irq_ciu0_ack,
+ .eoi = octeon_irq_ciu0_eoi,
+#ifdef CONFIG_SMP
+ .set_affinity = octeon_irq_ciu0_set_affinity,
+#endif
+};
+
+
+static void octeon_irq_ciu1_ack(unsigned int irq)
+{
+ /*
+ * In order to avoid any locking accessing the CIU, we
+ * acknowledge CIU interrupts by disabling all of them. This
+ * way we can use a per core register and avoid any out of
+ * core locking requirements. This has the side effect that
+ * CIU interrupts can't be processed recursively. We don't
+ * need to disable IRQs to make these atomic since they are
+ * already disabled earlier in the low level interrupt code.
+ */
+ clear_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_eoi(unsigned int irq)
+{
+ /*
+ * Enable all CIU interrupts again. We don't need to disable
+ * IRQs to make these atomic since they are already disabled
+ * earlier in the low level interrupt code.
+ */
+ set_c0_status(0x100 << 3);
+}
+
+static void octeon_irq_ciu1_enable(unsigned int irq)
+{
+ int coreid = cvmx_get_core_num();
+ unsigned long flags;
+ uint64_t en1;
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+
+ /*
+ * A read lock is used here so that enables on different cores
+ * can proceed concurrently; each core only updates its own
+ * CIU enable register, so enables don't interfere with each
+ * other. During a disable the write lock keeps any enable
+ * from racing with the update of every core's register.
+ */
+ read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 |= 1ull << bit;
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+}
+
+static void octeon_irq_ciu1_disable(unsigned int irq)
+{
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+ unsigned long flags;
+ uint64_t en1;
+#ifdef CONFIG_SMP
+ int cpu;
+ write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+ write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
+#else
+ int coreid = cvmx_get_core_num();
+ local_irq_save(flags);
+ en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+ local_irq_restore(flags);
+#endif
+}
+
+#ifdef CONFIG_SMP
+static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
+{
+ int cpu;
+ int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
+
+ write_lock(&octeon_irq_ciu1_rwlock);
+ for_each_online_cpu(cpu) {
+ int coreid = cpu_logical_map(cpu);
+ uint64_t en1 =
+ cvmx_read_csr(CVMX_CIU_INTX_EN1
+ (coreid * 2 + 1));
+ if (cpumask_test_cpu(cpu, dest))
+ en1 |= 1ull << bit;
+ else
+ en1 &= ~(1ull << bit);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+ }
+ /*
+ * We need to do a read after the last update to make sure all
+ * of them are done.
+ */
+ cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
+ write_unlock(&octeon_irq_ciu1_rwlock);
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu1 = {
+ .name = "CIU1",
+ .enable = octeon_irq_ciu1_enable,
+ .disable = octeon_irq_ciu1_disable,
+ .ack = octeon_irq_ciu1_ack,
+ .eoi = octeon_irq_ciu1_eoi,
+#ifdef CONFIG_SMP
+ .set_affinity = octeon_irq_ciu1_set_affinity,
+#endif
+};
+
+#ifdef CONFIG_PCI_MSI
+
+static void octeon_irq_msi_ack(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /* These chips have PCI */
+ cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
+ 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ } else {
+ /*
+ * These chips have PCIe. Thankfully the ACK doesn't
+ * need any locking.
+ */
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
+ 1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ }
+}
+
+static void octeon_irq_msi_eoi(unsigned int irq)
+{
+ /* Nothing needed */
+}
+
+static void octeon_irq_msi_enable(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /*
+ * Octeon PCI doesn't have the ability to mask/unmask
+ * MSI interrupts individually. Instead of
+ * masking/unmasking them in groups of 16, we simply
+ * assume MSI devices are well behaved. MSI
+ * interrupts are always enabled and the ACK is assumed
+ * to be enough.
+ */
+ } else {
+ /* These chips have PCIe. Note that we only support
+ * the first 64 MSI interrupts. Unfortunately all the
+ * MSI enables are in the same register. We use
+ * MSI0's lock to control access to them all.
+ */
+ uint64_t en;
+ unsigned long flags;
+ spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+ cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+ }
+}
+
+static void octeon_irq_msi_disable(unsigned int irq)
+{
+ if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
+ /* See comment in enable */
+ } else {
+ /*
+ * These chips have PCIe. Note that we only support
+ * the first 64 MSI interrupts. Unfortunately all the
+ * MSI enables are in the same register. We use
+ * MSI0's lock to control access to them all.
+ */
+ uint64_t en;
+ unsigned long flags;
+ spin_lock_irqsave(&octeon_irq_msi_lock, flags);
+ en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
+ cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
+ cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
+ spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
+ }
+}
+
+static struct irq_chip octeon_irq_chip_msi = {
+ .name = "MSI",
+ .enable = octeon_irq_msi_enable,
+ .disable = octeon_irq_msi_disable,
+ .ack = octeon_irq_msi_ack,
+ .eoi = octeon_irq_msi_eoi,
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ int irq;
+
+#ifdef CONFIG_SMP
+ /* Set the default affinity to the boot cpu. */
+ cpumask_clear(irq_default_affinity);
+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+#endif
+
+ if (NR_IRQS < OCTEON_IRQ_LAST)
+ pr_err("octeon_irq_init: NR_IRQS is set too low\n");
+
+ /* 0 - 15 reserved for i8259 master and slave controller. */
+
+ /* 17 - 23 Mips internal */
+ for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
+ handle_percpu_irq);
+ }
+
+ /* 24 - 87 CIU_INT_SUM0 */
+ for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
+ handle_percpu_irq);
+ }
+
+ /* 88 - 151 CIU_INT_SUM1 */
+ for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
+ handle_percpu_irq);
+ }
+
+#ifdef CONFIG_PCI_MSI
+ /* 152 - 215 PCI/PCIe MSI interrupts */
+ for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
+ set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
+ handle_percpu_irq);
+ }
+#endif
+ set_c0_status(0x300 << 2);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ const unsigned long core_id = cvmx_get_core_num();
+ const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
+ const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
+ const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
+ const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
+ unsigned long cop0_cause;
+ unsigned long cop0_status;
+ uint64_t ciu_en;
+ uint64_t ciu_sum;
+
+ while (1) {
+ cop0_cause = read_c0_cause();
+ cop0_status = read_c0_status();
+ cop0_cause &= cop0_status;
+ cop0_cause &= ST0_IM;
+
+ if (unlikely(cop0_cause & STATUSF_IP2)) {
+ ciu_sum = cvmx_read_csr(ciu_sum0_address);
+ ciu_en = cvmx_read_csr(ciu_en0_address);
+ ciu_sum &= ciu_en;
+ if (likely(ciu_sum))
+ do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
+ else
+ spurious_interrupt();
+ } else if (unlikely(cop0_cause & STATUSF_IP3)) {
+ ciu_sum = cvmx_read_csr(ciu_sum1_address);
+ ciu_en = cvmx_read_csr(ciu_en1_address);
+ ciu_sum &= ciu_en;
+ if (likely(ciu_sum))
+ do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
+ else
+ spurious_interrupt();
+ } else if (likely(cop0_cause)) {
+ do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
+ } else {
+ break;
+ }
+ }
+}
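To make the dispatch arithmetic concrete: fls64() returns 1 for bit 0 and 64 for bit 63, so the highest pending-and-enabled summary bit n is dispatched as OCTEON_IRQ_WORKQ0 + n (or OCTEON_IRQ_WDOG0 + n for SUM1). A small sketch of the same mapping, for illustration only:

/* Illustration only: map the highest set bit of a CIU0 summary to
 * its Linux IRQ number, exactly as plat_irq_dispatch() does above. */
static unsigned int example_ciu0_bit_to_irq(uint64_t pending)
{
	/* e.g. bit 3 pending -> fls64() == 4 -> OCTEON_IRQ_WORKQ0 + 3 */
	return fls64(pending) + OCTEON_IRQ_WORKQ0 - 1;
}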
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
new file mode 100644
index 000000000000..88e0cddca205
--- /dev/null
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -0,0 +1,521 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ * memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ * - src and dst don't overlap
+ * - src is readable
+ * - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ * copy_to_user
+ * - src is readable (no exceptions when reading src)
+ * copy_from_user
+ * - dst is writable (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * arch/mips/include/asm/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ * 1- AT contain the address of the byte just past the end of the source
+ * of the copy,
+ * 2- src_entry <= src < AT, and
+ * 3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+#define EXC(inst_reg,addr,handler) \
+9: inst_reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_64BIT
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD ld
+#define LOADL ldl
+#define LOADR ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE sd
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SRA dsra
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we are sharing a code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the register definitions from
+ * the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#else
+
+#define LOAD lw
+#define LOADL lwl
+#define LOADR lwr
+#define STOREL swl
+#define STORER swr
+#define STORE sw
+#define ADD addu
+#define SUB subu
+#define SRL srl
+#define SLL sll
+#define SRA sra
+#define SLLV sllv
+#define SRLV srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+#define UNIT(unit) FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+ .text
+ .set noreorder
+ .set noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, dst /* return value */
+__memcpy:
+FEXPORT(__copy_user)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ #
+ # Octeon doesn't care if the destination is unaligned. The hardware
+ # can fix it faster than we can special case the assembly.
+ #
+ pref 0, 0(src)
+ sltu t0, len, NBYTES # Check if < 1 word
+ bnez t0, copy_bytes_checklen
+ and t0, src, ADDRMASK # Check if src unaligned
+ bnez t0, src_unaligned
+ sltu t0, len, 4*NBYTES # Check if < 4 words
+ bnez t0, less_than_4units
+ sltu t0, len, 8*NBYTES # Check if < 8 words
+ bnez t0, less_than_8units
+ sltu t0, len, 16*NBYTES # Check if < 16 words
+ bnez t0, cleanup_both_aligned
+ sltu t0, len, 128+1 # Check if len < 129
+ bnez t0, 1f # Skip prefetch if len is too short
+ sltu t0, len, 256+1 # Check if len < 257
+ bnez t0, 1f # Skip prefetch if len is too short
+ pref 0, 128(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if there is more than 128 bytes left
+2: pref 0, 256(src) # We must not prefetch invalid addresses
+ #
+ # This is where we loop if we can't prefetch anymore
+1:
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 16*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
+ ADD src, src, 16*NBYTES
+EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
+ ADD dst, dst, 16*NBYTES
+EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
+EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
+EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
+ sltu t0, len, 256+1 # See if we can prefetch more
+ beqz t0, 2b
+ sltu t0, len, 128 # See if we can loop more times
+ beqz t0, 1b
+ nop
+ #
+ # Jump here if there are less than 16*NBYTES left.
+ #
+cleanup_both_aligned:
+ beqz len, done
+ sltu t0, len, 8*NBYTES
+ bnez t0, less_than_8units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
+EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
+EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
+ ADD src, src, 8*NBYTES
+ beqz len, done
+ ADD dst, dst, 8*NBYTES
+ #
+ # Jump here if there are less than 8*NBYTES left.
+ #
+less_than_8units:
+ sltu t0, len, 4*NBYTES
+ bnez t0, less_than_4units
+ nop
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ ADD src, src, 4*NBYTES
+ beqz len, done
+ ADD dst, dst, 4*NBYTES
+ #
+ # Jump here if there are less than 4*NBYTES left. This means
+ # we may need to copy up to 3 NBYTES words.
+ #
+less_than_4units:
+ sltu t0, len, 1*NBYTES
+ bnez t0, copy_bytes_checklen
+ nop
+ #
+ # 1) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 2) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ sltu t1, len, 8
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bnez t1, copy_bytes_checklen
+ ADD dst, dst, NBYTES
+ #
+ # 3) Copy NBYTES, then check length again
+ #
+EXC( LOAD t0, 0(src), l_exc)
+ SUB len, len, NBYTES
+ ADD src, src, NBYTES
+ ADD dst, dst, NBYTES
+ b copy_bytes_checklen
+EXC( STORE t0, -8(dst), s_exc_p1u)
+
+src_unaligned:
+#define rem t8
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ beqz t0, cleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+EXC( LDREST t1, REST(1)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDREST t2, REST(2)(src), l_exc_copy)
+EXC( LDREST t3, REST(3)(src), l_exc_copy)
+ ADD src, src, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ bne len, rem, 1b
+ ADD dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+ beqz len, done
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, copy_bytes
+ nop
+1:
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ ADD src, src, NBYTES
+ bne len, rem, 1b
+ ADD dst, dst, NBYTES
+
+copy_bytes_checklen:
+ beqz len, done
+ nop
+copy_bytes:
+ /* 0 < len < NBYTES */
+#define COPY_BYTE(N) \
+EXC( lb t0, N(src), l_exc); \
+ SUB len, len, 1; \
+ beqz len, done; \
+EXC( sb t0, N(dst), s_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lb t0, NBYTES-2(src), l_exc)
+ SUB len, len, 1
+ jr ra
+EXC( sb t0, NBYTES-2(dst), s_exc_p1)
+done:
+ jr ra
+ nop
+ END(memcpy)
+
+l_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0)
+1:
+EXC( lb t1, 0(src), l_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ bne src, t0, 1b
+ ADD dst, dst, 1
+l_exc:
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ nop
+ SUB len, AT, t0 # len number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ beqz len, done
+ SUB src, len, 1
+1: sb zero, 0(dst)
+ ADD dst, dst, 1
+ bnez src, 1b
+ SUB src, src, 1
+ jr ra
+ nop
+
+
+#define SEXC(n) \
+s_exc_p ## n ## u: \
+ jr ra; \
+ ADD len, len, n*NBYTES
+
+SEXC(16)
+SEXC(15)
+SEXC(14)
+SEXC(13)
+SEXC(12)
+SEXC(11)
+SEXC(10)
+SEXC(9)
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+ jr ra
+ ADD len, len, 1
+s_exc:
+ jr ra
+ nop
+
+ .align 5
+LEAF(memmove)
+ ADD t0, a0, a2
+ ADD t1, a1, a2
+ sltu t0, a1, t0 # dst + len <= src -> memcpy
+ sltu t1, a0, t1 # dst >= src + len -> memcpy
+ and t0, t1
+ beqz t0, __memcpy
+ move v0, a0 /* return value */
+ beqz a2, r_out
+ END(memmove)
+
+ /* fall through to __rmemcpy */
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ sltu t0, a1, a0
+ beqz t0, r_end_bytes_up # src >= dst
+ nop
+ ADD a0, a2 # dst = dst + len
+ ADD a1, a2 # src = src + len
+
+r_end_bytes:
+ lb t0, -1(a1)
+ SUB a2, a2, 0x1
+ sb t0, -1(a0)
+ SUB a1, a1, 0x1
+ bnez a2, r_end_bytes
+ SUB a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+r_end_bytes_up:
+ lb t0, (a1)
+ SUB a2, a2, 0x1
+ sb t0, (a0)
+ ADD a1, a1, 0x1
+ bnez a2, r_end_bytes_up
+ ADD a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
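For readers less used to MIPS assembly, the overlap test at the top of memmove reduces to the C sketch below (illustration only): the byte-wise __rmemcpy path is entered only when the two regions actually overlap, otherwise control branches to __memcpy.

/* C sketch of memmove's overlap test above (not part of the patch). */
static int example_regions_overlap(unsigned long dst, unsigned long src,
				   unsigned long len)
{
	/* t0 = src < dst + len,  t1 = dst < src + len */
	return (src < dst + len) && (dst < src + len);
}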
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
new file mode 100644
index 000000000000..8240728d485a
--- /dev/null
+++ b/arch/mips/cavium-octeon/serial.c
@@ -0,0 +1,136 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ */
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#include <linux/tty.h>
+
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_GDB_CONSOLE
+#define DEBUG_UART 0
+#else
+#define DEBUG_UART 1
+#endif
+
+unsigned int octeon_serial_in(struct uart_port *up, int offset)
+{
+ int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+ if (offset == UART_IIR && (rv & 0xf) == 7) {
+ /* Busy interrupt, read the USR (39) and try again. */
+ cvmx_read_csr((uint64_t)(up->membase + (39 << 3)));
+ rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
+ }
+ return rv;
+}
+
+void octeon_serial_out(struct uart_port *up, int offset, int value)
+{
+ /*
+ * If bits 6 or 7 of the OCTEON UART's LCR are set, it quits
+ * working.
+ */
+ if (offset == UART_LCR)
+ value &= 0x9f;
+ cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
+}
+
+/*
+ * Allocated in .bss, so it is all zeroed.
+ */
+#define OCTEON_MAX_UARTS 3
+static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
+static struct platform_device octeon_uart8250_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = octeon_uart8250_data,
+ },
+};
+
+static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
+{
+ p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+ p->type = PORT_OCTEON;
+ p->iotype = UPIO_MEM;
+ p->regshift = 3; /* I/O addresses are every 8 bytes */
+ p->uartclk = mips_hpt_frequency;
+ p->serial_in = octeon_serial_in;
+ p->serial_out = octeon_serial_out;
+}
+
+static int __init octeon_serial_init(void)
+{
+ int enable_uart0;
+ int enable_uart1;
+ int enable_uart2;
+ struct plat_serial8250_port *p;
+
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ /*
+ * If we are configured to run as the second of two kernels,
+ * disable uart0 and enable uart1. Uart0 is owned by the first
+ * kernel.
+ */
+ enable_uart0 = 0;
+ enable_uart1 = 1;
+#else
+ /*
+ * We are configured for the first kernel. We'll enable uart0
+ * if the bootloader told us to use 0, otherwise we'll enable
+ * uart1.
+ */
+ enable_uart0 = (octeon_get_boot_uart() == 0);
+ enable_uart1 = (octeon_get_boot_uart() == 1);
+#ifdef CONFIG_KGDB
+ enable_uart1 = 1;
+#endif
+#endif
+
+ /* Right now CN52XX is the only chip with a third uart */
+ enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+ p = octeon_uart8250_data;
+ if (enable_uart0) {
+ /* Add a ttyS device for hardware uart 0 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
+ p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART0;
+ p++;
+ }
+
+ if (enable_uart1) {
+ /* Add a ttyS device for hardware uart 1 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
+ p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART1;
+ p++;
+ }
+ if (enable_uart2) {
+ /* Add a ttyS device for hardware uart 2 */
+ octeon_uart_set_common(p);
+ p->membase = (void *) CVMX_MIO_UART2_RBR;
+ p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
+ p->irq = OCTEON_IRQ_UART2;
+ p++;
+ }
+
+ BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
+
+ return platform_device_register(&octeon_uart8250_device);
+}
+
+device_initcall(octeon_serial_init);
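One design note worth calling out: the OCTEON UART registers are spaced 8 bytes apart, which is why regshift is set to 3 and why the accessors above scale the 8250 register offset by << 3. A trivial sketch of that address calculation, for illustration only:

/* Illustration only: the CSR address of an 8250 register on Octeon. */
static uint64_t example_uart_reg_addr(struct uart_port *up, int offset)
{
	/* Registers are 8 bytes apart, matching p->regshift = 3 above. */
	return (uint64_t)(up->membase + (offset << 3));
}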
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
new file mode 100644
index 000000000000..e085feddb4a4
--- /dev/null
+++ b/arch/mips/cavium-octeon/setup.c
@@ -0,0 +1,929 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2007 Cavium Networks
+ * Copyright (C) 2008 Wind River Systems
+ */
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/serial.h>
+#include <linux/types.h>
+#include <linux/string.h> /* for memset */
+#include <linux/serial.h>
+#include <linux/tty.h>
+#include <linux/time.h>
+#include <linux/platform_device.h>
+#include <linux/serial_core.h>
+#include <linux/serial_8250.h>
+#include <linux/string.h>
+
+#include <asm/processor.h>
+#include <asm/reboot.h>
+#include <asm/smp-ops.h>
+#include <asm/system.h>
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/bootinfo.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+extern void cvmx_interrupt_rsl_decode(void);
+extern int __cvmx_interrupt_ecc_report_single_bit_errors;
+extern void cvmx_interrupt_rsl_enable(void);
+#endif
+
+extern struct plat_smp_ops octeon_smp_ops;
+
+#ifdef CONFIG_PCI
+extern void pci_console_init(const char *arg);
+#endif
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+extern uint64_t octeon_reserve32_memory;
+#endif
+static unsigned long long MAX_MEMORY = 512ull << 20;
+
+struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+
+struct cvmx_bootinfo *octeon_bootinfo;
+EXPORT_SYMBOL(octeon_bootinfo);
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+uint64_t octeon_reserve32_memory;
+EXPORT_SYMBOL(octeon_reserve32_memory);
+#endif
+
+static int octeon_uart;
+
+extern asmlinkage void handle_int(void);
+extern asmlinkage void plat_irq_dispatch(void);
+
+/**
+ * Return non-zero if we are currently running in the Octeon simulator.
+ *
+ * Returns non-zero when running under the simulator.
+ */
+int octeon_is_simulation(void)
+{
+ return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
+}
+EXPORT_SYMBOL(octeon_is_simulation);
+
+/**
+ * Return true if Octeon is in PCI Host mode. This means
+ * Linux can control the PCI bus.
+ *
+ * Returns non-zero if Octeon is in host mode.
+ */
+int octeon_is_pci_host(void)
+{
+#ifdef CONFIG_PCI
+ return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Get the clock rate of Octeon
+ *
+ * Returns the clock rate in Hz.
+ */
+uint64_t octeon_get_clock_rate(void)
+{
+ if (octeon_is_simulation())
+ octeon_bootinfo->eclock_hz = 6000000;
+ return octeon_bootinfo->eclock_hz;
+}
+EXPORT_SYMBOL(octeon_get_clock_rate);
+
+/**
+ * Write to the LCD display connected to the bootbus. This display
+ * exists on most Cavium evaluation boards. If it doesn't exist, then
+ * this function doesn't do anything.
+ *
+ * @s: String to write
+ */
+void octeon_write_lcd(const char *s)
+{
+ if (octeon_bootinfo->led_display_base_addr) {
+ void __iomem *lcd_address =
+ ioremap_nocache(octeon_bootinfo->led_display_base_addr,
+ 8);
+ int i;
+ for (i = 0; i < 8; i++, s++) {
+ if (*s)
+ iowrite8(*s, lcd_address + i);
+ else
+ iowrite8(' ', lcd_address + i);
+ }
+ iounmap(lcd_address);
+ }
+}
+
+/**
+ * Return the console uart passed by the bootloader
+ *
+ * Returns uart (0 or 1)
+ */
+int octeon_get_boot_uart(void)
+{
+ int uart;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ uart = 1;
+#else
+ uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
+ 1 : 0;
+#endif
+ return uart;
+}
+
+/**
+ * Get the coremask Linux was booted on.
+ *
+ * Returns Core mask
+ */
+int octeon_get_boot_coremask(void)
+{
+ return octeon_boot_desc_ptr->core_mask;
+}
+
+/**
+ * Check the hardware BIST results for a CPU
+ */
+void octeon_check_cpu_bist(void)
+{
+ const int coreid = cvmx_get_core_num();
+ unsigned long long mask;
+ unsigned long long bist_val;
+
+ /* Check BIST results for COP0 registers */
+ mask = 0x1f00000000ull;
+ bist_val = read_octeon_c0_icacheerr();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
+ coreid, bist_val);
+
+ bist_val = read_octeon_c0_dcacheerr();
+ if (bist_val & 1)
+ pr_err("Core%d L1 Dcache parity error: "
+ "CacheErr(dcache) = 0x%llx\n",
+ coreid, bist_val);
+
+ mask = 0xfc00000000000000ull;
+ bist_val = read_c0_cvmmemctl();
+ if (bist_val & mask)
+ pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
+ coreid, bist_val);
+
+ write_octeon_c0_dcacheerr(0);
+}
+
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+/**
+ * Called on every core to setup the wired tlb entry needed
+ * if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
+ *
+ */
+static void octeon_hal_setup_per_cpu_reserved32(void *unused)
+{
+ /*
+ * The config has selected to wire the reserve32 memory for all
+ * userspace applications. We need to put a wired TLB entry in for each
+ * 512MB of reserve32 memory. We only handle double 256MB pages here,
+ * so reserve32 must be a multiple of 512MB.
+ */
+ uint32_t size = CONFIG_CAVIUM_RESERVE32;
+ uint32_t entrylo0 =
+ 0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
+ uint32_t entrylo1 = entrylo0 + (256 << 14);
+ uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
+ while (size >= 512) {
+#if 0
+ pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
+ smp_processor_id(), entryhi);
+#endif
+ add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
+ entrylo0 += 512 << 14;
+ entrylo1 += 512 << 14;
+ entryhi += 512 << 20;
+ size -= 512;
+ }
+}
+#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
+
+/**
+ * Called to release the named block which was used to make sure
+ * that nobody used the memory for something else during
+ * init. Now we'll free it so userspace apps can use this
+ * memory region with bootmem_alloc.
+ *
+ * This function is called only once from prom_free_prom_memory().
+ */
+void octeon_hal_setup_reserved32(void)
+{
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+ on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 1);
+#endif
+}
+
+/**
+ * Reboot Octeon
+ *
+ * @command: Command to pass to the bootloader. Currently ignored.
+ */
+static void octeon_restart(char *command)
+{
+ /* Disable all watchdogs before soft reset. They don't get cleared */
+#ifdef CONFIG_SMP
+ int cpu;
+ for_each_online_cpu(cpu)
+ cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
+#else
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+#endif
+
+ mb();
+ while (1)
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
+}
+
+
+/**
+ * Permanently stop a core.
+ *
+ * @arg: Ignored.
+ */
+static void octeon_kill_core(void *arg)
+{
+ mb();
+ if (octeon_is_simulation()) {
+ /* The simulator needs the watchdog to stop for dead cores */
+ cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
+ /* A break instruction causes the simulator to stop a core */
+ asm volatile ("sync\nbreak");
+ }
+}
+
+
+/**
+ * Halt the system
+ */
+static void octeon_halt(void)
+{
+ smp_call_function(octeon_kill_core, NULL, 0);
+
+ switch (octeon_bootinfo->board_type) {
+ case CVMX_BOARD_TYPE_NAO38:
+ /* Driving a 1 to GPIO 12 shuts off this board */
+ cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
+ cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
+ break;
+ default:
+ octeon_write_lcd("PowerOff");
+ break;
+ }
+
+ octeon_kill_core(NULL);
+}
+
+#if 0
+/**
+ * Platform time init specifics.
+ */
+void __init plat_time_init(void)
+{
+ /* Nothing special here, but we are required to have one */
+}
+
+#endif
+
+/**
+ * Handle all the error condition interrupts that might occur.
+ *
+ */
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
+{
+ cvmx_interrupt_rsl_decode();
+ return IRQ_HANDLED;
+}
+#endif
+
+/**
+ * Return a string representing the system type
+ *
+ * Returns the board type and chip model string.
+ */
+const char *octeon_board_type_string(void)
+{
+ static char name[80];
+ sprintf(name, "%s (%s)",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type),
+ octeon_model_get_string(read_c0_prid()));
+ return name;
+}
+
+const char *get_system_type(void)
+ __attribute__ ((alias("octeon_board_type_string")));
+
+void octeon_user_io_init(void)
+{
+ union octeon_cvmemctl cvmmemctl;
+ union cvmx_iob_fau_timeout fau_timeout;
+ union cvmx_pow_nw_tim nm_tim;
+ uint64_t cvmctl;
+
+ /* Get the current settings for CP0_CVMMEMCTL_REG */
+ cvmmemctl.u64 = read_c0_cvmmemctl();
+ /* R/W If set, marked write-buffer entries time out the same
+ * as other entries; if clear, marked write-buffer entries
+ * use the maximum timeout. */
+ cvmmemctl.s.dismarkwblongto = 1;
+ /* R/W If set, a merged store does not clear the write-buffer
+ * entry timeout state. */
+ cvmmemctl.s.dismrgclrwbto = 0;
+ /* R/W Two bits that are the MSBs of the resultant CVMSEG LM
+ * word location for an IOBDMA. The other 8 bits come from the
+ * SCRADDR field of the IOBDMA. */
+ cvmmemctl.s.iobdmascrmsb = 0;
+ /* R/W If set, SYNCWS and SYNCS only order marked stores; if
+ * clear, SYNCWS and SYNCS only order unmarked
+ * stores. SYNCWSMARKED has no effect when DISSYNCWS is
+ * set. */
+ cvmmemctl.s.syncwsmarked = 0;
+ /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
+ cvmmemctl.s.dissyncws = 0;
+ /* R/W If set, no stall happens on write buffer full. */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
+ cvmmemctl.s.diswbfst = 1;
+ else
+ cvmmemctl.s.diswbfst = 0;
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with VA<48>==0 */
+ cvmmemctl.s.xkmemenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==0 */
+ cvmmemctl.s.xkmemenau = 0;
+
+ /* R/W If set (and SX set), supervisor-level loads/stores can
+ * use XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenas = 0;
+
+ /* R/W If set (and UX set), user-level loads/stores can use
+ * XKPHYS addresses with VA<48>==1 */
+ cvmmemctl.s.xkioenau = 0;
+
+ /* R/W If set, all stores act as SYNCW (NOMERGE must be set
+ * when this is set) RW, reset to 0. */
+ cvmmemctl.s.allsyncw = 0;
+
+ /* R/W If set, no stores merge, and all stores reach the
+ * coherent bus in order. */
+ cvmmemctl.s.nomerge = 0;
+ /* R/W Selects the bit in the counter used for DID time-outs:
+ * 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. The actual time-out
+ * is between 1x and 2x this interval. For example, with
+ * DIDTTO=3, the expiration interval is between 16K and 32K
+ * cycles. */
+ cvmmemctl.s.didtto = 0;
+ /* R/W If set, the (mem) CSR clock never turns off. */
+ cvmmemctl.s.csrckalwys = 0;
+ /* R/W If set, mclk never turns off. */
+ cvmmemctl.s.mclkalwys = 0;
+ /* R/W Selects the bit in the counter used for write buffer
+ * flush time-outs; (WBFLT + 11) is the bit position in an
+ * internal counter used to determine expiration. The write
+ * buffer expires between 1x and 2x this interval. For
+ * example, with WBFLT = 0, a write buffer expires between 2K
+ * and 4K cycles after the write buffer entry is allocated. */
+ cvmmemctl.s.wbfltime = 0;
+ /* R/W If set, do not put Istream in the L2 cache. */
+ cvmmemctl.s.istrnol2 = 0;
+ /* R/W The write buffer threshold. */
+ cvmmemctl.s.wbthresh = 10;
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * kernel/debug mode. */
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ cvmmemctl.s.cvmsegenak = 1;
+#else
+ cvmmemctl.s.cvmsegenak = 0;
+#endif
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * supervisor mode. */
+ cvmmemctl.s.cvmsegenas = 0;
+ /* R/W If set, CVMSEG is available for loads/stores in user
+ * mode. */
+ cvmmemctl.s.cvmsegenau = 0;
+ /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
+ * is max legal value. */
+ cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
+
+
+ if (smp_processor_id() == 0)
+ pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
+ CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
+
+ write_c0_cvmmemctl(cvmmemctl.u64);
+
+ /* Move the performance counter interrupts to IRQ 6 */
+ cvmctl = read_c0_cvmctl();
+ cvmctl &= ~(7 << 7);
+ cvmctl |= 6 << 7;
+ write_c0_cvmctl(cvmctl);
+
+ /* Set a default for the hardware timeouts */
+ fau_timeout.u64 = 0;
+ fau_timeout.s.tout_val = 0xfff;
+ /* Disable tagwait FAU timeout */
+ fau_timeout.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+
+ nm_tim.u64 = 0;
+ /* 4096 cycles */
+ nm_tim.s.nw_tim = 3;
+ cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+
+ write_octeon_c0_icacheerr(0);
+ write_c0_derraddr1(0);
+}
+
+/**
+ * Early entry point for arch setup
+ */
+void __init prom_init(void)
+{
+ struct cvmx_sysinfo *sysinfo;
+ const int coreid = cvmx_get_core_num();
+ int i;
+ int argc;
+ struct uart_port octeon_port;
+#ifdef CONFIG_CAVIUM_RESERVE32
+ int64_t addr = -1;
+#endif
+ /*
+ * The bootloader passes a pointer to the boot descriptor in
+ * $a3; this is available as fw_arg3.
+ */
+ octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
+ octeon_bootinfo =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
+ cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
+
+ /*
+ * Only enable the LED controller if we're running on a CN38XX, CN58XX,
+ * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
+ */
+ if (!octeon_is_simulation() &&
+ octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
+ cvmx_write_csr(CVMX_LED_EN, 0);
+ cvmx_write_csr(CVMX_LED_PRT, 0);
+ cvmx_write_csr(CVMX_LED_DBG, 0);
+ cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
+ cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
+ cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
+ cvmx_write_csr(CVMX_LED_EN, 1);
+ }
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * We need to temporarily allocate all memory in the reserve32
+ * region. This makes sure the kernel doesn't allocate this
+ * memory when it is getting memory from the
+ * bootloader. Later, after the memory allocations are
+ * complete, the reserve32 will be freed.
+ */
+#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
+ if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
+ pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
+ "This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
+ "is set\n");
+ else
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 512 << 20,
+ "CAVIUM_RESERVE32", 0);
+#else
+ /*
+ * Allocate memory for RESERVE32 aligned on a 2MB boundary. This
+ * is in case we later use hugetlb entries with it.
+ */
+ addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 2 << 20,
+ "CAVIUM_RESERVE32", 0);
+#endif
+ if (addr < 0)
+ pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+ else
+ octeon_reserve32_memory = addr;
+#endif
+
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
+ if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
+ pr_info("Skipping L2 locking due to reduced L2 cache size\n");
+ } else {
+ uint32_t ebase = read_c0_ebase() & 0x3ffff000;
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
+ /* TLB refill */
+ cvmx_l2c_lock_mem_region(ebase, 0x100);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
+ /* General exception */
+ cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
+ /* Interrupt handler */
+ cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
+ cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
+ cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
+#endif
+#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
+#endif
+ }
+#endif
+
+ sysinfo = cvmx_sysinfo_get();
+ memset(sysinfo, 0, sizeof(*sysinfo));
+ sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
+ sysinfo->phy_mem_desc_ptr =
+ cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
+ sysinfo->core_mask = octeon_bootinfo->core_mask;
+ sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
+ sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
+ sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
+ sysinfo->board_type = octeon_bootinfo->board_type;
+ sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
+ sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
+ memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
+ sizeof(sysinfo->mac_addr_base));
+ sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
+ memcpy(sysinfo->board_serial_number,
+ octeon_bootinfo->board_serial_number,
+ sizeof(sysinfo->board_serial_number));
+ sysinfo->compact_flash_common_base_addr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ sysinfo->compact_flash_attribute_base_addr =
+ octeon_bootinfo->compact_flash_attribute_base_addr;
+ sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
+ sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
+ sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
+
+
+ octeon_check_cpu_bist();
+
+ octeon_uart = octeon_get_boot_uart();
+
+ /*
+ * Disable all CIU interrupts. The ones we need will be
+ * enabled later. Read the SUM register so we know the write
+ * completed.
+ */
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
+
+#ifdef CONFIG_SMP
+ octeon_write_lcd("LinuxSMP");
+#else
+ octeon_write_lcd("Linux");
+#endif
+
+#ifdef CONFIG_CAVIUM_GDB
+ /*
+ * When debugging the linux kernel, force the cores to enter
+ * the debug exception handler to break in.
+ */
+ if (octeon_get_boot_debug_flag()) {
+ cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
+ cvmx_read_csr(CVMX_CIU_DINT);
+ }
+#endif
+
+ /*
+ * BIST should always be enabled when doing a soft reset. L2
+ * cache locking, for instance, is not cleared unless BIST is
+ * enabled. Unfortunately, due to chip erratum G-200 for
+ * CN38XX and CN31XX, BIST must be disabled on these parts.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
+ OCTEON_IS_MODEL(OCTEON_CN31XX))
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
+ else
+ cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
+
+ /* Default to 64MB in the simulator to speed things up */
+ if (octeon_is_simulation())
+ MAX_MEMORY = 64ull << 20;
+
+ arcs_cmdline[0] = 0;
+ argc = octeon_boot_desc_ptr->argc;
+ for (i = 0; i < argc; i++) {
+ const char *arg =
+ cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
+ if ((strncmp(arg, "MEM=", 4) == 0) ||
+ (strncmp(arg, "mem=", 4) == 0)) {
+ sscanf(arg + 4, "%llu", &MAX_MEMORY);
+ MAX_MEMORY <<= 20;
+ if (MAX_MEMORY == 0)
+ MAX_MEMORY = 32ull << 30;
+ } else if (strcmp(arg, "ecc_verbose") == 0) {
+#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
+ __cvmx_interrupt_ecc_report_single_bit_errors = 1;
+ pr_notice("Reporting of single bit ECC errors is "
+ "turned on\n");
+#endif
+ } else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
+ sizeof(arcs_cmdline) - 1) {
+ strcat(arcs_cmdline, " ");
+ strcat(arcs_cmdline, arg);
+ }
+ }
+
+ if (strstr(arcs_cmdline, "console=") == NULL) {
+#ifdef CONFIG_GDB_CONSOLE
+ strcat(arcs_cmdline, " console=gdb");
+#else
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+#else
+ if (octeon_uart == 1)
+ strcat(arcs_cmdline, " console=ttyS1,115200");
+ else
+ strcat(arcs_cmdline, " console=ttyS0,115200");
+#endif
+#endif
+ }
+
+ if (octeon_is_simulation()) {
+ /*
+ * The simulator uses an mtdram device pre-filled with
+ * the filesystem. Also specify the calibration delay
+ * to avoid calculating it every time.
+ */
+ strcat(arcs_cmdline, " rw root=1f00"
+ " lpj=60176 slram=root,0x40000000,+1073741824");
+ }
+
+ mips_hpt_frequency = octeon_get_clock_rate();
+
+ octeon_init_cvmcount();
+
+ _machine_restart = octeon_restart;
+ _machine_halt = octeon_halt;
+
+ memset(&octeon_port, 0, sizeof(octeon_port));
+ /*
+ * For early_serial_setup we don't set the port type or
+ * UPF_FIXED_TYPE.
+ */
+ octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
+ octeon_port.iotype = UPIO_MEM;
+ /* I/O addresses are every 8 bytes */
+ octeon_port.regshift = 3;
+ /* Clock rate of the chip */
+ octeon_port.uartclk = mips_hpt_frequency;
+ octeon_port.fifosize = 64;
+ octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
+ octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
+ octeon_port.serial_in = octeon_serial_in;
+ octeon_port.serial_out = octeon_serial_out;
+#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+ octeon_port.line = 0;
+#else
+ octeon_port.line = octeon_uart;
+#endif
+ octeon_port.irq = 42 + octeon_uart;
+ early_serial_setup(&octeon_port);
+
+ octeon_user_io_init();
+ register_smp_ops(&octeon_smp_ops);
+}
+
+void __init plat_mem_setup(void)
+{
+ uint64_t mem_alloc_size;
+ uint64_t total;
+ int64_t memory;
+
+ total = 0;
+
+ /* First add the init memory we will be returning. */
+ memory = __pa_symbol(&__init_begin) & PAGE_MASK;
+ mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
+ if (mem_alloc_size > 0) {
+ add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+ total += mem_alloc_size;
+ }
+
+ /*
+ * The MIPS memory init uses the first memory location for
+ * some memory vectors. When SPARSEMEM is in use, it doesn't
+ * verify that the size is big enough for the final
+ * vectors. Making the smallest chunk 4MB seems to be enough
+ * to work consistently.
+ */
+ mem_alloc_size = 4 << 20;
+ if (mem_alloc_size > MAX_MEMORY)
+ mem_alloc_size = MAX_MEMORY;
+
+ /*
+ * When allocating memory, we want incrementing addresses from
+ * bootmem_alloc so the code in add_memory_region can merge
+ * regions next to each other.
+ */
+ cvmx_bootmem_lock();
+ while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
+ && (total < MAX_MEMORY)) {
+#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
+ __pa_symbol(&__init_end), -1,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#elif defined(CONFIG_HIGHMEM)
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#else
+ memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
+ 0x100000,
+ CVMX_BOOTMEM_FLAG_NO_LOCKING);
+#endif
+ if (memory >= 0) {
+ /*
+ * This function automatically merges address
+ * regions next to each other if they are
+ * received in incrementing order.
+ */
+ add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+ total += mem_alloc_size;
+ } else {
+ break;
+ }
+ }
+ cvmx_bootmem_unlock();
+
+#ifdef CONFIG_CAVIUM_RESERVE32
+ /*
+ * Now that we've allocated the kernel memory it is safe to
+ * free the reserved region. We free it here so that built-in
+ * drivers can use the memory.
+ */
+ if (octeon_reserve32_memory)
+ cvmx_bootmem_free_named("CAVIUM_RESERVE32");
+#endif /* CONFIG_CAVIUM_RESERVE32 */
+
+ if (total == 0)
+ panic("Unable to allocate memory from "
+ "cvmx_bootmem_phy_alloc\n");
+}
+
+
+int prom_putchar(char c)
+{
+ uint64_t lsrval;
+
+ /* Spin until there is room */
+ do {
+ lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
+ } while ((lsrval & 0x20) == 0);
+
+ /* Write the byte */
+ cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
+ return 1;
+}
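+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original patch): early boot
+ * messages can be pushed out one byte at a time through prom_putchar()
+ * before the 8250 console is registered. The helper below is hypothetical.
+ */
+static void example_early_puts(const char *s)
+{
+ while (*s)
+ prom_putchar(*s++);
+}
+#endif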
+
+void prom_free_prom_memory(void)
+{
+#ifdef CONFIG_CAVIUM_DECODE_RSL
+ cvmx_interrupt_rsl_enable();
+
+ /* Add an interrupt handler for general failures. */
+ if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
+ "RML/RSL", octeon_rlm_interrupt)) {
+ panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
+ }
+#endif
+
+ /* This call is here so that it is performed after any TLB
+ initializations. It needs to be after these in case the
+ CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
+ octeon_hal_setup_reserved32();
+}
+
+static struct octeon_cf_data octeon_cf_data;
+
+static int __init octeon_cf_device_init(void)
+{
+ union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
+ unsigned long base_ptr, region_base, region_size;
+ struct platform_device *pd;
+ struct resource cf_resources[3];
+ unsigned int num_resources;
+ int i;
+ int ret = 0;
+
+ /* Setup octeon-cf platform device if present. */
+ base_ptr = 0;
+ if (octeon_bootinfo->major_version == 1
+ && octeon_bootinfo->minor_version >= 1) {
+ if (octeon_bootinfo->compact_flash_common_base_addr)
+ base_ptr =
+ octeon_bootinfo->compact_flash_common_base_addr;
+ } else {
+ base_ptr = 0x1d000800;
+ }
+
+ if (!base_ptr)
+ return ret;
+
+ /* Find CS0 region. */
+ for (i = 0; i < 8; i++) {
+ mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
+ && base_ptr < region_base + region_size)
+ break;
+ }
+ if (i >= 7) {
+ /* i and i + 1 are CS0 and CS1, both must be less than 8. */
+ goto out;
+ }
+ octeon_cf_data.base_region = i;
+ octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
+ octeon_cf_data.base_region_bias = base_ptr - region_base;
+ memset(cf_resources, 0, sizeof(cf_resources));
+ num_resources = 0;
+ cf_resources[num_resources].flags = IORESOURCE_MEM;
+ cf_resources[num_resources].start = region_base;
+ cf_resources[num_resources].end = region_base + region_size - 1;
+ num_resources++;
+
+
+ if (!(base_ptr & 0xfffful)) {
+ /*
+ * Boot loader signals availability of DMA (true_ide
+ * mode) by setting low order bits of base_ptr to
+ * zero.
+ */
+
+ /* Assume that CS1 immediately follows. */
+ mio_boot_reg_cfg.u64 =
+ cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
+ region_base = mio_boot_reg_cfg.s.base << 16;
+ region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
+ if (!mio_boot_reg_cfg.s.en)
+ goto out;
+
+ cf_resources[num_resources].flags = IORESOURCE_MEM;
+ cf_resources[num_resources].start = region_base;
+ cf_resources[num_resources].end = region_base + region_size - 1;
+ num_resources++;
+
+ octeon_cf_data.dma_engine = 0;
+ cf_resources[num_resources].flags = IORESOURCE_IRQ;
+ cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
+ cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
+ num_resources++;
+ } else {
+ octeon_cf_data.dma_engine = -1;
+ }
+
+ pd = platform_device_alloc("pata_octeon_cf", -1);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pd->dev.platform_data = &octeon_cf_data;
+
+ ret = platform_device_add_resources(pd, cf_resources, num_resources);
+ if (ret)
+ goto fail;
+
+ ret = platform_device_add(pd);
+ if (ret)
+ goto fail;
+
+ return ret;
+fail:
+ platform_device_put(pd);
+out:
+ return ret;
+}
+device_initcall(octeon_cf_device_init);
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
new file mode 100644
index 000000000000..24e0ad63980a
--- /dev/null
+++ b/arch/mips/cavium-octeon/smp.c
@@ -0,0 +1,211 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <asm/mmu_context.h>
+#include <asm/system.h>
+#include <asm/time.h>
+
+#include <asm/octeon/octeon.h>
+
+volatile unsigned long octeon_processor_boot = 0xff;
+volatile unsigned long octeon_processor_sp;
+volatile unsigned long octeon_processor_gp;
+
+static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
+{
+ const int coreid = cvmx_get_core_num();
+ uint64_t action;
+
+ /* Load the mailbox register to figure out what we're supposed to do */
+ action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid));
+
+ /* Clear the mailbox to clear the interrupt */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
+
+ if (action & SMP_CALL_FUNCTION)
+ smp_call_function_interrupt();
+
+ /* Check if we've been told to flush the icache */
+ if (action & SMP_ICACHE_FLUSH)
+ asm volatile ("synci 0($0)\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * Send an inter-processor interrupt (IPI) to the given CPU by setting the
+ * requested action bits in that core's CIU mailbox register.
+ */
+void octeon_send_ipi_single(int cpu, unsigned int action)
+{
+ int coreid = cpu_logical_map(cpu);
+ /*
+ pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
+ coreid, action);
+ */
+ cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
+}
+
+static inline void octeon_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu_mask(i, mask)
+ octeon_send_ipi_single(i, action);
+}
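+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original patch): a core could
+ * ask every other online core to flush its icache by raising the
+ * SMP_ICACHE_FLUSH mailbox bit. The helper name below is hypothetical.
+ */
+static void example_flush_remote_icaches(void)
+{
+ cpumask_t mask = cpu_online_map;
+
+ cpu_clear(smp_processor_id(), mask);
+ octeon_send_ipi_mask(mask, SMP_ICACHE_FLUSH);
+}
+#endif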
+
+/**
+ * Detect available CPUs, populate phys_cpu_present_map
+ */
+static void octeon_smp_setup(void)
+{
+ const int coreid = cvmx_get_core_num();
+ int cpus;
+ int id;
+
+ int core_mask = octeon_get_boot_coremask();
+
+ cpus_clear(cpu_possible_map);
+ __cpu_number_map[coreid] = 0;
+ __cpu_logical_map[0] = coreid;
+ cpu_set(0, cpu_possible_map);
+
+ cpus = 1;
+ for (id = 0; id < 16; id++) {
+ if ((id != coreid) && (core_mask & (1 << id))) {
+ cpu_set(cpus, cpu_possible_map);
+ __cpu_number_map[id] = cpus;
+ __cpu_logical_map[cpus] = id;
+ cpus++;
+ }
+ }
+}
+
+/**
+ * Firmware CPU startup hook
+ *
+ */
+static void octeon_boot_secondary(int cpu, struct task_struct *idle)
+{
+ int count;
+
+ pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
+ cpu_logical_map(cpu));
+
+ octeon_processor_sp = __KSTK_TOS(idle);
+ octeon_processor_gp = (unsigned long)(task_thread_info(idle));
+ octeon_processor_boot = cpu_logical_map(cpu);
+ mb();
+
+ count = 10000;
+ while (octeon_processor_sp && count) {
+ /* Waiting for processor to get the SP and GP */
+ udelay(1);
+ count--;
+ }
+ if (count == 0)
+ pr_err("Secondary boot timeout\n");
+}
+
+/**
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void octeon_init_secondary(void)
+{
+ const int coreid = cvmx_get_core_num();
+ union cvmx_ciu_intx_sum0 interrupt_enable;
+
+ octeon_check_cpu_bist();
+ octeon_init_cvmcount();
+ /*
+ pr_info("SMP: CPU%d (CoreId %lu) started\n", cpu, coreid);
+ */
+ /* Enable Mailbox interrupts to this core. These are the only
+ interrupts allowed on line 3 */
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), 0xffffffff);
+ interrupt_enable.u64 = 0;
+ interrupt_enable.s.mbox = 0x3;
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), interrupt_enable.u64);
+ cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
+ cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
+ /* Enable core interrupt processing for 2,3 and 7 */
+ set_c0_status(0x8c01);
+}
+
+/**
+ * Callout to firmware before smp_init
+ *
+ */
+void octeon_prepare_cpus(unsigned int max_cpus)
+{
+ cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffffffff);
+ if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, IRQF_SHARED,
+ "mailbox0", mailbox_interrupt)) {
+ panic("Cannot request_irq(OCTEON_IRQ_MBOX0)\n");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX1, mailbox_interrupt, IRQF_SHARED,
+ "mailbox1", mailbox_interrupt)) {
+ panic("Cannot request_irq(OCTEON_IRQ_MBOX1)\n");
+ }
+}
+
+/**
+ * Last chance for the board code to finish SMP initialization before
+ * the CPU is "online".
+ */
+static void octeon_smp_finish(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+ unsigned long tmp;
+ /* Pulse the MCD0 signal on Ctrl-C to stop all the cores. Also set MCD0
+ so that it is not masked by this core, so we know the signal is
+ received by someone. */
+ asm volatile ("dmfc0 %0, $22\n"
+ "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+
+ octeon_user_io_init();
+
+ /* to generate the first CPU timer interrupt */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+}
+
+/**
+ * Hook for after all CPUs are online
+ */
+static void octeon_cpus_done(void)
+{
+#ifdef CONFIG_CAVIUM_GDB
+ unsigned long tmp;
+ /* Pulse the MCD0 signal on Ctrl-C to stop all the cores. Also set MCD0
+ so that it is not masked by this core, so we know the signal is
+ received by someone. */
+ asm volatile ("dmfc0 %0, $22\n"
+ "ori %0, %0, 0x9100\n" "dmtc0 %0, $22\n" : "=r" (tmp));
+#endif
+}
+
+struct plat_smp_ops octeon_smp_ops = {
+ .send_ipi_single = octeon_send_ipi_single,
+ .send_ipi_mask = octeon_send_ipi_mask,
+ .init_secondary = octeon_init_secondary,
+ .smp_finish = octeon_smp_finish,
+ .cpus_done = octeon_cpus_done,
+ .boot_secondary = octeon_boot_secondary,
+ .smp_setup = octeon_smp_setup,
+ .prepare_cpus = octeon_prepare_cpus,
+};
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
new file mode 100644
index 000000000000..7afaa28a3768
--- /dev/null
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -0,0 +1,943 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.28-rc6
+# Wed Dec 3 11:00:58 2008
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+# CONFIG_MACH_ALCHEMY is not set
+# CONFIG_BASLER_EXCITE is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+# CONFIG_LEMOTE_FULONG is not set
+# CONFIG_MIPS_MALTA is not set
+# CONFIG_MIPS_SIM is not set
+# CONFIG_MACH_EMMA is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PNX8550_JBS is not set
+# CONFIG_PNX8550_STB810 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_PMC_YOSEMITE is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD=y
+CONFIG_CAVIUM_OCTEON_SPECIFIC_OPTIONS=y
+# CONFIG_CAVIUM_OCTEON_2ND_KERNEL is not set
+CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED=y
+CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
+CONFIG_CAVIUM_OCTEON_LOCK_L2=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT=y
+CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_CEVT_R4K=y
+CONFIG_CSRC_R4K=y
+CONFIG_DMA_COHERENT=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_NO_IOPORT is not set
+CONFIG_CPU_BIG_ENDIAN=y
+# CONFIG_CPU_LITTLE_ENDIAN is not set
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_IRQ_CPU=y
+CONFIG_IRQ_CPU_OCTEON=y
+CONFIG_SWAP_IO_SPACE=y
+CONFIG_MIPS_L1_CACHE_SHIFT=7
+
+#
+# CPU selection
+#
+# CONFIG_CPU_LOONGSON2 is not set
+# CONFIG_CPU_MIPS32_R1 is not set
+# CONFIG_CPU_MIPS32_R2 is not set
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_R3000 is not set
+# CONFIG_CPU_TX39XX is not set
+# CONFIG_CPU_VR41XX is not set
+# CONFIG_CPU_R4300 is not set
+# CONFIG_CPU_R4X00 is not set
+# CONFIG_CPU_TX49XX is not set
+# CONFIG_CPU_R5000 is not set
+# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
+# CONFIG_CPU_R6000 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_R8000 is not set
+# CONFIG_CPU_R10000 is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_RM9000 is not set
+# CONFIG_CPU_SB1 is not set
+CONFIG_CPU_CAVIUM_OCTEON=y
+CONFIG_WEAK_ORDERING=y
+CONFIG_WEAK_REORDERING_BEYOND_LLSC=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_64BIT_KERNEL=y
+
+#
+# Kernel type
+#
+# CONFIG_32BIT is not set
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_8KB is not set
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_MIPS_MT_DISABLED=y
+# CONFIG_MIPS_MT_SMP is not set
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_64BIT_PHYS_ADDR=y
+CONFIG_CPU_HAS_SYNC=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_IRQ_PER_CPU=y
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_HIGHMEM=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+# CONFIG_FLATMEM_MANUAL is not set
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
+CONFIG_SPARSEMEM_STATIC=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_SMP=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_NR_CPUS_DEFAULT_16=y
+CONFIG_NR_CPUS=16
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_48 is not set
+# CONFIG_HZ_100 is not set
+# CONFIG_HZ_128 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=250
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_KEXEC is not set
+CONFIG_SECCOMP=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_RELAY=y
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_STOP_MACHINE=y
+CONFIG_BLOCK=y
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLOCK_COMPAT=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PROBE_INITRD_HEADER is not set
+# CONFIG_FREEZER is not set
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_MMU=y
+# CONFIG_PCCARD is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_MIPS32_COMPAT=y
+CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_BINFMT_ELF32=y
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_ASK_IP_FIB_HASH=y
+# CONFIG_IP_FIB_TRIE is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETLABEL is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
+CONFIG_FIB_RULES=y
+# CONFIG_WIRELESS is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x8000000
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_PHYLIB is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+CONFIG_NETDEV_1000=y
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
+
+#
+# Multimedia devices
+#
+
+#
+# Multimedia core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_STAGING_EXCLUDE_BUILD=y
+
+#
+# File systems
+#
+# CONFIG_EXT2_FS is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_XFS_FS is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_FS=y
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+CONFIG_DEBUG_SPINLOCK=y
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_SPINLOCK_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_CMDLINE=""
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_RUNTIME_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
+CONFIG_SECURITY_NETWORK=y
+# CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=0
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+# CONFIG_CRYPTO_FIPS is not set
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+CONFIG_CRYPTO_HMAC=y
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_HW=y
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 12d12dfe73c0..a0d14f85b781 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
#ifndef cpu_has_tx39_cache
#define cpu_has_tx39_cache (cpu_data[0].options & MIPS_CPU_TX39_CACHE)
#endif
+#ifndef cpu_has_octeon_cache
+#define cpu_has_octeon_cache 0
+#endif
#ifndef cpu_has_fpu
#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 229a786101d9..c018727c7ddc 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -33,6 +33,7 @@
#define PRID_COMP_TOSHIBA 0x070000
#define PRID_COMP_LSI 0x080000
#define PRID_COMP_LEXRA 0x0b0000
+#define PRID_COMP_CAVIUM 0x0d0000
/*
@@ -114,6 +115,18 @@
#define PRID_IMP_BCM3302 0x9000
/*
+ * These are the PRIDs for when 23:16 == PRID_COMP_CAVIUM
+ */
+
+#define PRID_IMP_CAVIUM_CN38XX 0x0000
+#define PRID_IMP_CAVIUM_CN31XX 0x0100
+#define PRID_IMP_CAVIUM_CN30XX 0x0200
+#define PRID_IMP_CAVIUM_CN58XX 0x0300
+#define PRID_IMP_CAVIUM_CN56XX 0x0400
+#define PRID_IMP_CAVIUM_CN50XX 0x0600
+#define PRID_IMP_CAVIUM_CN52XX 0x0700
+
+/*
* Definitions for 7:0 on legacy processors
*/
@@ -203,6 +216,7 @@ enum cpu_type_enum {
* MIPS64 class processors
*/
CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+ CPU_CAVIUM_OCTEON,
CPU_LAST
};
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 2de638f84c86..43baed16a109 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -42,7 +42,7 @@ ASMMACRO(_ehb,
/*
* TLB hazards
*/
-#if defined(CONFIG_CPU_MIPSR2)
+#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
/*
* MIPSR2 defines ehb for hazard avoidance
@@ -138,7 +138,7 @@ do { \
__instruction_hazard(); \
} while (0)
-#elif defined(CONFIG_CPU_R10000)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 501a40b9f18d..436878e4e063 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -295,6 +295,12 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define war_octeon_io_reorder_wmb() wmb()
+#else
+#define war_octeon_io_reorder_wmb() do { } while (0)
+#endif
+
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
\
static inline void pfx##write##bwlq(type val, \
@@ -303,6 +309,8 @@ static inline void pfx##write##bwlq(type val, \
volatile type *__mem; \
type __val; \
\
+ war_octeon_io_reorder_wmb(); \
+ \
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
__val = pfx##ioswab##bwlq(__mem, val); \
@@ -370,6 +378,8 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
volatile type *__addr; \
type __val; \
\
+ war_octeon_io_reorder_wmb(); \
+ \
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
__val = pfx##ioswab##bwlq(__addr, val); \
@@ -504,8 +514,12 @@ BUILDSTRING(q, u64)
#endif
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define mmiowb() wmb()
+#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
+#endif
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index abc62aa744ac..3214ade02d10 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
*/
#define IRQ_AFFINITY_HOOK(irq) \
do { \
- if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) { \
+ if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
smtc_forward_irq(irq); \
irq_exit(); \
return; \
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 0d302bad4492..62f91f50b5b5 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -91,14 +91,57 @@ static inline u32 au_readl(unsigned long reg)
return *(volatile u32 *)reg;
}
+/* Early Au1000 revisions have a write-only SYS_CPUPLL register. */
+static inline int au1xxx_cpu_has_pll_wo(void)
+{
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ return 1;
+ }
+ return 0;
+}
+
+/* does CPU need CONFIG[OD] set to fix tons of errata? */
+static inline int au1xxx_cpu_needs_config_od(void)
+{
+ /*
+ * c0_config.od (bit 19) was write only (and read as 0) on the
+ * early revisions of Alchemy SOCs. It disables the bus trans-
+ * action overlapping and needs to be set to fix various errata.
+ */
+ switch (read_c0_prid()) {
+ case 0x00030100: /* Au1000 DA */
+ case 0x00030201: /* Au1000 HA */
+ case 0x00030202: /* Au1000 HB */
+ case 0x01030200: /* Au1500 AB */
+ /*
+ * The Au1100/Au1200 errata are actually silent about this bit,
+ * so we set it just in case for those revisions that require
+ * it to be set according to the (now gone) cpu_table.
+ */
+ case 0x02030200: /* Au1100 AB */
+ case 0x02030201: /* Au1100 BA */
+ case 0x02030202: /* Au1100 BC */
+ case 0x04030201: /* Au1200 AC */
+ return 1;
+ }
+ return 0;
+}
/* arch/mips/au1000/common/clocks.c */
extern void set_au1x00_speed(unsigned int new_freq);
extern unsigned int get_au1x00_speed(void);
extern void set_au1x00_uart_baud_base(unsigned long new_baud_base);
extern unsigned long get_au1x00_uart_baud_base(void);
-extern void set_au1x00_lcd_clock(void);
-extern unsigned int get_au1x00_lcd_clock(void);
+extern unsigned long au1xxx_calc_clock(void);
+
+/* PM: arch/mips/alchemy/common/sleeper.S, power.c, irq.c */
+void au1xxx_save_and_sleep(void);
+void au_sleep(void);
+void save_au1xxx_intctl(void);
+void restore_au1xxx_intctl(void);
/*
* Every board describes its IRQ mapping with this table.
@@ -109,10 +152,11 @@ struct au1xxx_irqmap {
int im_request;
};
-/*
- * init_IRQ looks for a table with this name.
- */
-extern struct au1xxx_irqmap au1xxx_irq_map[];
+/* core calls this function to let boards initialize other IRQ sources */
+void board_init_irq(void);
+
+/* boards call this to register additional (GPIO) interrupts */
+void au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count);
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
@@ -505,15 +549,6 @@ extern struct au1xxx_irqmap au1xxx_irq_map[];
#define IC1_TESTBIT 0xB1800080
-/* Interrupt Configuration Modes */
-#define INTC_INT_DISABLED 0x0
-#define INTC_INT_RISE_EDGE 0x1
-#define INTC_INT_FALL_EDGE 0x2
-#define INTC_INT_RISE_AND_FALL_EDGE 0x3
-#define INTC_INT_HIGH_LEVEL 0x5
-#define INTC_INT_LOW_LEVEL 0x6
-#define INTC_INT_HIGH_AND_LOW_LEVEL 0x7
-
/* Interrupt Numbers */
/* Au1000 */
#ifdef CONFIG_SOC_AU1000
@@ -1525,6 +1560,10 @@ enum soc_au1200_ints {
#define SYS_SLPPWR 0xB1900078
#define SYS_SLEEP 0xB190007C
+#define SYS_WAKEMSK_D2 (1 << 9)
+#define SYS_WAKEMSK_M2 (1 << 8)
+#define SYS_WAKEMSK_GPIO(x) (1 << (x))
+
/* Clock Controller */
#define SYS_FREQCTRL0 0xB1900020
# define SYS_FC_FRDIV2_BIT 22
@@ -1749,24 +1788,4 @@ static AU1X00_SYS * const sys = (AU1X00_SYS *)SYS_BASE;
#endif
-/*
- * Processor information based on PRID.
- * Copied from PowerPC.
- */
-#ifndef _LANGUAGE_ASSEMBLY
-struct cpu_spec {
- /* CPU is matched via (PRID & prid_mask) == prid_value */
- unsigned int prid_mask;
- unsigned int prid_value;
-
- char *cpu_name;
- unsigned char cpu_od; /* Set Config[OD] */
- unsigned char cpu_bclk; /* Enable BCLK switching */
- unsigned char cpu_pll_wo; /* sys_cpupll reg. write-only */
-};
-
-extern struct cpu_spec cpu_specs[];
-extern struct cpu_spec *cur_cpu_spec[];
-#endif
-
#endif
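A hypothetical usage sketch of the PRID-based helpers added above, showing how board setup code might apply the Config[OD] workaround without the removed cpu_table. The function name, call site and the raw bit-19 write are invented for illustration; only au1xxx_cpu_needs_config_od() and au1xxx_cpu_has_pll_wo() come from this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>
#include <asm/mach-au1x00/au1000.h>

static void __init alchemy_apply_errata(void)		/* hypothetical */
{
	/* Set Config[OD] (bit 19) on the revisions that need it */
	if (au1xxx_cpu_needs_config_od())
		set_c0_config(1 << 19);

	/* SYS_CPUPLL cannot be read back on these parts */
	if (au1xxx_cpu_has_pll_wo())
		printk(KERN_INFO "Au1xxx: SYS_CPUPLL is write-only\n");
}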
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 44a67bf05dc1..06f68f43800a 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -357,6 +357,11 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr);
u32 au1xxx_ddma_add_device(dbdev_tab_t *dev);
extern void au1xxx_ddma_del_device(u32 devid);
void *au1xxx_ddma_get_nextptr_virt(au1x_ddma_desc_t *dp);
+#ifdef CONFIG_PM
+void au1xxx_dbdma_suspend(void);
+void au1xxx_dbdma_resume(void);
+#endif
+
/*
 * Some compatibility macros -- needed to make changes to API
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
new file mode 100644
index 000000000000..04ce6e6569da
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -0,0 +1,78 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004 Cavium Networks
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_CAVIUM_OCTEON_CPU_FEATURE_OVERRIDES_H
+
+#include <linux/types.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Cavium Octeons are MIPS64 R2 processors.
+ */
+#define cpu_dcache_line_size() 128
+#define cpu_icache_line_size() 128
+
+
+#define cpu_has_4kex 1
+#define cpu_has_3k_cache 0
+#define cpu_has_4k_cache 0
+#define cpu_has_tx39_cache 0
+#define cpu_has_fpu 0
+#define cpu_has_counter 1
+#define cpu_has_watch 1
+#define cpu_has_divec 1
+#define cpu_has_vce 0
+#define cpu_has_cache_cdex_p 0
+#define cpu_has_cache_cdex_s 0
+#define cpu_has_prefetch 1
+
+/*
+ * We should disable LL/SC on non-SMP systems, as disabling interrupts
+ * for atomic access is faster than LL/SC.  Unfortunately we cannot,
+ * as that breaks asm/futex.h.
+ */
+#define cpu_has_llsc 1
+#define cpu_has_vtag_icache 1
+#define cpu_has_dc_aliases 0
+#define cpu_has_ic_fills_f_dc 0
+#define cpu_has_64bits 1
+#define cpu_has_octeon_cache 1
+#define cpu_has_saa octeon_has_saa()
+#define cpu_has_mips32r1 0
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 1
+#define cpu_has_dsp 0
+#define cpu_has_mipsmt 0
+#define cpu_has_userlocal 0
+#define cpu_has_vint 0
+#define cpu_has_veic 0
+#define ARCH_HAS_READ_CURRENT_TIMER 1
+#define ARCH_HAS_IRQ_PER_CPU 1
+#define ARCH_HAS_SPINLOCK_PREFETCH 1
+#define spin_lock_prefetch(x) prefetch(x)
+#define PREFETCH_STRIDE 128
+
+static inline int read_current_timer(unsigned long *result)
+{
+ asm volatile ("rdhwr %0,$31\n"
+#ifndef CONFIG_64BIT
+ "\tsll %0, 0"
+#endif
+ : "=r" (*result));
+ return 0;
+}
+
+static inline int octeon_has_saa(void)
+{
+ int id;
+ asm volatile ("mfc0 %0, $15,0" : "=r" (id));
+ return id >= 0x000d0300;
+}
+
+#endif
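With ARCH_HAS_READ_CURRENT_TIMER defined above, the cycle counter is sampled cheaply via RDHWR $31. The helper below is a hypothetical illustration only (not part of this patch), using the read_current_timer() defined in this header.

static inline unsigned long octeon_cycles_for(void (*fn)(void))	/* hypothetical */
{
	unsigned long start, end;

	read_current_timer(&start);
	fn();				/* work being measured */
	read_current_timer(&end);
	return end - start;		/* free-running counter, wraps naturally */
}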
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
new file mode 100644
index 000000000000..f30fce92aabb
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -0,0 +1,64 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
+ *
+ *
+ * Similar to mach-generic/dma-coherence.h, except that
+ * plat_device_is_coherent is hard-coded to return 1.
+ *
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
+#define __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H
+
+struct device;
+
+dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t);
+void octeon_unmap_dma_mem(struct device *, dma_addr_t);
+
+static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+ size_t size)
+{
+ return octeon_map_dma_mem(dev, addr, size);
+}
+
+static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
+ struct page *page)
+{
+ return octeon_map_dma_mem(dev, page_address(page), PAGE_SIZE);
+}
+
+static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
+{
+ return dma_addr;
+}
+
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+{
+ octeon_unmap_dma_mem(dev, dma_addr);
+}
+
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ mb();
+}
+
+static inline int plat_device_is_coherent(struct device *dev)
+{
+ return 1;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return dma_addr == -1;
+}
+
+#endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
new file mode 100644
index 000000000000..d32220fbf4f1
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -0,0 +1,244 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __OCTEON_IRQ_H__
+#define __OCTEON_IRQ_H__
+
+#define NR_IRQS OCTEON_IRQ_LAST
+#define MIPS_CPU_IRQ_BASE OCTEON_IRQ_SW0
+
+/* 0 - 7 represent the i8259 master */
+#define OCTEON_IRQ_I8259M0 0
+#define OCTEON_IRQ_I8259M1 1
+#define OCTEON_IRQ_I8259M2 2
+#define OCTEON_IRQ_I8259M3 3
+#define OCTEON_IRQ_I8259M4 4
+#define OCTEON_IRQ_I8259M5 5
+#define OCTEON_IRQ_I8259M6 6
+#define OCTEON_IRQ_I8259M7 7
+/* 8 - 15 represent the i8259 slave */
+#define OCTEON_IRQ_I8259S0 8
+#define OCTEON_IRQ_I8259S1 9
+#define OCTEON_IRQ_I8259S2 10
+#define OCTEON_IRQ_I8259S3 11
+#define OCTEON_IRQ_I8259S4 12
+#define OCTEON_IRQ_I8259S5 13
+#define OCTEON_IRQ_I8259S6 14
+#define OCTEON_IRQ_I8259S7 15
+/* 16 - 23 represent the 8 MIPS standard interrupt sources */
+#define OCTEON_IRQ_SW0 16
+#define OCTEON_IRQ_SW1 17
+#define OCTEON_IRQ_CIU0 18
+#define OCTEON_IRQ_CIU1 19
+#define OCTEON_IRQ_CIU4 20
+#define OCTEON_IRQ_5 21
+#define OCTEON_IRQ_PERF 22
+#define OCTEON_IRQ_TIMER 23
+/* 24 - 87 represent the sources in CIU_INTX_EN0 */
+#define OCTEON_IRQ_WORKQ0 24
+#define OCTEON_IRQ_WORKQ1 25
+#define OCTEON_IRQ_WORKQ2 26
+#define OCTEON_IRQ_WORKQ3 27
+#define OCTEON_IRQ_WORKQ4 28
+#define OCTEON_IRQ_WORKQ5 29
+#define OCTEON_IRQ_WORKQ6 30
+#define OCTEON_IRQ_WORKQ7 31
+#define OCTEON_IRQ_WORKQ8 32
+#define OCTEON_IRQ_WORKQ9 33
+#define OCTEON_IRQ_WORKQ10 34
+#define OCTEON_IRQ_WORKQ11 35
+#define OCTEON_IRQ_WORKQ12 36
+#define OCTEON_IRQ_WORKQ13 37
+#define OCTEON_IRQ_WORKQ14 38
+#define OCTEON_IRQ_WORKQ15 39
+#define OCTEON_IRQ_GPIO0 40
+#define OCTEON_IRQ_GPIO1 41
+#define OCTEON_IRQ_GPIO2 42
+#define OCTEON_IRQ_GPIO3 43
+#define OCTEON_IRQ_GPIO4 44
+#define OCTEON_IRQ_GPIO5 45
+#define OCTEON_IRQ_GPIO6 46
+#define OCTEON_IRQ_GPIO7 47
+#define OCTEON_IRQ_GPIO8 48
+#define OCTEON_IRQ_GPIO9 49
+#define OCTEON_IRQ_GPIO10 50
+#define OCTEON_IRQ_GPIO11 51
+#define OCTEON_IRQ_GPIO12 52
+#define OCTEON_IRQ_GPIO13 53
+#define OCTEON_IRQ_GPIO14 54
+#define OCTEON_IRQ_GPIO15 55
+#define OCTEON_IRQ_MBOX0 56
+#define OCTEON_IRQ_MBOX1 57
+#define OCTEON_IRQ_UART0 58
+#define OCTEON_IRQ_UART1 59
+#define OCTEON_IRQ_PCI_INT0 60
+#define OCTEON_IRQ_PCI_INT1 61
+#define OCTEON_IRQ_PCI_INT2 62
+#define OCTEON_IRQ_PCI_INT3 63
+#define OCTEON_IRQ_PCI_MSI0 64
+#define OCTEON_IRQ_PCI_MSI1 65
+#define OCTEON_IRQ_PCI_MSI2 66
+#define OCTEON_IRQ_PCI_MSI3 67
+#define OCTEON_IRQ_RESERVED68 68 /* Summary of CIU_INT_SUM1 */
+#define OCTEON_IRQ_TWSI 69
+#define OCTEON_IRQ_RML 70
+#define OCTEON_IRQ_TRACE 71
+#define OCTEON_IRQ_GMX_DRP0 72
+#define OCTEON_IRQ_GMX_DRP1 73
+#define OCTEON_IRQ_IPD_DRP 74
+#define OCTEON_IRQ_KEY_ZERO 75
+#define OCTEON_IRQ_TIMER0 76
+#define OCTEON_IRQ_TIMER1 77
+#define OCTEON_IRQ_TIMER2 78
+#define OCTEON_IRQ_TIMER3 79
+#define OCTEON_IRQ_USB0 80
+#define OCTEON_IRQ_PCM 81
+#define OCTEON_IRQ_MPI 82
+#define OCTEON_IRQ_TWSI2 83
+#define OCTEON_IRQ_POWIQ 84
+#define OCTEON_IRQ_IPDPPTHR 85
+#define OCTEON_IRQ_MII0 86
+#define OCTEON_IRQ_BOOTDMA 87
+/* 88 - 151 represent the sources in CIU_INTX_EN1 */
+#define OCTEON_IRQ_WDOG0 88
+#define OCTEON_IRQ_WDOG1 89
+#define OCTEON_IRQ_WDOG2 90
+#define OCTEON_IRQ_WDOG3 91
+#define OCTEON_IRQ_WDOG4 92
+#define OCTEON_IRQ_WDOG5 93
+#define OCTEON_IRQ_WDOG6 94
+#define OCTEON_IRQ_WDOG7 95
+#define OCTEON_IRQ_WDOG8 96
+#define OCTEON_IRQ_WDOG9 97
+#define OCTEON_IRQ_WDOG10 98
+#define OCTEON_IRQ_WDOG11 99
+#define OCTEON_IRQ_WDOG12 100
+#define OCTEON_IRQ_WDOG13 101
+#define OCTEON_IRQ_WDOG14 102
+#define OCTEON_IRQ_WDOG15 103
+#define OCTEON_IRQ_UART2 104
+#define OCTEON_IRQ_USB1 105
+#define OCTEON_IRQ_MII1 106
+#define OCTEON_IRQ_RESERVED107 107
+#define OCTEON_IRQ_RESERVED108 108
+#define OCTEON_IRQ_RESERVED109 109
+#define OCTEON_IRQ_RESERVED110 110
+#define OCTEON_IRQ_RESERVED111 111
+#define OCTEON_IRQ_RESERVED112 112
+#define OCTEON_IRQ_RESERVED113 113
+#define OCTEON_IRQ_RESERVED114 114
+#define OCTEON_IRQ_RESERVED115 115
+#define OCTEON_IRQ_RESERVED116 116
+#define OCTEON_IRQ_RESERVED117 117
+#define OCTEON_IRQ_RESERVED118 118
+#define OCTEON_IRQ_RESERVED119 119
+#define OCTEON_IRQ_RESERVED120 120
+#define OCTEON_IRQ_RESERVED121 121
+#define OCTEON_IRQ_RESERVED122 122
+#define OCTEON_IRQ_RESERVED123 123
+#define OCTEON_IRQ_RESERVED124 124
+#define OCTEON_IRQ_RESERVED125 125
+#define OCTEON_IRQ_RESERVED126 126
+#define OCTEON_IRQ_RESERVED127 127
+#define OCTEON_IRQ_RESERVED128 128
+#define OCTEON_IRQ_RESERVED129 129
+#define OCTEON_IRQ_RESERVED130 130
+#define OCTEON_IRQ_RESERVED131 131
+#define OCTEON_IRQ_RESERVED132 132
+#define OCTEON_IRQ_RESERVED133 133
+#define OCTEON_IRQ_RESERVED134 134
+#define OCTEON_IRQ_RESERVED135 135
+#define OCTEON_IRQ_RESERVED136 136
+#define OCTEON_IRQ_RESERVED137 137
+#define OCTEON_IRQ_RESERVED138 138
+#define OCTEON_IRQ_RESERVED139 139
+#define OCTEON_IRQ_RESERVED140 140
+#define OCTEON_IRQ_RESERVED141 141
+#define OCTEON_IRQ_RESERVED142 142
+#define OCTEON_IRQ_RESERVED143 143
+#define OCTEON_IRQ_RESERVED144 144
+#define OCTEON_IRQ_RESERVED145 145
+#define OCTEON_IRQ_RESERVED146 146
+#define OCTEON_IRQ_RESERVED147 147
+#define OCTEON_IRQ_RESERVED148 148
+#define OCTEON_IRQ_RESERVED149 149
+#define OCTEON_IRQ_RESERVED150 150
+#define OCTEON_IRQ_RESERVED151 151
+
+#ifdef CONFIG_PCI_MSI
+/* 152 - 215 represent the MSI interrupts 0-63 */
+#define OCTEON_IRQ_MSI_BIT0 152
+#define OCTEON_IRQ_MSI_BIT1 153
+#define OCTEON_IRQ_MSI_BIT2 154
+#define OCTEON_IRQ_MSI_BIT3 155
+#define OCTEON_IRQ_MSI_BIT4 156
+#define OCTEON_IRQ_MSI_BIT5 157
+#define OCTEON_IRQ_MSI_BIT6 158
+#define OCTEON_IRQ_MSI_BIT7 159
+#define OCTEON_IRQ_MSI_BIT8 160
+#define OCTEON_IRQ_MSI_BIT9 161
+#define OCTEON_IRQ_MSI_BIT10 162
+#define OCTEON_IRQ_MSI_BIT11 163
+#define OCTEON_IRQ_MSI_BIT12 164
+#define OCTEON_IRQ_MSI_BIT13 165
+#define OCTEON_IRQ_MSI_BIT14 166
+#define OCTEON_IRQ_MSI_BIT15 167
+#define OCTEON_IRQ_MSI_BIT16 168
+#define OCTEON_IRQ_MSI_BIT17 169
+#define OCTEON_IRQ_MSI_BIT18 170
+#define OCTEON_IRQ_MSI_BIT19 171
+#define OCTEON_IRQ_MSI_BIT20 172
+#define OCTEON_IRQ_MSI_BIT21 173
+#define OCTEON_IRQ_MSI_BIT22 174
+#define OCTEON_IRQ_MSI_BIT23 175
+#define OCTEON_IRQ_MSI_BIT24 176
+#define OCTEON_IRQ_MSI_BIT25 177
+#define OCTEON_IRQ_MSI_BIT26 178
+#define OCTEON_IRQ_MSI_BIT27 179
+#define OCTEON_IRQ_MSI_BIT28 180
+#define OCTEON_IRQ_MSI_BIT29 181
+#define OCTEON_IRQ_MSI_BIT30 182
+#define OCTEON_IRQ_MSI_BIT31 183
+#define OCTEON_IRQ_MSI_BIT32 184
+#define OCTEON_IRQ_MSI_BIT33 185
+#define OCTEON_IRQ_MSI_BIT34 186
+#define OCTEON_IRQ_MSI_BIT35 187
+#define OCTEON_IRQ_MSI_BIT36 188
+#define OCTEON_IRQ_MSI_BIT37 189
+#define OCTEON_IRQ_MSI_BIT38 190
+#define OCTEON_IRQ_MSI_BIT39 191
+#define OCTEON_IRQ_MSI_BIT40 192
+#define OCTEON_IRQ_MSI_BIT41 193
+#define OCTEON_IRQ_MSI_BIT42 194
+#define OCTEON_IRQ_MSI_BIT43 195
+#define OCTEON_IRQ_MSI_BIT44 196
+#define OCTEON_IRQ_MSI_BIT45 197
+#define OCTEON_IRQ_MSI_BIT46 198
+#define OCTEON_IRQ_MSI_BIT47 199
+#define OCTEON_IRQ_MSI_BIT48 200
+#define OCTEON_IRQ_MSI_BIT49 201
+#define OCTEON_IRQ_MSI_BIT50 202
+#define OCTEON_IRQ_MSI_BIT51 203
+#define OCTEON_IRQ_MSI_BIT52 204
+#define OCTEON_IRQ_MSI_BIT53 205
+#define OCTEON_IRQ_MSI_BIT54 206
+#define OCTEON_IRQ_MSI_BIT55 207
+#define OCTEON_IRQ_MSI_BIT56 208
+#define OCTEON_IRQ_MSI_BIT57 209
+#define OCTEON_IRQ_MSI_BIT58 210
+#define OCTEON_IRQ_MSI_BIT59 211
+#define OCTEON_IRQ_MSI_BIT60 212
+#define OCTEON_IRQ_MSI_BIT61 213
+#define OCTEON_IRQ_MSI_BIT62 214
+#define OCTEON_IRQ_MSI_BIT63 215
+
+#define OCTEON_IRQ_LAST 216
+#else
+#define OCTEON_IRQ_LAST 152
+#endif
+
+#endif
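The fixed layout above makes the mapping between Linux IRQ numbers and CIU bits a simple offset. The helper below is a hypothetical sketch (not part of this patch) that recovers the bit position from an IRQ number.

/* Returns the bit index within CIU_INTX_EN0/EN1, or -1 for non-CIU sources. */
static inline int octeon_irq_to_ciu_bit(unsigned int irq)	/* hypothetical */
{
	if (irq >= OCTEON_IRQ_WORKQ0 && irq <= OCTEON_IRQ_BOOTDMA)
		return irq - OCTEON_IRQ_WORKQ0;		/* bit in CIU_INTX_EN0 */
	if (irq >= OCTEON_IRQ_WDOG0 && irq <= OCTEON_IRQ_RESERVED151)
		return irq - OCTEON_IRQ_WDOG0;		/* bit in CIU_INTX_EN1 */
	return -1;
}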
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
new file mode 100644
index 000000000000..0b2b5eb22e9b
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -0,0 +1,131 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2008 Cavium Networks, Inc
+ */
+#ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+#define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
+
+
+#define CP0_CYCLE_COUNTER $9, 6
+#define CP0_CVMCTL_REG $9, 7
+#define CP0_CVMMEMCTL_REG $11,7
+#define CP0_PRID_REG $15, 0
+#define CP0_PRID_OCTEON_PASS1 0x000d0000
+#define CP0_PRID_OCTEON_CN30XX 0x000d0200
+
+.macro kernel_entry_setup
+ # Registers set by bootloader:
+ # (only 32 bits set by bootloader, all addresses are physical
+	# (only 32 bits are set by the bootloader; all addresses are physical
+	# addresses and need to have the appropriate memory region set
+	# by the kernel)
+ # a1 = argv (kseg0 compat addr)
+ # a2 = 1 if init core, zero otherwise
+ # a3 = address of boot descriptor block
+ .set push
+ .set arch=octeon
+ # Read the cavium mem control register
+ dmfc0 v0, CP0_CVMMEMCTL_REG
+ # Clear the lower 6 bits, the CVMSEG size
+ dins v0, $0, 0, 6
+ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
+ dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
+#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED
+ # Disable unaligned load/store support but leave HW fixup enabled
+ or v0, v0, 0x5001
+ xor v0, v0, 0x1001
+#else
+ # Disable unaligned load/store and HW fixup support
+ or v0, v0, 0x5001
+ xor v0, v0, 0x5001
+#endif
+ # Read the processor ID register
+ mfc0 v1, CP0_PRID_REG
+ # Disable instruction prefetching (Octeon Pass1 errata)
+ or v0, v0, 0x2000
+ # Skip reenable of prefetching for Octeon Pass1
+ beq v1, CP0_PRID_OCTEON_PASS1, skip
+ nop
+ # Reenable instruction prefetching, not on Pass1
+ xor v0, v0, 0x2000
+	# Strip the pass number off of the processor id
+ srl v1, 8
+ sll v1, 8
+ # CN30XX needs some extra stuff turned off for better performance
+ bne v1, CP0_PRID_OCTEON_CN30XX, skip
+ nop
+ # CN30XX Use random Icache replacement
+ or v0, v0, 0x400
+ # CN30XX Disable instruction prefetching
+ or v0, v0, 0x2000
+skip:
+ # Write the cavium control register
+ dmtc0 v0, CP0_CVMCTL_REG
+ sync
+ # Flush dcache after config change
+ cache 9, 0($0)
+ # Get my core id
+ rdhwr v0, $0
+ # Jump the master to kernel_entry
+ bne a2, zero, octeon_main_processor
+ nop
+
+#ifdef CONFIG_SMP
+
+ #
+ # All cores other than the master need to wait here for SMP bootstrap
+ # to begin
+ #
+
+	# This is the variable where the next core to boot is stored
+ PTR_LA t0, octeon_processor_boot
+octeon_spin_wait_boot:
+ # Get the core id of the next to be booted
+ LONG_L t1, (t0)
+ # Keep looping if it isn't me
+ bne t1, v0, octeon_spin_wait_boot
+ nop
+ # Get my GP from the global variable
+ PTR_LA t0, octeon_processor_gp
+ LONG_L gp, (t0)
+ # Get my SP from the global variable
+ PTR_LA t0, octeon_processor_sp
+ LONG_L sp, (t0)
+ # Set the SP global variable to zero so the master knows we've started
+ LONG_S zero, (t0)
+#ifdef __OCTEON__
+ syncw
+ syncw
+#else
+ sync
+#endif
+ # Jump to the normal Linux SMP entry point
+ j smp_bootstrap
+ nop
+#else /* CONFIG_SMP */
+
+ #
+	# Someone tried to boot SMP with a non-SMP kernel.  All extra cores
+ # will halt here.
+ #
+octeon_wait_forever:
+ wait
+ b octeon_wait_forever
+ nop
+
+#endif /* CONFIG_SMP */
+octeon_main_processor:
+ .set pop
+.endm
+
+/*
+ * Do the SMP slave processor setup necessary before we can safely execute C code.
+ */
+ .macro smp_slave_setup
+ .endm
+
+#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
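The spin loop above implies a master-side release protocol: publish the new core's stack and gp, then write its core id last. The sketch below is a rough illustration of that counterpart; the three global names come from the assembly above, while the types, barrier choice and function name are simplified assumptions, not the code this patch adds elsewhere.

extern unsigned long octeon_processor_boot;	/* core id being released */
extern unsigned long octeon_processor_sp;	/* its initial stack pointer */
extern unsigned long octeon_processor_gp;	/* its initial global pointer */

static void octeon_release_core(int coreid, unsigned long sp, unsigned long gp)
{
	octeon_processor_sp = sp;
	octeon_processor_gp = gp;
	smp_wmb();			/* publish sp/gp before the core id */
	octeon_processor_boot = coreid;

	/* the secondary zeroes sp once it has picked everything up */
	while (octeon_processor_sp)
		cpu_relax();
}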
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h
new file mode 100644
index 000000000000..c4712d7cc81d
--- /dev/null
+++ b/arch/mips/include/asm/mach-cavium-octeon/war.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Cavium Networks <support@caviumnetworks.com>
+ */
+#ifndef __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H
+#define __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index 76e04e7feb84..36c611b6c597 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -28,10 +28,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
#ifdef CONFIG_DMA_COHERENT
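For reference, DMA_BIT_MASK(24) is ((1ULL << 24) - 1), the 16 MB limit that GFP_DMA fallback allocations may be constrained to, so the new plat_dma_supported() rejects any device whose mask is narrower than that. A hypothetical caller, for illustration only:

static int example_check(struct device *dev)	/* hypothetical */
{
	/* A 16-bit mask is below DMA_BIT_MASK(24), so this returns 0. */
	return plat_dma_supported(dev, DMA_BIT_MASK(16));
}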
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index ed7e6222dc15..4c21bfca10c3 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -38,10 +38,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr & ~(0xffUL << 56);
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
 	return 1;	/* IP27 non-coherent mode is unsupported */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index a5511ebb2d53..7ae40f4b1c80 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -60,10 +60,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return addr;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
 	return 0;	/* IP32 is non-coherent */
diff --git a/arch/mips/include/asm/mach-jazz/dma-coherence.h b/arch/mips/include/asm/mach-jazz/dma-coherence.h
index d66979a124a8..1c7cd27efa7b 100644
--- a/arch/mips/include/asm/mach-jazz/dma-coherence.h
+++ b/arch/mips/include/asm/mach-jazz/dma-coherence.h
@@ -27,11 +27,35 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return vdma_log2phys(dma_addr);
}
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
vdma_free(dma_addr);
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mach-lemote/dma-coherence.h b/arch/mips/include/asm/mach-lemote/dma-coherence.h
index 7e914777ebc4..38fad7dfe7da 100644
--- a/arch/mips/include/asm/mach-lemote/dma-coherence.h
+++ b/arch/mips/include/asm/mach-lemote/dma-coherence.h
@@ -30,10 +30,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
return dma_addr & 0x7fffffff;
}
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
}
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+ /*
+ * we fall back to GFP_DMA when the mask isn't all 1s,
+ * so we can't guarantee allocations that must be
+ * within a tighter range than GFP_DMA..
+ */
+ if (mask < DMA_BIT_MASK(24))
+ return 0;
+
+ return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+ return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+ dma_addr_t dma_addr)
+{
+ return 0;
+}
+
static inline int plat_device_is_coherent(struct device *dev)
{
return 0;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 9316324d070d..0417516503f6 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1000,6 +1000,26 @@ do { \
#define read_c0_ebase() __read_32bit_c0_register($15, 1)
#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+
+/* Cavium OCTEON (cnMIPS) */
+#define read_c0_cvmcount() __read_ulong_c0_register($9, 6)
+#define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val)
+
+#define read_c0_cvmctl() __read_64bit_c0_register($9, 7)
+#define write_c0_cvmctl(val) __write_64bit_c0_register($9, 7, val)
+
+#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
+#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+/*
+ * The cacheerr registers are not standardized. On OCTEON, they are
+ * 64 bits wide.
+ */
+#define read_octeon_c0_icacheerr() __read_64bit_c0_register($27, 0)
+#define write_octeon_c0_icacheerr(val) __write_64bit_c0_register($27, 0, val)
+
+#define read_octeon_c0_dcacheerr() __read_64bit_c0_register($27, 1)
+#define write_octeon_c0_dcacheerr(val) __write_64bit_c0_register($27, 1, val)
+
/*
* Macros to access the floating point coprocessor control registers
*/
@@ -1008,6 +1028,8 @@ do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\treorder\n\t" \
+ /* gas fails to assemble cfc1 for some archs (octeon).*/ \
+ ".set\tmips1\n\t" \
"cfc1\t%0,"STR(source)"\n\t" \
".set\tpop" \
: "=r" (__res)); \
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index e2e09b2cd265..d94085a3eafb 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -116,6 +116,8 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "SB1 "
#elif defined CONFIG_CPU_LOONGSON2
#define MODULE_PROC_FAMILY "LOONGSON2 "
+#elif defined CONFIG_CPU_CAVIUM_OCTEON
+#define MODULE_PROC_FAMILY "OCTEON "
#else
#error MODULE_PROC_FAMILY undefined for your processor configuration
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-asm.h b/arch/mips/include/asm/octeon/cvmx-asm.h
new file mode 100644
index 000000000000..b21d3fc1ef91
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-asm.h
@@ -0,0 +1,128 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This file defines ASM primitives for the executive.
+ */
+#ifndef __CVMX_ASM_H__
+#define __CVMX_ASM_H__
+
+#include "octeon-model.h"
+
+/* other useful stuff */
+#define CVMX_SYNC asm volatile ("sync" : : : "memory")
+/* String version of SYNCW macro for using in inline asm constructs */
+#define CVMX_SYNCW_STR "syncw\nsyncw\n"
+#ifdef __OCTEON__
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+/*
+ * We actually use two syncw instructions in a row when we need a write
+ * memory barrier.  This is because the CN3XXX series of Octeons has
+ * erratum Core-401, which can cause a single syncw to fail to enforce
+ * ordering under very rare conditions.  Even if it is rare, better safe
+ * than sorry.
+ */
+#define CVMX_SYNCW asm volatile ("syncw\n\tsyncw" : : : "memory")
+
+/*
+ * Define new sync instructions to be normal SYNC instructions for
+ * operating systems that use threads.
+ */
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#else
+/*
+ * Not building with an Octeon-targeting compiler, so always use the
+ * slower sync so the assembler stays happy.
+ */
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIO asm volatile ("nop")
+
+#define CVMX_SYNCIOBDMA asm volatile ("sync" : : : "memory")
+
+/* Deprecated, will be removed in future release */
+#define CVMX_SYNCIOALL asm volatile ("nop")
+
+#define CVMX_SYNCW asm volatile ("sync" : : : "memory")
+#define CVMX_SYNCWS CVMX_SYNCW
+#define CVMX_SYNCS CVMX_SYNC
+#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
+#endif
+
+/*
+ * CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable
+ * (actually old value or zero) until that byte is stored to (by this or
+ * another processor).  Note that the value of each byte is not only
+ * unpredictable, but may also change again, up until the point when one
+ * of the cores stores to the byte.
+ */
+#define CVMX_PREPARE_FOR_STORE(address, offset) \
+ asm volatile ("pref 30, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+ [rbase] "d" (address))
+/*
+ * This is a command headed to the L2 controller to tell it to clear
+ * its dirty bit for a block. Basically, SW is telling HW that the
+ * current version of the block will not be used.
+ */
+#define CVMX_DONT_WRITE_BACK(address, offset) \
+ asm volatile ("pref 29, " CVMX_TMP_STR(offset) "(%[rbase])" : : \
+ [rbase] "d" (address))
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE \
+ { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); }
+
+/* flush stores, invalidate entire icache */
+#define CVMX_ICACHE_INVALIDATE2 \
+ { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); }
+
+/* complete prefetches, invalidate entire dcache */
+#define CVMX_DCACHE_INVALIDATE \
+ { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
+
+
+#define CVMX_POP(result, input) \
+ asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+#define CVMX_DPOP(result, input) \
+ asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
+
+/* some new cop0-like stuff */
+#define CVMX_RDHWR(result, regstr) \
+ asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#define CVMX_RDHWRNV(result, regstr) \
+ asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
+#endif /* __CVMX_ASM_H__ */
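An illustrative producer sketch of why CVMX_SYNCW matters: the write barrier keeps the payload store ahead of the ready flag so a consumer polling the flag never sees stale payload data. The structure and function are hypothetical, not from this patch, and uint64_t is assumed to come from linux/types.h in kernel context.

struct example_msg {
	uint64_t payload;
	volatile uint64_t ready;
};

static inline void example_publish(struct example_msg *m, uint64_t value)
{
	m->payload = value;
	CVMX_SYNCW;		/* two syncw's on CN3XXX, see erratum Core-401 above */
	m->ready = 1;
}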
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
new file mode 100644
index 000000000000..692989acd8a9
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -0,0 +1,262 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Header file containing the ABI with the bootloader.
+ */
+
+#ifndef __CVMX_BOOTINFO_H__
+#define __CVMX_BOOTINFO_H__
+
+/*
+ * Current major and minor versions of the CVMX bootinfo block that is
+ * passed from the bootloader to the application. This is versioned
+ * so that applications can properly handle multiple bootloader
+ * versions.
+ */
+#define CVMX_BOOTINFO_MAJ_VER 1
+#define CVMX_BOOTINFO_MIN_VER 2
+
+#if (CVMX_BOOTINFO_MAJ_VER == 1)
+#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
+/*
+ * This structure is populated by the bootloader. For binary
+ * compatibility the only changes that should be made are
+ * adding members to the end of the structure, and the minor
+ * version should be incremented at that time.
+ * If an incompatible change is made, the major version
+ * must be incremented, and the minor version should be reset
+ * to 0.
+ */
+struct cvmx_bootinfo {
+ uint32_t major_version;
+ uint32_t minor_version;
+
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ uint64_t desc_vaddr;
+
+ uint32_t exception_base_addr;
+ uint32_t stack_size;
+ uint32_t flags;
+ uint32_t core_mask;
+ /* DRAM size in megabytes */
+ uint32_t dram_size;
+ /* physical address of free memory descriptor block*/
+ uint32_t phy_mem_desc_addr;
+ /* used to pass flags from app to debugger */
+ uint32_t debugger_flags_base_addr;
+
+ /* CPU clock speed, in hz */
+ uint32_t eclock_hz;
+
+ /* DRAM clock speed, in hz */
+ uint32_t dclock_hz;
+
+ uint32_t reserved0;
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint16_t reserved1;
+ uint8_t reserved2;
+ uint8_t reserved3;
+ char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+#if (CVMX_BOOTINFO_MIN_VER >= 1)
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These are the physical
+ * addresses, so care must be taken to use the correct
+ * XKPHYS/KSEG0 addressing depending on the application's
+ * ABI. These values will be 0 if CF is not present.
+ */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board)
+ * This will be 0 if LED display not present.
+ */
+ uint64_t led_display_base_addr;
+#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 2)
+ /* DFA reference clock in hz (if applicable)*/
+ uint32_t dfa_ref_clock_hz;
+
+ /*
+ * flags indicating various configuration options. These
+	 * flags supersede the 'flags' variable and should be used
+ * instead if available.
+ */
+ uint32_t config_flags;
+#endif
+
+};
+
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
+#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
+#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
+#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
+/* This flag is set if the TLB mappings are not contained in the
+ * 0x10000000 - 0x20000000 boot bus region. */
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
+
+#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
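A hypothetical consumer sketch of the versioning rules above: reject a different major version outright and gate optional fields on the minor version in which they appeared. The helper name is invented; the fields and version macros are those defined above.

static inline uint32_t example_dfa_ref_clock(struct cvmx_bootinfo *bi)	/* hypothetical */
{
	if (bi->major_version != CVMX_BOOTINFO_MAJ_VER)
		return 0;			/* incompatible layout, do not touch */
	if (bi->minor_version >= 2)
		return bi->dfa_ref_clock_hz;	/* field added in minor version 2 */
	return 0;
}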
+
+/* Type defines for board and chip types */
+enum cvmx_board_types_enum {
+ CVMX_BOARD_TYPE_NULL = 0,
+ CVMX_BOARD_TYPE_SIM = 1,
+ CVMX_BOARD_TYPE_EBT3000 = 2,
+ CVMX_BOARD_TYPE_KODAMA = 3,
+ CVMX_BOARD_TYPE_NIAGARA = 4,
+ CVMX_BOARD_TYPE_NAC38 = 5, /* formerly NAO38 */
+ CVMX_BOARD_TYPE_THUNDER = 6,
+ CVMX_BOARD_TYPE_TRANTOR = 7,
+ CVMX_BOARD_TYPE_EBH3000 = 8,
+ CVMX_BOARD_TYPE_EBH3100 = 9,
+ CVMX_BOARD_TYPE_HIKARI = 10,
+ CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
+ CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
+ CVMX_BOARD_TYPE_KBP = 13,
+ /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
+ CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
+ CVMX_BOARD_TYPE_EBT5800 = 15,
+ CVMX_BOARD_TYPE_NICPRO2 = 16,
+ CVMX_BOARD_TYPE_EBH5600 = 17,
+ CVMX_BOARD_TYPE_EBH5601 = 18,
+ CVMX_BOARD_TYPE_EBH5200 = 19,
+ CVMX_BOARD_TYPE_BBGW_REF = 20,
+ CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
+ CVMX_BOARD_TYPE_EBT5600 = 22,
+ CVMX_BOARD_TYPE_EBH5201 = 23,
+ CVMX_BOARD_TYPE_MAX,
+
+ /*
+ * The range from CVMX_BOARD_TYPE_MAX to
+ * CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
+ * SDK use.
+ */
+
+ /*
+ * Set aside a range for customer boards. These numbers are managed
+ * by Cavium.
+ */
+ CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
+ CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
+ CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
+ CVMX_BOARD_TYPE_CUST_NB5 = 10003,
+ CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
+ CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
+
+ /*
+ * Set aside a range for customer private use. The SDK won't
+ * use any numbers in this range.
+ */
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
+ CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
+
+ /* The remaining range is reserved for future use. */
+};
+
+enum cvmx_chip_types_enum {
+ CVMX_CHIP_TYPE_NULL = 0,
+ CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
+ CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
+ CVMX_CHIP_TYPE_MAX,
+};
+
+/* Compatibility alias for the NAC38 name change, planned to be removed
+ * from SDK 1.7 */
+#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
+
+/* Functions to return string based on type */
+#define ENUM_BRD_TYPE_CASE(x) \
+ case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
+static inline const char *cvmx_board_type_to_string(enum
+ cvmx_board_types_enum type)
+{
+ switch (type) {
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
+
+ /* Customer boards listed here */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
+
+ /* Customer private range */
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
+ }
+ return "Unsupported Board";
+}
+
+#define ENUM_CHIP_TYPE_CASE(x) \
+ case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */
+static inline const char *cvmx_chip_type_to_string(enum
+ cvmx_chip_types_enum type)
+{
+ switch (type) {
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
+ ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
+ }
+ return "Unsupported Chip";
+}
+
+#endif /* __CVMX_BOOTINFO_H__ */
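The "#x + 16" in ENUM_BRD_TYPE_CASE stringizes the enumerator and then skips the 16-character "CVMX_BOARD_TYPE_" prefix by pointer arithmetic. A standalone illustration of the same idiom, with a hypothetical macro name:

#include <stdio.h>

#define SHORT_NAME(x)	(#x + 16)	/* 16 == strlen("CVMX_BOARD_TYPE_") */

int main(void)
{
	puts(SHORT_NAME(CVMX_BOARD_TYPE_EBT3000));	/* prints "EBT3000" */
	return 0;
}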
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
new file mode 100644
index 000000000000..1cbe4b55889d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -0,0 +1,288 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Simple allocate only memory allocator. Used to allocate memory at
+ * application start time.
+ */
+
+#ifndef __CVMX_BOOTMEM_H__
+#define __CVMX_BOOTMEM_H__
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/* minimum alignment of bootmem alloced blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/* Flags for cvmx_bootmem_phy_mem* functions */
+/* Allocate from end of block instead of beginning */
+#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0)
+
+/* Don't do any locking. */
+#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1)
+
+/* First bytes of each free physical block of memory contain this structure,
+ * which is used to maintain the free memory list. Since the bootloader is
+ * only 32 bits, there is a union providing 64 and 32 bit versions. The
+ * application init code converts addresses to 64 bit addresses before the
+ * application starts.
+ */
+struct cvmx_bootmem_block_header {
+ /*
+ * Note: these are referenced from assembly routines in the
+ * bootloader, so this structure should not be changed
+ * without changing those routines as well.
+ */
+ uint64_t next_block_addr;
+ uint64_t size;
+
+};
+
+/*
+ * Structure for named memory blocks. Number of descriptors available
+ * can be changed without affecting compatibility, but name length
+ * changes require a bump in the bootmem descriptor version.  Note: this
+ * structure must be naturally 64 bit aligned, as a single memory
+ * image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /* Base address of named block */
+ uint64_t base_addr;
+ /*
+ * Size actually allocated for named block (may differ from
+ * requested).
+ */
+ uint64_t size;
+ /* name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+/* Current descriptor versions */
+/* CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* First three members of cvmx_bootmem_desc_t are left in original
+ * positions for backwards compatibility.
+ */
+struct cvmx_bootmem_desc {
+ /* spinlock to control access to list */
+ uint32_t lock;
+ /* flags for indicating various conditions */
+ uint32_t flags;
+ uint64_t head_addr;
+
+ /* Incremented when incompatible changes made */
+ uint32_t major_version;
+
+ /*
+	 * Incremented when compatible changes are made; reset to
+	 * zero when the major version is incremented.
+ */
+ uint32_t minor_version;
+
+ uint64_t app_data_addr;
+ uint64_t app_data_size;
+
+ /* number of elements in named blocks array */
+ uint32_t named_block_num_blocks;
+
+ /* length of name array in bootmem blocks */
+ uint32_t named_block_name_len;
+ /* address of named memory block descriptors */
+ uint64_t named_block_array_addr;
+
+};
+
+/**
+ * Initialize the boot alloc memory structures. This is
+ * normally called inside of cvmx_user_app_init()
+ *
+ * @mem_desc_ptr: Address of the free memory list
+ */
+extern int cvmx_bootmem_init(void *mem_desc_ptr);
+
+/**
+ * Allocate a block of memory from the free list that was passed
+ * to the application by the bootloader.
+ * This is an allocate-only algorithm, so freeing memory is not possible.
+ *
+ * @size: Size in bytes of block to allocate
+ * @alignment: Alignment required - must be power of 2
+ *
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader at a specific
+ * address. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated at the specified address.
+ *
+ * @size: Size in bytes of block to allocate
+ * @address: Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
+ uint64_t alignment);
+
+/**
+ * Allocate a block of memory from the free list that was
+ * passed to the application by the bootloader within a specified
+ * address range. This is an allocate-only algorithm, so
+ * freeing memory is not possible. Allocation will fail if
+ * memory cannot be allocated in the requested range.
+ *
+ * @size: Size in bytes of block to allocate
+ * @min_addr: defines the minimum address of the range
+ * @max_addr: defines the maximum address of the range
+ * @alignment: Alignment required - must be power of 2
+ * Returns pointer to block of memory, NULL on error
+ */
+extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr);
+
+/**
+ * Frees a previously allocated named bootmem block.
+ *
+ * @name: name of block to free
+ *
+ * Returns 0 on failure,
+ * !0 on success
+ */
+extern int cvmx_bootmem_free_named(char *name);
+
+/**
+ * Finds a named bootmem block by name.
+ *
+ * @name: name of block to find
+ *
+ * Returns pointer to named block descriptor on success
+ * 0 on failure
+ */
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
+
+/**
+ * Allocates a block of physical memory from the free list, at
+ * (optional) requested address and alignment.
+ *
+ * @req_size: size of region to allocate. All requests are rounded up
+ *            to a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes
+ *
+ * @address_min: Minimum address that block can occupy.
+ *
+ * @address_max: Specifies the maximum address (inclusive) that
+ * the allocation can use.
+ *
+ * @alignment: Requested alignment of the block. If this alignment
+ * cannot be met, the allocation fails. This must be a
+ * power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less than
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ *
+ * @flags: Flags to control options for the allocation.
+ *
+ * Returns physical address of block allocated, or -1 on failure
+ */
+int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
+ uint64_t address_max, uint64_t alignment,
+ uint32_t flags);
+
+/**
+ * Finds a named memory block by name.
+ * Also used for finding an unused entry in the named block table.
+ *
+ * @name: Name of memory block to find. If NULL pointer given, then
+ * finds unused descriptor, if available.
+ *
+ * @flags: Flags to control options for the allocation.
+ *
+ * Returns Pointer to memory block descriptor, NULL if not found.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
+ */
+struct cvmx_bootmem_named_block_desc *
+cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
+
+/**
+ * Frees a named block.
+ *
+ * @name: name of block to free
+ * @flags: flags for passing options
+ *
+ * Returns 0 on failure
+ * 1 on success
+ */
+int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
+
+/**
+ * Frees a block to the bootmem allocator list. This must
+ * be used with care, as the size provided must match the size
+ * of the block that was allocated, or the list will become
+ * corrupted.
+ *
+ * IMPORTANT: This is only intended to be used as part of named block
+ * frees and initial population of the free memory list.
+ *
+ * @phy_addr: physical address of block
+ * @size: size of block in bytes.
+ * @flags: flags for passing options
+ *
+ * Returns 1 on success,
+ * 0 on failure
+ */
+int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
+
+/**
+ * Locks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_lock(void);
+
+/**
+ * Unlocks the bootmem allocator. This is useful in certain situations
+ * where multiple allocations must be made without being interrupted.
+ * This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
+ *
+ */
+void cvmx_bootmem_unlock(void);
+
+#endif /* __CVMX_BOOTMEM_H__ */
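A hypothetical early-init usage sketch of the allocate-only API declared above; anonymous blocks obtained this way cannot be returned to the free list, only named blocks can be released. The wrapper function is invented for illustration.

static void *example_early_buffer(void)		/* hypothetical */
{
	/* 1 MB block, 4 KB aligned, taken from the bootloader's free list */
	void *buf = cvmx_bootmem_alloc(1 << 20, 4096);

	if (!buf)
		return NULL;	/* allocation failed; nothing was reserved */
	return buf;
}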
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
new file mode 100644
index 000000000000..f8f05b7764b7
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu-defs.h
@@ -0,0 +1,1616 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_CIU_DEFS_H__
+#define __CVMX_CIU_DEFS_H__
+
+#define CVMX_CIU_BIST \
+ CVMX_ADD_IO_SEG(0x0001070000000730ull)
+#define CVMX_CIU_DINT \
+ CVMX_ADD_IO_SEG(0x0001070000000720ull)
+#define CVMX_CIU_FUSE \
+ CVMX_ADD_IO_SEG(0x0001070000000728ull)
+#define CVMX_CIU_GSTOP \
+ CVMX_ADD_IO_SEG(0x0001070000000710ull)
+#define CVMX_CIU_INTX_EN0(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN0_W1C(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000002200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN0_W1S(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000006200ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1_W1C(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000002208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN1_W1S(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000006208ull + (((offset) & 63) * 16))
+#define CVMX_CIU_INTX_EN4_0(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_0_W1C(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000002C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_0_W1S(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000006C80ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1_W1C(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000002C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_EN4_1_W1S(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000006C88ull + (((offset) & 15) * 16))
+#define CVMX_CIU_INTX_SUM0(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000000ull + (((offset) & 63) * 8))
+#define CVMX_CIU_INTX_SUM4(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000C00ull + (((offset) & 15) * 8))
+#define CVMX_CIU_INT_SUM1 \
+ CVMX_ADD_IO_SEG(0x0001070000000108ull)
+#define CVMX_CIU_MBOX_CLRX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000680ull + (((offset) & 15) * 8))
+#define CVMX_CIU_MBOX_SETX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000600ull + (((offset) & 15) * 8))
+#define CVMX_CIU_NMI \
+ CVMX_ADD_IO_SEG(0x0001070000000718ull)
+#define CVMX_CIU_PCI_INTA \
+ CVMX_ADD_IO_SEG(0x0001070000000750ull)
+#define CVMX_CIU_PP_DBG \
+ CVMX_ADD_IO_SEG(0x0001070000000708ull)
+#define CVMX_CIU_PP_POKEX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000580ull + (((offset) & 15) * 8))
+#define CVMX_CIU_PP_RST \
+ CVMX_ADD_IO_SEG(0x0001070000000700ull)
+#define CVMX_CIU_QLM_DCOK \
+ CVMX_ADD_IO_SEG(0x0001070000000760ull)
+#define CVMX_CIU_QLM_JTGC \
+ CVMX_ADD_IO_SEG(0x0001070000000768ull)
+#define CVMX_CIU_QLM_JTGD \
+ CVMX_ADD_IO_SEG(0x0001070000000770ull)
+#define CVMX_CIU_SOFT_BIST \
+ CVMX_ADD_IO_SEG(0x0001070000000738ull)
+#define CVMX_CIU_SOFT_PRST \
+ CVMX_ADD_IO_SEG(0x0001070000000748ull)
+#define CVMX_CIU_SOFT_PRST1 \
+ CVMX_ADD_IO_SEG(0x0001070000000758ull)
+#define CVMX_CIU_SOFT_RST \
+ CVMX_ADD_IO_SEG(0x0001070000000740ull)
+#define CVMX_CIU_TIMX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000480ull + (((offset) & 3) * 8))
+#define CVMX_CIU_WDOGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000500ull + (((offset) & 15) * 8))
+
+union cvmx_ciu_bist {
+ uint64_t u64;
+ struct cvmx_ciu_bist_s {
+ uint64_t reserved_4_63:60;
+ uint64_t bist:4;
+ } s;
+ struct cvmx_ciu_bist_s cn30xx;
+ struct cvmx_ciu_bist_s cn31xx;
+ struct cvmx_ciu_bist_s cn38xx;
+ struct cvmx_ciu_bist_s cn38xxp2;
+ struct cvmx_ciu_bist_cn50xx {
+ uint64_t reserved_2_63:62;
+ uint64_t bist:2;
+ } cn50xx;
+ struct cvmx_ciu_bist_cn52xx {
+ uint64_t reserved_3_63:61;
+ uint64_t bist:3;
+ } cn52xx;
+ struct cvmx_ciu_bist_cn52xx cn52xxp1;
+ struct cvmx_ciu_bist_s cn56xx;
+ struct cvmx_ciu_bist_s cn56xxp1;
+ struct cvmx_ciu_bist_s cn58xx;
+ struct cvmx_ciu_bist_s cn58xxp1;
+};
+
+union cvmx_ciu_dint {
+ uint64_t u64;
+ struct cvmx_ciu_dint_s {
+ uint64_t reserved_16_63:48;
+ uint64_t dint:16;
+ } s;
+ struct cvmx_ciu_dint_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t dint:1;
+ } cn30xx;
+ struct cvmx_ciu_dint_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t dint:2;
+ } cn31xx;
+ struct cvmx_ciu_dint_s cn38xx;
+ struct cvmx_ciu_dint_s cn38xxp2;
+ struct cvmx_ciu_dint_cn31xx cn50xx;
+ struct cvmx_ciu_dint_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t dint:4;
+ } cn52xx;
+ struct cvmx_ciu_dint_cn52xx cn52xxp1;
+ struct cvmx_ciu_dint_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t dint:12;
+ } cn56xx;
+ struct cvmx_ciu_dint_cn56xx cn56xxp1;
+ struct cvmx_ciu_dint_s cn58xx;
+ struct cvmx_ciu_dint_s cn58xxp1;
+};
+
+union cvmx_ciu_fuse {
+ uint64_t u64;
+ struct cvmx_ciu_fuse_s {
+ uint64_t reserved_16_63:48;
+ uint64_t fuse:16;
+ } s;
+ struct cvmx_ciu_fuse_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t fuse:1;
+ } cn30xx;
+ struct cvmx_ciu_fuse_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t fuse:2;
+ } cn31xx;
+ struct cvmx_ciu_fuse_s cn38xx;
+ struct cvmx_ciu_fuse_s cn38xxp2;
+ struct cvmx_ciu_fuse_cn31xx cn50xx;
+ struct cvmx_ciu_fuse_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t fuse:4;
+ } cn52xx;
+ struct cvmx_ciu_fuse_cn52xx cn52xxp1;
+ struct cvmx_ciu_fuse_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t fuse:12;
+ } cn56xx;
+ struct cvmx_ciu_fuse_cn56xx cn56xxp1;
+ struct cvmx_ciu_fuse_s cn58xx;
+ struct cvmx_ciu_fuse_s cn58xxp1;
+};
+
+union cvmx_ciu_gstop {
+ uint64_t u64;
+ struct cvmx_ciu_gstop_s {
+ uint64_t reserved_1_63:63;
+ uint64_t gstop:1;
+ } s;
+ struct cvmx_ciu_gstop_s cn30xx;
+ struct cvmx_ciu_gstop_s cn31xx;
+ struct cvmx_ciu_gstop_s cn38xx;
+ struct cvmx_ciu_gstop_s cn38xxp2;
+ struct cvmx_ciu_gstop_s cn50xx;
+ struct cvmx_ciu_gstop_s cn52xx;
+ struct cvmx_ciu_gstop_s cn52xxp1;
+ struct cvmx_ciu_gstop_s cn56xx;
+ struct cvmx_ciu_gstop_s cn56xxp1;
+ struct cvmx_ciu_gstop_s cn58xx;
+ struct cvmx_ciu_gstop_s cn58xxp1;
+};
+
+union cvmx_ciu_intx_en0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en0_cn30xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t reserved_47_47:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn30xx;
+ struct cvmx_ciu_intx_en0_cn31xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn31xx;
+ struct cvmx_ciu_intx_en0_cn38xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn38xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_en0_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en0_cn56xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn56xx;
+ struct cvmx_ciu_intx_en0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en0_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en0_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1c_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en0_w1c_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1c_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en0_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en0_w1s_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en0_w1s_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en0_w1s_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en1 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en1_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t wdog:1;
+ } cn30xx;
+ struct cvmx_ciu_intx_en1_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t wdog:2;
+ } cn31xx;
+ struct cvmx_ciu_intx_en1_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn38xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_en1_cn31xx cn50xx;
+ struct cvmx_ciu_intx_en1_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_cn52xxp1 {
+ uint64_t reserved_19_63:45;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en1_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xx;
+ struct cvmx_ciu_intx_en1_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en1_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1c_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en1_w1c_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1c_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1c_cn58xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en1_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en1_w1s_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en1_w1s_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en1_w1s_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en1_w1s_cn58xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en4_0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en4_0_cn50xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t reserved_47_47:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_en4_0_cn56xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_0_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_0_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en4_0_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1c_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1c_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1c_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1c_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en4_0_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_0_w1s_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_en4_0_w1s_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_0_w1s_s cn56xx;
+ struct cvmx_ciu_intx_en4_0_w1s_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t reserved_44_44:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en4_1 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en4_1_cn50xx {
+ uint64_t reserved_2_63:62;
+ uint64_t wdog:2;
+ } cn50xx;
+ struct cvmx_ciu_intx_en4_1_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_cn52xxp1 {
+ uint64_t reserved_19_63:45;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xxp1;
+ struct cvmx_ciu_intx_en4_1_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_en4_1_cn58xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn58xx;
+ struct cvmx_ciu_intx_en4_1_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_en4_1_w1c {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1c_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1c_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1c_cn58xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_en4_1_w1s {
+ uint64_t u64;
+ struct cvmx_ciu_intx_en4_1_w1s_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_intx_en4_1_w1s_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_intx_en4_1_w1s_cn58xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn58xx;
+};
+
+union cvmx_ciu_intx_sum0 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum0_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_sum0_cn30xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t reserved_47_47:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn30xx;
+ struct cvmx_ciu_intx_sum0_cn31xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn31xx;
+ struct cvmx_ciu_intx_sum0_cn38xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn38xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn38xxp2;
+ struct cvmx_ciu_intx_sum0_cn30xx cn50xx;
+ struct cvmx_ciu_intx_sum0_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_sum0_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum0_cn56xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn56xx;
+ struct cvmx_ciu_intx_sum0_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xx;
+ struct cvmx_ciu_intx_sum0_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_intx_sum4 {
+ uint64_t u64;
+ struct cvmx_ciu_intx_sum4_s {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } s;
+ struct cvmx_ciu_intx_sum4_cn50xx {
+ uint64_t reserved_59_63:5;
+ uint64_t mpi:1;
+ uint64_t pcm:1;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t reserved_47_47:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn50xx;
+ struct cvmx_ciu_intx_sum4_cn52xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t reserved_51_51:1;
+ uint64_t ipd_drp:1;
+ uint64_t reserved_49_49:1;
+ uint64_t gmx_drp:1;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn52xx;
+ struct cvmx_ciu_intx_sum4_cn52xx cn52xxp1;
+ struct cvmx_ciu_intx_sum4_cn56xx {
+ uint64_t bootdma:1;
+ uint64_t mii:1;
+ uint64_t ipdppthr:1;
+ uint64_t powiq:1;
+ uint64_t twsi2:1;
+ uint64_t reserved_57_58:2;
+ uint64_t usb:1;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn56xx;
+ struct cvmx_ciu_intx_sum4_cn56xx cn56xxp1;
+ struct cvmx_ciu_intx_sum4_cn58xx {
+ uint64_t reserved_56_63:8;
+ uint64_t timer:4;
+ uint64_t key_zero:1;
+ uint64_t ipd_drp:1;
+ uint64_t gmx_drp:2;
+ uint64_t trace:1;
+ uint64_t rml:1;
+ uint64_t twsi:1;
+ uint64_t wdog_sum:1;
+ uint64_t pci_msi:4;
+ uint64_t pci_int:4;
+ uint64_t uart:2;
+ uint64_t mbox:2;
+ uint64_t gpio:16;
+ uint64_t workq:16;
+ } cn58xx;
+ struct cvmx_ciu_intx_sum4_cn58xx cn58xxp1;
+};
+
+union cvmx_ciu_int_sum1 {
+ uint64_t u64;
+ struct cvmx_ciu_int_sum1_s {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t wdog:16;
+ } s;
+ struct cvmx_ciu_int_sum1_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t wdog:1;
+ } cn30xx;
+ struct cvmx_ciu_int_sum1_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t wdog:2;
+ } cn31xx;
+ struct cvmx_ciu_int_sum1_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t wdog:16;
+ } cn38xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn38xxp2;
+ struct cvmx_ciu_int_sum1_cn31xx cn50xx;
+ struct cvmx_ciu_int_sum1_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t nand:1;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xx;
+ struct cvmx_ciu_int_sum1_cn52xxp1 {
+ uint64_t reserved_19_63:45;
+ uint64_t mii1:1;
+ uint64_t usb1:1;
+ uint64_t uart2:1;
+ uint64_t reserved_4_15:12;
+ uint64_t wdog:4;
+ } cn52xxp1;
+ struct cvmx_ciu_int_sum1_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t wdog:12;
+ } cn56xx;
+ struct cvmx_ciu_int_sum1_cn56xx cn56xxp1;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xx;
+ struct cvmx_ciu_int_sum1_cn38xx cn58xxp1;
+};
+
+union cvmx_ciu_mbox_clrx {
+ uint64_t u64;
+ struct cvmx_ciu_mbox_clrx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t bits:32;
+ } s;
+ struct cvmx_ciu_mbox_clrx_s cn30xx;
+ struct cvmx_ciu_mbox_clrx_s cn31xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xx;
+ struct cvmx_ciu_mbox_clrx_s cn38xxp2;
+ struct cvmx_ciu_mbox_clrx_s cn50xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xx;
+ struct cvmx_ciu_mbox_clrx_s cn52xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn56xx;
+ struct cvmx_ciu_mbox_clrx_s cn56xxp1;
+ struct cvmx_ciu_mbox_clrx_s cn58xx;
+ struct cvmx_ciu_mbox_clrx_s cn58xxp1;
+};
+
+union cvmx_ciu_mbox_setx {
+ uint64_t u64;
+ struct cvmx_ciu_mbox_setx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t bits:32;
+ } s;
+ struct cvmx_ciu_mbox_setx_s cn30xx;
+ struct cvmx_ciu_mbox_setx_s cn31xx;
+ struct cvmx_ciu_mbox_setx_s cn38xx;
+ struct cvmx_ciu_mbox_setx_s cn38xxp2;
+ struct cvmx_ciu_mbox_setx_s cn50xx;
+ struct cvmx_ciu_mbox_setx_s cn52xx;
+ struct cvmx_ciu_mbox_setx_s cn52xxp1;
+ struct cvmx_ciu_mbox_setx_s cn56xx;
+ struct cvmx_ciu_mbox_setx_s cn56xxp1;
+ struct cvmx_ciu_mbox_setx_s cn58xx;
+ struct cvmx_ciu_mbox_setx_s cn58xxp1;
+};
+
+union cvmx_ciu_nmi {
+ uint64_t u64;
+ struct cvmx_ciu_nmi_s {
+ uint64_t reserved_16_63:48;
+ uint64_t nmi:16;
+ } s;
+ struct cvmx_ciu_nmi_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t nmi:1;
+ } cn30xx;
+ struct cvmx_ciu_nmi_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t nmi:2;
+ } cn31xx;
+ struct cvmx_ciu_nmi_s cn38xx;
+ struct cvmx_ciu_nmi_s cn38xxp2;
+ struct cvmx_ciu_nmi_cn31xx cn50xx;
+ struct cvmx_ciu_nmi_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t nmi:4;
+ } cn52xx;
+ struct cvmx_ciu_nmi_cn52xx cn52xxp1;
+ struct cvmx_ciu_nmi_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t nmi:12;
+ } cn56xx;
+ struct cvmx_ciu_nmi_cn56xx cn56xxp1;
+ struct cvmx_ciu_nmi_s cn58xx;
+ struct cvmx_ciu_nmi_s cn58xxp1;
+};
+
+union cvmx_ciu_pci_inta {
+ uint64_t u64;
+ struct cvmx_ciu_pci_inta_s {
+ uint64_t reserved_2_63:62;
+ uint64_t intr:2;
+ } s;
+ struct cvmx_ciu_pci_inta_s cn30xx;
+ struct cvmx_ciu_pci_inta_s cn31xx;
+ struct cvmx_ciu_pci_inta_s cn38xx;
+ struct cvmx_ciu_pci_inta_s cn38xxp2;
+ struct cvmx_ciu_pci_inta_s cn50xx;
+ struct cvmx_ciu_pci_inta_s cn52xx;
+ struct cvmx_ciu_pci_inta_s cn52xxp1;
+ struct cvmx_ciu_pci_inta_s cn56xx;
+ struct cvmx_ciu_pci_inta_s cn56xxp1;
+ struct cvmx_ciu_pci_inta_s cn58xx;
+ struct cvmx_ciu_pci_inta_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_dbg {
+ uint64_t u64;
+ struct cvmx_ciu_pp_dbg_s {
+ uint64_t reserved_16_63:48;
+ uint64_t ppdbg:16;
+ } s;
+ struct cvmx_ciu_pp_dbg_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t ppdbg:1;
+ } cn30xx;
+ struct cvmx_ciu_pp_dbg_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t ppdbg:2;
+ } cn31xx;
+ struct cvmx_ciu_pp_dbg_s cn38xx;
+ struct cvmx_ciu_pp_dbg_s cn38xxp2;
+ struct cvmx_ciu_pp_dbg_cn31xx cn50xx;
+ struct cvmx_ciu_pp_dbg_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t ppdbg:4;
+ } cn52xx;
+ struct cvmx_ciu_pp_dbg_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_dbg_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ppdbg:12;
+ } cn56xx;
+ struct cvmx_ciu_pp_dbg_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_dbg_s cn58xx;
+ struct cvmx_ciu_pp_dbg_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_pokex {
+ uint64_t u64;
+ struct cvmx_ciu_pp_pokex_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_ciu_pp_pokex_s cn30xx;
+ struct cvmx_ciu_pp_pokex_s cn31xx;
+ struct cvmx_ciu_pp_pokex_s cn38xx;
+ struct cvmx_ciu_pp_pokex_s cn38xxp2;
+ struct cvmx_ciu_pp_pokex_s cn50xx;
+ struct cvmx_ciu_pp_pokex_s cn52xx;
+ struct cvmx_ciu_pp_pokex_s cn52xxp1;
+ struct cvmx_ciu_pp_pokex_s cn56xx;
+ struct cvmx_ciu_pp_pokex_s cn56xxp1;
+ struct cvmx_ciu_pp_pokex_s cn58xx;
+ struct cvmx_ciu_pp_pokex_s cn58xxp1;
+};
+
+union cvmx_ciu_pp_rst {
+ uint64_t u64;
+ struct cvmx_ciu_pp_rst_s {
+ uint64_t reserved_16_63:48;
+ uint64_t rst:15;
+ uint64_t rst0:1;
+ } s;
+ struct cvmx_ciu_pp_rst_cn30xx {
+ uint64_t reserved_1_63:63;
+ uint64_t rst0:1;
+ } cn30xx;
+ struct cvmx_ciu_pp_rst_cn31xx {
+ uint64_t reserved_2_63:62;
+ uint64_t rst:1;
+ uint64_t rst0:1;
+ } cn31xx;
+ struct cvmx_ciu_pp_rst_s cn38xx;
+ struct cvmx_ciu_pp_rst_s cn38xxp2;
+ struct cvmx_ciu_pp_rst_cn31xx cn50xx;
+ struct cvmx_ciu_pp_rst_cn52xx {
+ uint64_t reserved_4_63:60;
+ uint64_t rst:3;
+ uint64_t rst0:1;
+ } cn52xx;
+ struct cvmx_ciu_pp_rst_cn52xx cn52xxp1;
+ struct cvmx_ciu_pp_rst_cn56xx {
+ uint64_t reserved_12_63:52;
+ uint64_t rst:11;
+ uint64_t rst0:1;
+ } cn56xx;
+ struct cvmx_ciu_pp_rst_cn56xx cn56xxp1;
+ struct cvmx_ciu_pp_rst_s cn58xx;
+ struct cvmx_ciu_pp_rst_s cn58xxp1;
+};
+
+union cvmx_ciu_qlm_dcok {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_dcok_s {
+ uint64_t reserved_4_63:60;
+ uint64_t qlm_dcok:4;
+ } s;
+ struct cvmx_ciu_qlm_dcok_cn52xx {
+ uint64_t reserved_2_63:62;
+ uint64_t qlm_dcok:2;
+ } cn52xx;
+ struct cvmx_ciu_qlm_dcok_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_dcok_s cn56xx;
+ struct cvmx_ciu_qlm_dcok_s cn56xxp1;
+};
+
+union cvmx_ciu_qlm_jtgc {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgc_s {
+ uint64_t reserved_11_63:53;
+ uint64_t clk_div:3;
+ uint64_t reserved_6_7:2;
+ uint64_t mux_sel:2;
+ uint64_t bypass:4;
+ } s;
+ struct cvmx_ciu_qlm_jtgc_cn52xx {
+ uint64_t reserved_11_63:53;
+ uint64_t clk_div:3;
+ uint64_t reserved_5_7:3;
+ uint64_t mux_sel:1;
+ uint64_t reserved_2_3:2;
+ uint64_t bypass:2;
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgc_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgc_s cn56xx;
+ struct cvmx_ciu_qlm_jtgc_s cn56xxp1;
+};
+
+union cvmx_ciu_qlm_jtgd {
+ uint64_t u64;
+ struct cvmx_ciu_qlm_jtgd_s {
+ uint64_t capture:1;
+ uint64_t shift:1;
+ uint64_t update:1;
+ uint64_t reserved_44_60:17;
+ uint64_t select:4;
+ uint64_t reserved_37_39:3;
+ uint64_t shft_cnt:5;
+ uint64_t shft_reg:32;
+ } s;
+ struct cvmx_ciu_qlm_jtgd_cn52xx {
+ uint64_t capture:1;
+ uint64_t shift:1;
+ uint64_t update:1;
+ uint64_t reserved_42_60:19;
+ uint64_t select:2;
+ uint64_t reserved_37_39:3;
+ uint64_t shft_cnt:5;
+ uint64_t shft_reg:32;
+ } cn52xx;
+ struct cvmx_ciu_qlm_jtgd_cn52xx cn52xxp1;
+ struct cvmx_ciu_qlm_jtgd_s cn56xx;
+ struct cvmx_ciu_qlm_jtgd_cn56xxp1 {
+ uint64_t capture:1;
+ uint64_t shift:1;
+ uint64_t update:1;
+ uint64_t reserved_37_60:24;
+ uint64_t shft_cnt:5;
+ uint64_t shft_reg:32;
+ } cn56xxp1;
+};
+
+union cvmx_ciu_soft_bist {
+ uint64_t u64;
+ struct cvmx_ciu_soft_bist_s {
+ uint64_t reserved_1_63:63;
+ uint64_t soft_bist:1;
+ } s;
+ struct cvmx_ciu_soft_bist_s cn30xx;
+ struct cvmx_ciu_soft_bist_s cn31xx;
+ struct cvmx_ciu_soft_bist_s cn38xx;
+ struct cvmx_ciu_soft_bist_s cn38xxp2;
+ struct cvmx_ciu_soft_bist_s cn50xx;
+ struct cvmx_ciu_soft_bist_s cn52xx;
+ struct cvmx_ciu_soft_bist_s cn52xxp1;
+ struct cvmx_ciu_soft_bist_s cn56xx;
+ struct cvmx_ciu_soft_bist_s cn56xxp1;
+ struct cvmx_ciu_soft_bist_s cn58xx;
+ struct cvmx_ciu_soft_bist_s cn58xxp1;
+};
+
+union cvmx_ciu_soft_prst {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst_s {
+ uint64_t reserved_3_63:61;
+ uint64_t host64:1;
+ uint64_t npi:1;
+ uint64_t soft_prst:1;
+ } s;
+ struct cvmx_ciu_soft_prst_s cn30xx;
+ struct cvmx_ciu_soft_prst_s cn31xx;
+ struct cvmx_ciu_soft_prst_s cn38xx;
+ struct cvmx_ciu_soft_prst_s cn38xxp2;
+ struct cvmx_ciu_soft_prst_s cn50xx;
+ struct cvmx_ciu_soft_prst_cn52xx {
+ uint64_t reserved_1_63:63;
+ uint64_t soft_prst:1;
+ } cn52xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn52xxp1;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xx;
+ struct cvmx_ciu_soft_prst_cn52xx cn56xxp1;
+ struct cvmx_ciu_soft_prst_s cn58xx;
+ struct cvmx_ciu_soft_prst_s cn58xxp1;
+};
+
+union cvmx_ciu_soft_prst1 {
+ uint64_t u64;
+ struct cvmx_ciu_soft_prst1_s {
+ uint64_t reserved_1_63:63;
+ uint64_t soft_prst:1;
+ } s;
+ struct cvmx_ciu_soft_prst1_s cn52xx;
+ struct cvmx_ciu_soft_prst1_s cn52xxp1;
+ struct cvmx_ciu_soft_prst1_s cn56xx;
+ struct cvmx_ciu_soft_prst1_s cn56xxp1;
+};
+
+union cvmx_ciu_soft_rst {
+ uint64_t u64;
+ struct cvmx_ciu_soft_rst_s {
+ uint64_t reserved_1_63:63;
+ uint64_t soft_rst:1;
+ } s;
+ struct cvmx_ciu_soft_rst_s cn30xx;
+ struct cvmx_ciu_soft_rst_s cn31xx;
+ struct cvmx_ciu_soft_rst_s cn38xx;
+ struct cvmx_ciu_soft_rst_s cn38xxp2;
+ struct cvmx_ciu_soft_rst_s cn50xx;
+ struct cvmx_ciu_soft_rst_s cn52xx;
+ struct cvmx_ciu_soft_rst_s cn52xxp1;
+ struct cvmx_ciu_soft_rst_s cn56xx;
+ struct cvmx_ciu_soft_rst_s cn56xxp1;
+ struct cvmx_ciu_soft_rst_s cn58xx;
+ struct cvmx_ciu_soft_rst_s cn58xxp1;
+};
+
+union cvmx_ciu_timx {
+ uint64_t u64;
+ struct cvmx_ciu_timx_s {
+ uint64_t reserved_37_63:27;
+ uint64_t one_shot:1;
+ uint64_t len:36;
+ } s;
+ struct cvmx_ciu_timx_s cn30xx;
+ struct cvmx_ciu_timx_s cn31xx;
+ struct cvmx_ciu_timx_s cn38xx;
+ struct cvmx_ciu_timx_s cn38xxp2;
+ struct cvmx_ciu_timx_s cn50xx;
+ struct cvmx_ciu_timx_s cn52xx;
+ struct cvmx_ciu_timx_s cn52xxp1;
+ struct cvmx_ciu_timx_s cn56xx;
+ struct cvmx_ciu_timx_s cn56xxp1;
+ struct cvmx_ciu_timx_s cn58xx;
+ struct cvmx_ciu_timx_s cn58xxp1;
+};
+
+union cvmx_ciu_wdogx {
+ uint64_t u64;
+ struct cvmx_ciu_wdogx_s {
+ uint64_t reserved_46_63:18;
+ uint64_t gstopen:1;
+ uint64_t dstop:1;
+ uint64_t cnt:24;
+ uint64_t len:16;
+ uint64_t state:2;
+ uint64_t mode:2;
+ } s;
+ struct cvmx_ciu_wdogx_s cn30xx;
+ struct cvmx_ciu_wdogx_s cn31xx;
+ struct cvmx_ciu_wdogx_s cn38xx;
+ struct cvmx_ciu_wdogx_s cn38xxp2;
+ struct cvmx_ciu_wdogx_s cn50xx;
+ struct cvmx_ciu_wdogx_s cn52xx;
+ struct cvmx_ciu_wdogx_s cn52xxp1;
+ struct cvmx_ciu_wdogx_s cn56xx;
+ struct cvmx_ciu_wdogx_s cn56xxp1;
+ struct cvmx_ciu_wdogx_s cn58xx;
+ struct cvmx_ciu_wdogx_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
new file mode 100644
index 000000000000..5fdd6ba48a05
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -0,0 +1,219 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_GPIO_DEFS_H__
+#define __CVMX_GPIO_DEFS_H__
+
+#define CVMX_GPIO_BIT_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000800ull + (((offset) & 15) * 8))
+#define CVMX_GPIO_BOOT_ENA \
+ CVMX_ADD_IO_SEG(0x00010700000008A8ull)
+#define CVMX_GPIO_CLK_GENX(offset) \
+ CVMX_ADD_IO_SEG(0x00010700000008C0ull + (((offset) & 3) * 8))
+#define CVMX_GPIO_DBG_ENA \
+ CVMX_ADD_IO_SEG(0x00010700000008A0ull)
+#define CVMX_GPIO_INT_CLR \
+ CVMX_ADD_IO_SEG(0x0001070000000898ull)
+#define CVMX_GPIO_RX_DAT \
+ CVMX_ADD_IO_SEG(0x0001070000000880ull)
+#define CVMX_GPIO_TX_CLR \
+ CVMX_ADD_IO_SEG(0x0001070000000890ull)
+#define CVMX_GPIO_TX_SET \
+ CVMX_ADD_IO_SEG(0x0001070000000888ull)
+#define CVMX_GPIO_XBIT_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001070000000900ull + (((offset) & 31) * 8) - 8 * 16)
+
+union cvmx_gpio_bit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_bit_cfgx_s {
+ uint64_t reserved_15_63:49;
+ uint64_t clk_gen:1;
+ uint64_t clk_sel:2;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+ } s;
+ struct cvmx_gpio_bit_cfgx_cn30xx {
+ uint64_t reserved_12_63:52;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t int_type:1;
+ uint64_t int_en:1;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+ } cn30xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
+ struct cvmx_gpio_bit_cfgx_s cn52xx;
+ struct cvmx_gpio_bit_cfgx_s cn52xxp1;
+ struct cvmx_gpio_bit_cfgx_s cn56xx;
+ struct cvmx_gpio_bit_cfgx_s cn56xxp1;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
+ struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
+};
+
+union cvmx_gpio_boot_ena {
+ uint64_t u64;
+ struct cvmx_gpio_boot_ena_s {
+ uint64_t reserved_12_63:52;
+ uint64_t boot_ena:4;
+ uint64_t reserved_0_7:8;
+ } s;
+ struct cvmx_gpio_boot_ena_s cn30xx;
+ struct cvmx_gpio_boot_ena_s cn31xx;
+ struct cvmx_gpio_boot_ena_s cn50xx;
+};
+
+union cvmx_gpio_clk_genx {
+ uint64_t u64;
+ struct cvmx_gpio_clk_genx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t n:32;
+ } s;
+ struct cvmx_gpio_clk_genx_s cn52xx;
+ struct cvmx_gpio_clk_genx_s cn52xxp1;
+ struct cvmx_gpio_clk_genx_s cn56xx;
+ struct cvmx_gpio_clk_genx_s cn56xxp1;
+};
+
+union cvmx_gpio_dbg_ena {
+ uint64_t u64;
+ struct cvmx_gpio_dbg_ena_s {
+ uint64_t reserved_21_63:43;
+ uint64_t dbg_ena:21;
+ } s;
+ struct cvmx_gpio_dbg_ena_s cn30xx;
+ struct cvmx_gpio_dbg_ena_s cn31xx;
+ struct cvmx_gpio_dbg_ena_s cn50xx;
+};
+
+union cvmx_gpio_int_clr {
+ uint64_t u64;
+ struct cvmx_gpio_int_clr_s {
+ uint64_t reserved_16_63:48;
+ uint64_t type:16;
+ } s;
+ struct cvmx_gpio_int_clr_s cn30xx;
+ struct cvmx_gpio_int_clr_s cn31xx;
+ struct cvmx_gpio_int_clr_s cn38xx;
+ struct cvmx_gpio_int_clr_s cn38xxp2;
+ struct cvmx_gpio_int_clr_s cn50xx;
+ struct cvmx_gpio_int_clr_s cn52xx;
+ struct cvmx_gpio_int_clr_s cn52xxp1;
+ struct cvmx_gpio_int_clr_s cn56xx;
+ struct cvmx_gpio_int_clr_s cn56xxp1;
+ struct cvmx_gpio_int_clr_s cn58xx;
+ struct cvmx_gpio_int_clr_s cn58xxp1;
+};
+
+union cvmx_gpio_rx_dat {
+ uint64_t u64;
+ struct cvmx_gpio_rx_dat_s {
+ uint64_t reserved_24_63:40;
+ uint64_t dat:24;
+ } s;
+ struct cvmx_gpio_rx_dat_s cn30xx;
+ struct cvmx_gpio_rx_dat_s cn31xx;
+ struct cvmx_gpio_rx_dat_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t dat:16;
+ } cn38xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
+ struct cvmx_gpio_rx_dat_s cn50xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn52xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn56xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
+ struct cvmx_gpio_rx_dat_cn38xx cn58xx;
+ struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_tx_clr {
+ uint64_t u64;
+ struct cvmx_gpio_tx_clr_s {
+ uint64_t reserved_24_63:40;
+ uint64_t clr:24;
+ } s;
+ struct cvmx_gpio_tx_clr_s cn30xx;
+ struct cvmx_gpio_tx_clr_s cn31xx;
+ struct cvmx_gpio_tx_clr_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t clr:16;
+ } cn38xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
+ struct cvmx_gpio_tx_clr_s cn50xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn52xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn56xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
+ struct cvmx_gpio_tx_clr_cn38xx cn58xx;
+ struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_tx_set {
+ uint64_t u64;
+ struct cvmx_gpio_tx_set_s {
+ uint64_t reserved_24_63:40;
+ uint64_t set:24;
+ } s;
+ struct cvmx_gpio_tx_set_s cn30xx;
+ struct cvmx_gpio_tx_set_s cn31xx;
+ struct cvmx_gpio_tx_set_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t set:16;
+ } cn38xx;
+ struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
+ struct cvmx_gpio_tx_set_s cn50xx;
+ struct cvmx_gpio_tx_set_cn38xx cn52xx;
+ struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn56xx;
+ struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
+ struct cvmx_gpio_tx_set_cn38xx cn58xx;
+ struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
+};
+
+union cvmx_gpio_xbit_cfgx {
+ uint64_t u64;
+ struct cvmx_gpio_xbit_cfgx_s {
+ uint64_t reserved_12_63:52;
+ uint64_t fil_sel:4;
+ uint64_t fil_cnt:4;
+ uint64_t reserved_2_3:2;
+ uint64_t rx_xor:1;
+ uint64_t tx_oe:1;
+ } s;
+ struct cvmx_gpio_xbit_cfgx_s cn30xx;
+ struct cvmx_gpio_xbit_cfgx_s cn31xx;
+ struct cvmx_gpio_xbit_cfgx_s cn50xx;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
new file mode 100644
index 000000000000..0ee36baec500
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -0,0 +1,530 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IOB_DEFS_H__
+#define __CVMX_IOB_DEFS_H__
+
+#define CVMX_IOB_BIST_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800F00007F8ull)
+#define CVMX_IOB_CTL_STATUS \
+ CVMX_ADD_IO_SEG(0x00011800F0000050ull)
+#define CVMX_IOB_DWB_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000028ull)
+#define CVMX_IOB_FAU_TIMEOUT \
+ CVMX_ADD_IO_SEG(0x00011800F0000000ull)
+#define CVMX_IOB_I2C_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000010ull)
+#define CVMX_IOB_INB_CONTROL_MATCH \
+ CVMX_ADD_IO_SEG(0x00011800F0000078ull)
+#define CVMX_IOB_INB_CONTROL_MATCH_ENB \
+ CVMX_ADD_IO_SEG(0x00011800F0000088ull)
+#define CVMX_IOB_INB_DATA_MATCH \
+ CVMX_ADD_IO_SEG(0x00011800F0000070ull)
+#define CVMX_IOB_INB_DATA_MATCH_ENB \
+ CVMX_ADD_IO_SEG(0x00011800F0000080ull)
+#define CVMX_IOB_INT_ENB \
+ CVMX_ADD_IO_SEG(0x00011800F0000060ull)
+#define CVMX_IOB_INT_SUM \
+ CVMX_ADD_IO_SEG(0x00011800F0000058ull)
+#define CVMX_IOB_N2C_L2C_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000020ull)
+#define CVMX_IOB_N2C_RSP_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000008ull)
+#define CVMX_IOB_OUTB_COM_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000040ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH \
+ CVMX_ADD_IO_SEG(0x00011800F0000098ull)
+#define CVMX_IOB_OUTB_CONTROL_MATCH_ENB \
+ CVMX_ADD_IO_SEG(0x00011800F00000A8ull)
+#define CVMX_IOB_OUTB_DATA_MATCH \
+ CVMX_ADD_IO_SEG(0x00011800F0000090ull)
+#define CVMX_IOB_OUTB_DATA_MATCH_ENB \
+ CVMX_ADD_IO_SEG(0x00011800F00000A0ull)
+#define CVMX_IOB_OUTB_FPA_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000048ull)
+#define CVMX_IOB_OUTB_REQ_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000038ull)
+#define CVMX_IOB_P2C_REQ_PRI_CNT \
+ CVMX_ADD_IO_SEG(0x00011800F0000018ull)
+#define CVMX_IOB_PKT_ERR \
+ CVMX_ADD_IO_SEG(0x00011800F0000068ull)
+
+union cvmx_iob_bist_status {
+ uint64_t u64;
+ struct cvmx_iob_bist_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t icnrcb:1;
+ uint64_t icr0:1;
+ uint64_t icr1:1;
+ uint64_t icnr1:1;
+ uint64_t icnr0:1;
+ uint64_t ibdr0:1;
+ uint64_t ibdr1:1;
+ uint64_t ibr0:1;
+ uint64_t ibr1:1;
+ uint64_t icnrt:1;
+ uint64_t ibrq0:1;
+ uint64_t ibrq1:1;
+ uint64_t icrn0:1;
+ uint64_t icrn1:1;
+ uint64_t icrp0:1;
+ uint64_t icrp1:1;
+ uint64_t ibd:1;
+ uint64_t icd:1;
+ } s;
+ struct cvmx_iob_bist_status_s cn30xx;
+ struct cvmx_iob_bist_status_s cn31xx;
+ struct cvmx_iob_bist_status_s cn38xx;
+ struct cvmx_iob_bist_status_s cn38xxp2;
+ struct cvmx_iob_bist_status_s cn50xx;
+ struct cvmx_iob_bist_status_s cn52xx;
+ struct cvmx_iob_bist_status_s cn52xxp1;
+ struct cvmx_iob_bist_status_s cn56xx;
+ struct cvmx_iob_bist_status_s cn56xxp1;
+ struct cvmx_iob_bist_status_s cn58xx;
+ struct cvmx_iob_bist_status_s cn58xxp1;
+};
+
+union cvmx_iob_ctl_status {
+ uint64_t u64;
+ struct cvmx_iob_ctl_status_s {
+ uint64_t reserved_5_63:59;
+ uint64_t outb_mat:1;
+ uint64_t inb_mat:1;
+ uint64_t pko_enb:1;
+ uint64_t dwb_enb:1;
+ uint64_t fau_end:1;
+ } s;
+ struct cvmx_iob_ctl_status_s cn30xx;
+ struct cvmx_iob_ctl_status_s cn31xx;
+ struct cvmx_iob_ctl_status_s cn38xx;
+ struct cvmx_iob_ctl_status_s cn38xxp2;
+ struct cvmx_iob_ctl_status_s cn50xx;
+ struct cvmx_iob_ctl_status_s cn52xx;
+ struct cvmx_iob_ctl_status_s cn52xxp1;
+ struct cvmx_iob_ctl_status_s cn56xx;
+ struct cvmx_iob_ctl_status_s cn56xxp1;
+ struct cvmx_iob_ctl_status_s cn58xx;
+ struct cvmx_iob_ctl_status_s cn58xxp1;
+};
+
+union cvmx_iob_dwb_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_dwb_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xx;
+ struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_fau_timeout {
+ uint64_t u64;
+ struct cvmx_iob_fau_timeout_s {
+ uint64_t reserved_13_63:51;
+ uint64_t tout_enb:1;
+ uint64_t tout_val:12;
+ } s;
+ struct cvmx_iob_fau_timeout_s cn30xx;
+ struct cvmx_iob_fau_timeout_s cn31xx;
+ struct cvmx_iob_fau_timeout_s cn38xx;
+ struct cvmx_iob_fau_timeout_s cn38xxp2;
+ struct cvmx_iob_fau_timeout_s cn50xx;
+ struct cvmx_iob_fau_timeout_s cn52xx;
+ struct cvmx_iob_fau_timeout_s cn52xxp1;
+ struct cvmx_iob_fau_timeout_s cn56xx;
+ struct cvmx_iob_fau_timeout_s cn56xxp1;
+ struct cvmx_iob_fau_timeout_s cn58xx;
+ struct cvmx_iob_fau_timeout_s cn58xxp1;
+};
+
+union cvmx_iob_i2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_i2c_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_inb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_s {
+ uint64_t reserved_29_63:35;
+ uint64_t mask:8;
+ uint64_t opc:4;
+ uint64_t dst:9;
+ uint64_t src:8;
+ } s;
+ struct cvmx_iob_inb_control_match_s cn30xx;
+ struct cvmx_iob_inb_control_match_s cn31xx;
+ struct cvmx_iob_inb_control_match_s cn38xx;
+ struct cvmx_iob_inb_control_match_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_s cn50xx;
+ struct cvmx_iob_inb_control_match_s cn52xx;
+ struct cvmx_iob_inb_control_match_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_s cn56xx;
+ struct cvmx_iob_inb_control_match_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_s cn58xx;
+ struct cvmx_iob_inb_control_match_s cn58xxp1;
+};
+
+union cvmx_iob_inb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_control_match_enb_s {
+ uint64_t reserved_29_63:35;
+ uint64_t mask:8;
+ uint64_t opc:4;
+ uint64_t dst:9;
+ uint64_t src:8;
+ } s;
+ struct cvmx_iob_inb_control_match_enb_s cn30xx;
+ struct cvmx_iob_inb_control_match_enb_s cn31xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xx;
+ struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_control_match_enb_s cn50xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xx;
+ struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn56xx;
+ struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_control_match_enb_s cn58xx;
+ struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_inb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_iob_inb_data_match_s cn30xx;
+ struct cvmx_iob_inb_data_match_s cn31xx;
+ struct cvmx_iob_inb_data_match_s cn38xx;
+ struct cvmx_iob_inb_data_match_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_s cn50xx;
+ struct cvmx_iob_inb_data_match_s cn52xx;
+ struct cvmx_iob_inb_data_match_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_s cn56xx;
+ struct cvmx_iob_inb_data_match_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_s cn58xx;
+ struct cvmx_iob_inb_data_match_s cn58xxp1;
+};
+
+union cvmx_iob_inb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_inb_data_match_enb_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_iob_inb_data_match_enb_s cn30xx;
+ struct cvmx_iob_inb_data_match_enb_s cn31xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xx;
+ struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_inb_data_match_enb_s cn50xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xx;
+ struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn56xx;
+ struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_inb_data_match_enb_s cn58xx;
+ struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_int_enb {
+ uint64_t u64;
+ struct cvmx_iob_int_enb_s {
+ uint64_t reserved_6_63:58;
+ uint64_t p_dat:1;
+ uint64_t np_dat:1;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+ } s;
+ struct cvmx_iob_int_enb_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+ } cn30xx;
+ struct cvmx_iob_int_enb_cn30xx cn31xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xx;
+ struct cvmx_iob_int_enb_cn30xx cn38xxp2;
+ struct cvmx_iob_int_enb_s cn50xx;
+ struct cvmx_iob_int_enb_s cn52xx;
+ struct cvmx_iob_int_enb_s cn52xxp1;
+ struct cvmx_iob_int_enb_s cn56xx;
+ struct cvmx_iob_int_enb_s cn56xxp1;
+ struct cvmx_iob_int_enb_s cn58xx;
+ struct cvmx_iob_int_enb_s cn58xxp1;
+};
+
+union cvmx_iob_int_sum {
+ uint64_t u64;
+ struct cvmx_iob_int_sum_s {
+ uint64_t reserved_6_63:58;
+ uint64_t p_dat:1;
+ uint64_t np_dat:1;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+ } s;
+ struct cvmx_iob_int_sum_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t p_eop:1;
+ uint64_t p_sop:1;
+ uint64_t np_eop:1;
+ uint64_t np_sop:1;
+ } cn30xx;
+ struct cvmx_iob_int_sum_cn30xx cn31xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xx;
+ struct cvmx_iob_int_sum_cn30xx cn38xxp2;
+ struct cvmx_iob_int_sum_s cn50xx;
+ struct cvmx_iob_int_sum_s cn52xx;
+ struct cvmx_iob_int_sum_s cn52xxp1;
+ struct cvmx_iob_int_sum_s cn56xx;
+ struct cvmx_iob_int_sum_s cn56xxp1;
+ struct cvmx_iob_int_sum_s cn58xx;
+ struct cvmx_iob_int_sum_s cn58xxp1;
+};
+
+union cvmx_iob_n2c_l2c_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_n2c_rsp_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
+ struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_com_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_com_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_control_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_s {
+ uint64_t reserved_26_63:38;
+ uint64_t mask:8;
+ uint64_t eot:1;
+ uint64_t dst:8;
+ uint64_t src:9;
+ } s;
+ struct cvmx_iob_outb_control_match_s cn30xx;
+ struct cvmx_iob_outb_control_match_s cn31xx;
+ struct cvmx_iob_outb_control_match_s cn38xx;
+ struct cvmx_iob_outb_control_match_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_s cn50xx;
+ struct cvmx_iob_outb_control_match_s cn52xx;
+ struct cvmx_iob_outb_control_match_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_s cn56xx;
+ struct cvmx_iob_outb_control_match_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_s cn58xx;
+ struct cvmx_iob_outb_control_match_s cn58xxp1;
+};
+
+union cvmx_iob_outb_control_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_control_match_enb_s {
+ uint64_t reserved_26_63:38;
+ uint64_t mask:8;
+ uint64_t eot:1;
+ uint64_t dst:8;
+ uint64_t src:9;
+ } s;
+ struct cvmx_iob_outb_control_match_enb_s cn30xx;
+ struct cvmx_iob_outb_control_match_enb_s cn31xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xx;
+ struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_control_match_enb_s cn50xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xx;
+ struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn56xx;
+ struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_control_match_enb_s cn58xx;
+ struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_outb_data_match {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_iob_outb_data_match_s cn30xx;
+ struct cvmx_iob_outb_data_match_s cn31xx;
+ struct cvmx_iob_outb_data_match_s cn38xx;
+ struct cvmx_iob_outb_data_match_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_s cn50xx;
+ struct cvmx_iob_outb_data_match_s cn52xx;
+ struct cvmx_iob_outb_data_match_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_s cn56xx;
+ struct cvmx_iob_outb_data_match_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_s cn58xx;
+ struct cvmx_iob_outb_data_match_s cn58xxp1;
+};
+
+union cvmx_iob_outb_data_match_enb {
+ uint64_t u64;
+ struct cvmx_iob_outb_data_match_enb_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_iob_outb_data_match_enb_s cn30xx;
+ struct cvmx_iob_outb_data_match_enb_s cn31xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xx;
+ struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
+ struct cvmx_iob_outb_data_match_enb_s cn50xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xx;
+ struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn56xx;
+ struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
+ struct cvmx_iob_outb_data_match_enb_s cn58xx;
+ struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
+};
+
+union cvmx_iob_outb_fpa_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_fpa_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_outb_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_outb_req_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_p2c_req_pri_cnt {
+ uint64_t u64;
+ struct cvmx_iob_p2c_req_pri_cnt_s {
+ uint64_t reserved_16_63:48;
+ uint64_t cnt_enb:1;
+ uint64_t cnt_val:15;
+ } s;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
+ struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
+};
+
+union cvmx_iob_pkt_err {
+ uint64_t u64;
+ struct cvmx_iob_pkt_err_s {
+ uint64_t reserved_6_63:58;
+ uint64_t port:6;
+ } s;
+ struct cvmx_iob_pkt_err_s cn30xx;
+ struct cvmx_iob_pkt_err_s cn31xx;
+ struct cvmx_iob_pkt_err_s cn38xx;
+ struct cvmx_iob_pkt_err_s cn38xxp2;
+ struct cvmx_iob_pkt_err_s cn50xx;
+ struct cvmx_iob_pkt_err_s cn52xx;
+ struct cvmx_iob_pkt_err_s cn52xxp1;
+ struct cvmx_iob_pkt_err_s cn56xx;
+ struct cvmx_iob_pkt_err_s cn56xxp1;
+ struct cvmx_iob_pkt_err_s cn58xx;
+ struct cvmx_iob_pkt_err_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
new file mode 100644
index 000000000000..f8b8fc657d2c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -0,0 +1,877 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_IPD_DEFS_H__
+#define __CVMX_IPD_DEFS_H__
+
+#define CVMX_IPD_1ST_MBUFF_SKIP \
+ CVMX_ADD_IO_SEG(0x00014F0000000000ull)
+#define CVMX_IPD_1st_NEXT_PTR_BACK \
+ CVMX_ADD_IO_SEG(0x00014F0000000150ull)
+#define CVMX_IPD_2nd_NEXT_PTR_BACK \
+ CVMX_ADD_IO_SEG(0x00014F0000000158ull)
+#define CVMX_IPD_BIST_STATUS \
+ CVMX_ADD_IO_SEG(0x00014F00000007F8ull)
+#define CVMX_IPD_BP_PRT_RED_END \
+ CVMX_ADD_IO_SEG(0x00014F0000000328ull)
+#define CVMX_IPD_CLK_COUNT \
+ CVMX_ADD_IO_SEG(0x00014F0000000338ull)
+#define CVMX_IPD_CTL_STATUS \
+ CVMX_ADD_IO_SEG(0x00014F0000000018ull)
+#define CVMX_IPD_INT_ENB \
+ CVMX_ADD_IO_SEG(0x00014F0000000160ull)
+#define CVMX_IPD_INT_SUM \
+ CVMX_ADD_IO_SEG(0x00014F0000000168ull)
+#define CVMX_IPD_NOT_1ST_MBUFF_SKIP \
+ CVMX_ADD_IO_SEG(0x00014F0000000008ull)
+#define CVMX_IPD_PACKET_MBUFF_SIZE \
+ CVMX_ADD_IO_SEG(0x00014F0000000010ull)
+#define CVMX_IPD_PKT_PTR_VALID \
+ CVMX_ADD_IO_SEG(0x00014F0000000358ull)
+#define CVMX_IPD_PORTX_BP_PAGE_CNT(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000028ull + (((offset) & 63) * 8))
+#define CVMX_IPD_PORTX_BP_PAGE_CNT2(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000368ull + (((offset) & 63) * 8) - 8 * 36)
+#define CVMX_IPD_PORT_BP_COUNTERS2_PAIRX(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000388ull + (((offset) & 63) * 8) - 8 * 36)
+#define CVMX_IPD_PORT_BP_COUNTERS_PAIRX(offset) \
+ CVMX_ADD_IO_SEG(0x00014F00000001B8ull + (((offset) & 63) * 8))
+#define CVMX_IPD_PORT_QOS_INTX(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000808ull + (((offset) & 7) * 8))
+#define CVMX_IPD_PORT_QOS_INT_ENBX(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000848ull + (((offset) & 7) * 8))
+#define CVMX_IPD_PORT_QOS_X_CNT(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000888ull + (((offset) & 511) * 8))
+#define CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL \
+ CVMX_ADD_IO_SEG(0x00014F0000000348ull)
+#define CVMX_IPD_PRC_PORT_PTR_FIFO_CTL \
+ CVMX_ADD_IO_SEG(0x00014F0000000350ull)
+#define CVMX_IPD_PTR_COUNT \
+ CVMX_ADD_IO_SEG(0x00014F0000000320ull)
+#define CVMX_IPD_PWP_PTR_FIFO_CTL \
+ CVMX_ADD_IO_SEG(0x00014F0000000340ull)
+#define CVMX_IPD_QOS0_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F0000000178ull)
+#define CVMX_IPD_QOS1_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F0000000180ull)
+#define CVMX_IPD_QOS2_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F0000000188ull)
+#define CVMX_IPD_QOS3_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F0000000190ull)
+#define CVMX_IPD_QOS4_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F0000000198ull)
+#define CVMX_IPD_QOS5_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F00000001A0ull)
+#define CVMX_IPD_QOS6_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F00000001A8ull)
+#define CVMX_IPD_QOS7_RED_MARKS \
+ CVMX_ADD_IO_SEG(0x00014F00000001B0ull)
+#define CVMX_IPD_QOSX_RED_MARKS(offset) \
+ CVMX_ADD_IO_SEG(0x00014F0000000178ull + (((offset) & 7) * 8))
+#define CVMX_IPD_QUE0_FREE_PAGE_CNT \
+ CVMX_ADD_IO_SEG(0x00014F0000000330ull)
+#define CVMX_IPD_RED_PORT_ENABLE \
+ CVMX_ADD_IO_SEG(0x00014F00000002D8ull)
+#define CVMX_IPD_RED_PORT_ENABLE2 \
+ CVMX_ADD_IO_SEG(0x00014F00000003A8ull)
+#define CVMX_IPD_RED_QUE0_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F00000002E0ull)
+#define CVMX_IPD_RED_QUE1_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F00000002E8ull)
+#define CVMX_IPD_RED_QUE2_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F00000002F0ull)
+#define CVMX_IPD_RED_QUE3_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F00000002F8ull)
+#define CVMX_IPD_RED_QUE4_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F0000000300ull)
+#define CVMX_IPD_RED_QUE5_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F0000000308ull)
+#define CVMX_IPD_RED_QUE6_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F0000000310ull)
+#define CVMX_IPD_RED_QUE7_PARAM \
+ CVMX_ADD_IO_SEG(0x00014F0000000318ull)
+#define CVMX_IPD_RED_QUEX_PARAM(offset) \
+ CVMX_ADD_IO_SEG(0x00014F00000002E0ull + (((offset) & 7) * 8))
+#define CVMX_IPD_SUB_PORT_BP_PAGE_CNT \
+ CVMX_ADD_IO_SEG(0x00014F0000000148ull)
+#define CVMX_IPD_SUB_PORT_FCS \
+ CVMX_ADD_IO_SEG(0x00014F0000000170ull)
+#define CVMX_IPD_SUB_PORT_QOS_CNT \
+ CVMX_ADD_IO_SEG(0x00014F0000000800ull)
+#define CVMX_IPD_WQE_FPA_QUEUE \
+ CVMX_ADD_IO_SEG(0x00014F0000000020ull)
+#define CVMX_IPD_WQE_PTR_VALID \
+ CVMX_ADD_IO_SEG(0x00014F0000000360ull)
+
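[Editorial note, not part of the patch] The indexed macros above are the fixed per-register defines with the offset folded into the low address bits; a quick expansion check of one of them, as plain arithmetic:

	/* CVMX_IPD_QOSX_RED_MARKS(3)
	 *   = CVMX_ADD_IO_SEG(0x00014F0000000178ull + ((3 & 7) * 8))
	 *   = CVMX_ADD_IO_SEG(0x00014F0000000190ull)
	 * i.e. the same address as the fixed CVMX_IPD_QOS3_RED_MARKS define above.
	 */
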
+union cvmx_ipd_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_1st_mbuff_skip_s {
+ uint64_t reserved_6_63:58;
+ uint64_t skip_sz:6;
+ } s;
+ struct cvmx_ipd_1st_mbuff_skip_s cn30xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn31xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn38xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2;
+ struct cvmx_ipd_1st_mbuff_skip_s cn50xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn52xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn56xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
+ struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
+ struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
+};
+
+union cvmx_ipd_1st_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_1st_next_ptr_back_s {
+ uint64_t reserved_4_63:60;
+ uint64_t back:4;
+ } s;
+ struct cvmx_ipd_1st_next_ptr_back_s cn30xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn31xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn38xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2;
+ struct cvmx_ipd_1st_next_ptr_back_s cn50xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn52xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn56xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
+ struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
+ struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
+};
+
+union cvmx_ipd_2nd_next_ptr_back {
+ uint64_t u64;
+ struct cvmx_ipd_2nd_next_ptr_back_s {
+ uint64_t reserved_4_63:60;
+ uint64_t back:4;
+ } s;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn30xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn31xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn38xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn50xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn52xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn56xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
+ struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
+};
+
+union cvmx_ipd_bist_status {
+ uint64_t u64;
+ struct cvmx_ipd_bist_status_s {
+ uint64_t reserved_18_63:46;
+ uint64_t csr_mem:1;
+ uint64_t csr_ncmd:1;
+ uint64_t pwq_wqed:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_pow:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t pbm3:1;
+ uint64_t pbm2:1;
+ uint64_t pbm1:1;
+ uint64_t pbm0:1;
+ uint64_t pbm_word:1;
+ uint64_t pwq1:1;
+ uint64_t pwq0:1;
+ uint64_t prc_off:1;
+ uint64_t ipd_old:1;
+ uint64_t ipd_new:1;
+ uint64_t pwp:1;
+ } s;
+ struct cvmx_ipd_bist_status_cn30xx {
+ uint64_t reserved_16_63:48;
+ uint64_t pwq_wqed:1;
+ uint64_t pwq_wp1:1;
+ uint64_t pwq_pow:1;
+ uint64_t ipq_pbe1:1;
+ uint64_t ipq_pbe0:1;
+ uint64_t pbm3:1;
+ uint64_t pbm2:1;
+ uint64_t pbm1:1;
+ uint64_t pbm0:1;
+ uint64_t pbm_word:1;
+ uint64_t pwq1:1;
+ uint64_t pwq0:1;
+ uint64_t prc_off:1;
+ uint64_t ipd_old:1;
+ uint64_t ipd_new:1;
+ uint64_t pwp:1;
+ } cn30xx;
+ struct cvmx_ipd_bist_status_cn30xx cn31xx;
+ struct cvmx_ipd_bist_status_cn30xx cn38xx;
+ struct cvmx_ipd_bist_status_cn30xx cn38xxp2;
+ struct cvmx_ipd_bist_status_cn30xx cn50xx;
+ struct cvmx_ipd_bist_status_s cn52xx;
+ struct cvmx_ipd_bist_status_s cn52xxp1;
+ struct cvmx_ipd_bist_status_s cn56xx;
+ struct cvmx_ipd_bist_status_s cn56xxp1;
+ struct cvmx_ipd_bist_status_cn30xx cn58xx;
+ struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
+};
+
+union cvmx_ipd_bp_prt_red_end {
+ uint64_t u64;
+ struct cvmx_ipd_bp_prt_red_end_s {
+ uint64_t reserved_40_63:24;
+ uint64_t prt_enb:40;
+ } s;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx {
+ uint64_t reserved_36_63:28;
+ uint64_t prt_enb:36;
+ } cn30xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
+ struct cvmx_ipd_bp_prt_red_end_s cn52xx;
+ struct cvmx_ipd_bp_prt_red_end_s cn52xxp1;
+ struct cvmx_ipd_bp_prt_red_end_s cn56xx;
+ struct cvmx_ipd_bp_prt_red_end_s cn56xxp1;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
+ struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
+};
+
+union cvmx_ipd_clk_count {
+ uint64_t u64;
+ struct cvmx_ipd_clk_count_s {
+ uint64_t clk_cnt:64;
+ } s;
+ struct cvmx_ipd_clk_count_s cn30xx;
+ struct cvmx_ipd_clk_count_s cn31xx;
+ struct cvmx_ipd_clk_count_s cn38xx;
+ struct cvmx_ipd_clk_count_s cn38xxp2;
+ struct cvmx_ipd_clk_count_s cn50xx;
+ struct cvmx_ipd_clk_count_s cn52xx;
+ struct cvmx_ipd_clk_count_s cn52xxp1;
+ struct cvmx_ipd_clk_count_s cn56xx;
+ struct cvmx_ipd_clk_count_s cn56xxp1;
+ struct cvmx_ipd_clk_count_s cn58xx;
+ struct cvmx_ipd_clk_count_s cn58xxp1;
+};
+
+union cvmx_ipd_ctl_status {
+ uint64_t u64;
+ struct cvmx_ipd_ctl_status_s {
+ uint64_t reserved_15_63:49;
+ uint64_t no_wptr:1;
+ uint64_t pq_apkt:1;
+ uint64_t pq_nabuf:1;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } s;
+ struct cvmx_ipd_ctl_status_cn30xx {
+ uint64_t reserved_10_63:54;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn30xx;
+ struct cvmx_ipd_ctl_status_cn30xx cn31xx;
+ struct cvmx_ipd_ctl_status_cn30xx cn38xx;
+ struct cvmx_ipd_ctl_status_cn38xxp2 {
+ uint64_t reserved_9_63:55;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn38xxp2;
+ struct cvmx_ipd_ctl_status_s cn50xx;
+ struct cvmx_ipd_ctl_status_s cn52xx;
+ struct cvmx_ipd_ctl_status_s cn52xxp1;
+ struct cvmx_ipd_ctl_status_s cn56xx;
+ struct cvmx_ipd_ctl_status_s cn56xxp1;
+ struct cvmx_ipd_ctl_status_cn58xx {
+ uint64_t reserved_12_63:52;
+ uint64_t ipd_full:1;
+ uint64_t pkt_off:1;
+ uint64_t len_m8:1;
+ uint64_t reset:1;
+ uint64_t addpkt:1;
+ uint64_t naddbuf:1;
+ uint64_t pkt_lend:1;
+ uint64_t wqe_lend:1;
+ uint64_t pbp_en:1;
+ uint64_t opc_mode:2;
+ uint64_t ipd_en:1;
+ } cn58xx;
+ struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
+};
+
+union cvmx_ipd_int_enb {
+ uint64_t u64;
+ struct cvmx_ipd_int_enb_s {
+ uint64_t reserved_12_63:52;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } s;
+ struct cvmx_ipd_int_enb_cn30xx {
+ uint64_t reserved_5_63:59;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } cn30xx;
+ struct cvmx_ipd_int_enb_cn30xx cn31xx;
+ struct cvmx_ipd_int_enb_cn38xx {
+ uint64_t reserved_10_63:54;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } cn38xx;
+ struct cvmx_ipd_int_enb_cn30xx cn38xxp2;
+ struct cvmx_ipd_int_enb_cn38xx cn50xx;
+ struct cvmx_ipd_int_enb_s cn52xx;
+ struct cvmx_ipd_int_enb_s cn52xxp1;
+ struct cvmx_ipd_int_enb_s cn56xx;
+ struct cvmx_ipd_int_enb_s cn56xxp1;
+ struct cvmx_ipd_int_enb_cn38xx cn58xx;
+ struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_int_sum {
+ uint64_t u64;
+ struct cvmx_ipd_int_sum_s {
+ uint64_t reserved_12_63:52;
+ uint64_t pq_sub:1;
+ uint64_t pq_add:1;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } s;
+ struct cvmx_ipd_int_sum_cn30xx {
+ uint64_t reserved_5_63:59;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } cn30xx;
+ struct cvmx_ipd_int_sum_cn30xx cn31xx;
+ struct cvmx_ipd_int_sum_cn38xx {
+ uint64_t reserved_10_63:54;
+ uint64_t bc_ovr:1;
+ uint64_t d_coll:1;
+ uint64_t c_coll:1;
+ uint64_t cc_ovr:1;
+ uint64_t dc_ovr:1;
+ uint64_t bp_sub:1;
+ uint64_t prc_par3:1;
+ uint64_t prc_par2:1;
+ uint64_t prc_par1:1;
+ uint64_t prc_par0:1;
+ } cn38xx;
+ struct cvmx_ipd_int_sum_cn30xx cn38xxp2;
+ struct cvmx_ipd_int_sum_cn38xx cn50xx;
+ struct cvmx_ipd_int_sum_s cn52xx;
+ struct cvmx_ipd_int_sum_s cn52xxp1;
+ struct cvmx_ipd_int_sum_s cn56xx;
+ struct cvmx_ipd_int_sum_s cn56xxp1;
+ struct cvmx_ipd_int_sum_cn38xx cn58xx;
+ struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_not_1st_mbuff_skip {
+ uint64_t u64;
+ struct cvmx_ipd_not_1st_mbuff_skip_s {
+ uint64_t reserved_6_63:58;
+ uint64_t skip_sz:6;
+ } s;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
+ struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
+};
+
+union cvmx_ipd_packet_mbuff_size {
+ uint64_t u64;
+ struct cvmx_ipd_packet_mbuff_size_s {
+ uint64_t reserved_12_63:52;
+ uint64_t mb_size:12;
+ } s;
+ struct cvmx_ipd_packet_mbuff_size_s cn30xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn31xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn38xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn38xxp2;
+ struct cvmx_ipd_packet_mbuff_size_s cn50xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn52xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn52xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn56xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
+ struct cvmx_ipd_packet_mbuff_size_s cn58xx;
+ struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
+};
+
+union cvmx_ipd_pkt_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_pkt_ptr_valid_s {
+ uint64_t reserved_29_63:35;
+ uint64_t ptr:29;
+ } s;
+ struct cvmx_ipd_pkt_ptr_valid_s cn30xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn31xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn38xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn50xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn52xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn56xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
+ struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
+ struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt_s {
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn30xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn31xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn38xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn50xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn52xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn56xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
+ struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_portx_bp_page_cnt2 {
+ uint64_t u64;
+ struct cvmx_ipd_portx_bp_page_cnt2_s {
+ uint64_t reserved_18_63:46;
+ uint64_t bp_enb:1;
+ uint64_t page_cnt:17;
+ } s;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
+ struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
+};
+
+union cvmx_ipd_port_bp_counters2_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters2_pairx_s {
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+ } s;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
+ struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
+};
+
+union cvmx_ipd_port_bp_counters_pairx {
+ uint64_t u64;
+ struct cvmx_ipd_port_bp_counters_pairx_s {
+ uint64_t reserved_25_63:39;
+ uint64_t cnt_val:25;
+ } s;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn30xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn31xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn38xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn50xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn52xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn56xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
+ struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
+};
+
+union cvmx_ipd_port_qos_x_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_x_cnt_s {
+ uint64_t wmark:32;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_ipd_port_qos_x_cnt_s cn52xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
+ struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
+ struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
+};
+
+union cvmx_ipd_port_qos_intx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_intx_s {
+ uint64_t intr:64;
+ } s;
+ struct cvmx_ipd_port_qos_intx_s cn52xx;
+ struct cvmx_ipd_port_qos_intx_s cn52xxp1;
+ struct cvmx_ipd_port_qos_intx_s cn56xx;
+ struct cvmx_ipd_port_qos_intx_s cn56xxp1;
+};
+
+union cvmx_ipd_port_qos_int_enbx {
+ uint64_t u64;
+ struct cvmx_ipd_port_qos_int_enbx_s {
+ uint64_t enb:64;
+ } s;
+ struct cvmx_ipd_port_qos_int_enbx_s cn52xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
+ struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
+ struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
+};
+
+union cvmx_ipd_prc_hold_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s {
+ uint64_t reserved_39_63:25;
+ uint64_t max_pkt:3;
+ uint64_t praddr:3;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:3;
+ } s;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_prc_port_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s {
+ uint64_t reserved_44_63:20;
+ uint64_t max_pkt:7;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:7;
+ } s;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_ptr_count {
+ uint64_t u64;
+ struct cvmx_ipd_ptr_count_s {
+ uint64_t reserved_19_63:45;
+ uint64_t pktv_cnt:1;
+ uint64_t wqev_cnt:1;
+ uint64_t pfif_cnt:3;
+ uint64_t pkt_pcnt:7;
+ uint64_t wqe_pcnt:7;
+ } s;
+ struct cvmx_ipd_ptr_count_s cn30xx;
+ struct cvmx_ipd_ptr_count_s cn31xx;
+ struct cvmx_ipd_ptr_count_s cn38xx;
+ struct cvmx_ipd_ptr_count_s cn38xxp2;
+ struct cvmx_ipd_ptr_count_s cn50xx;
+ struct cvmx_ipd_ptr_count_s cn52xx;
+ struct cvmx_ipd_ptr_count_s cn52xxp1;
+ struct cvmx_ipd_ptr_count_s cn56xx;
+ struct cvmx_ipd_ptr_count_s cn56xxp1;
+ struct cvmx_ipd_ptr_count_s cn58xx;
+ struct cvmx_ipd_ptr_count_s cn58xxp1;
+};
+
+union cvmx_ipd_pwp_ptr_fifo_ctl {
+ uint64_t u64;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s {
+ uint64_t reserved_61_63:3;
+ uint64_t max_cnts:7;
+ uint64_t wraddr:8;
+ uint64_t praddr:8;
+ uint64_t ptr:29;
+ uint64_t cena:1;
+ uint64_t raddr:8;
+ } s;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
+ struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
+};
+
+union cvmx_ipd_qosx_red_marks {
+ uint64_t u64;
+ struct cvmx_ipd_qosx_red_marks_s {
+ uint64_t drop:32;
+ uint64_t pass:32;
+ } s;
+ struct cvmx_ipd_qosx_red_marks_s cn30xx;
+ struct cvmx_ipd_qosx_red_marks_s cn31xx;
+ struct cvmx_ipd_qosx_red_marks_s cn38xx;
+ struct cvmx_ipd_qosx_red_marks_s cn38xxp2;
+ struct cvmx_ipd_qosx_red_marks_s cn50xx;
+ struct cvmx_ipd_qosx_red_marks_s cn52xx;
+ struct cvmx_ipd_qosx_red_marks_s cn52xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn56xx;
+ struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
+ struct cvmx_ipd_qosx_red_marks_s cn58xx;
+ struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
+};
+
+union cvmx_ipd_que0_free_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_que0_free_page_cnt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t q0_pcnt:32;
+ } s;
+ struct cvmx_ipd_que0_free_page_cnt_s cn30xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn31xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn38xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_que0_free_page_cnt_s cn50xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn52xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn56xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
+ struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_red_port_enable {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable_s {
+ uint64_t prb_dly:14;
+ uint64_t avg_dly:14;
+ uint64_t prt_enb:36;
+ } s;
+ struct cvmx_ipd_red_port_enable_s cn30xx;
+ struct cvmx_ipd_red_port_enable_s cn31xx;
+ struct cvmx_ipd_red_port_enable_s cn38xx;
+ struct cvmx_ipd_red_port_enable_s cn38xxp2;
+ struct cvmx_ipd_red_port_enable_s cn50xx;
+ struct cvmx_ipd_red_port_enable_s cn52xx;
+ struct cvmx_ipd_red_port_enable_s cn52xxp1;
+ struct cvmx_ipd_red_port_enable_s cn56xx;
+ struct cvmx_ipd_red_port_enable_s cn56xxp1;
+ struct cvmx_ipd_red_port_enable_s cn58xx;
+ struct cvmx_ipd_red_port_enable_s cn58xxp1;
+};
+
+union cvmx_ipd_red_port_enable2 {
+ uint64_t u64;
+ struct cvmx_ipd_red_port_enable2_s {
+ uint64_t reserved_4_63:60;
+ uint64_t prt_enb:4;
+ } s;
+ struct cvmx_ipd_red_port_enable2_s cn52xx;
+ struct cvmx_ipd_red_port_enable2_s cn52xxp1;
+ struct cvmx_ipd_red_port_enable2_s cn56xx;
+ struct cvmx_ipd_red_port_enable2_s cn56xxp1;
+};
+
+union cvmx_ipd_red_quex_param {
+ uint64_t u64;
+ struct cvmx_ipd_red_quex_param_s {
+ uint64_t reserved_49_63:15;
+ uint64_t use_pcnt:1;
+ uint64_t new_con:8;
+ uint64_t avg_con:8;
+ uint64_t prb_con:32;
+ } s;
+ struct cvmx_ipd_red_quex_param_s cn30xx;
+ struct cvmx_ipd_red_quex_param_s cn31xx;
+ struct cvmx_ipd_red_quex_param_s cn38xx;
+ struct cvmx_ipd_red_quex_param_s cn38xxp2;
+ struct cvmx_ipd_red_quex_param_s cn50xx;
+ struct cvmx_ipd_red_quex_param_s cn52xx;
+ struct cvmx_ipd_red_quex_param_s cn52xxp1;
+ struct cvmx_ipd_red_quex_param_s cn56xx;
+ struct cvmx_ipd_red_quex_param_s cn56xxp1;
+ struct cvmx_ipd_red_quex_param_s cn58xx;
+ struct cvmx_ipd_red_quex_param_s cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_bp_page_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s {
+ uint64_t reserved_31_63:33;
+ uint64_t port:6;
+ uint64_t page_cnt:25;
+ } s;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
+ struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_fcs {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_fcs_s {
+ uint64_t reserved_40_63:24;
+ uint64_t port_bit2:4;
+ uint64_t reserved_32_35:4;
+ uint64_t port_bit:32;
+ } s;
+ struct cvmx_ipd_sub_port_fcs_cn30xx {
+ uint64_t reserved_3_63:61;
+ uint64_t port_bit:3;
+ } cn30xx;
+ struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t port_bit:32;
+ } cn38xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2;
+ struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx;
+ struct cvmx_ipd_sub_port_fcs_s cn52xx;
+ struct cvmx_ipd_sub_port_fcs_s cn52xxp1;
+ struct cvmx_ipd_sub_port_fcs_s cn56xx;
+ struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
+ struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
+};
+
+union cvmx_ipd_sub_port_qos_cnt {
+ uint64_t u64;
+ struct cvmx_ipd_sub_port_qos_cnt_s {
+ uint64_t reserved_41_63:23;
+ uint64_t port_qos:9;
+ uint64_t cnt:32;
+ } s;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn52xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
+ struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
+};
+
+union cvmx_ipd_wqe_fpa_queue {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_fpa_queue_s {
+ uint64_t reserved_3_63:61;
+ uint64_t wqe_pool:3;
+ } s;
+ struct cvmx_ipd_wqe_fpa_queue_s cn30xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn31xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn38xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2;
+ struct cvmx_ipd_wqe_fpa_queue_s cn50xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn52xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn56xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
+ struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
+ struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
+};
+
+union cvmx_ipd_wqe_ptr_valid {
+ uint64_t u64;
+ struct cvmx_ipd_wqe_ptr_valid_s {
+ uint64_t reserved_29_63:35;
+ uint64_t ptr:29;
+ } s;
+ struct cvmx_ipd_wqe_ptr_valid_s cn30xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn31xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn38xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn50xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn52xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn56xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
+ struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
+ struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c-defs.h b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
new file mode 100644
index 000000000000..337583842b51
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c-defs.h
@@ -0,0 +1,963 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2C_DEFS_H__
+#define __CVMX_L2C_DEFS_H__
+
+#define CVMX_L2C_BST0 \
+ CVMX_ADD_IO_SEG(0x00011800800007F8ull)
+#define CVMX_L2C_BST1 \
+ CVMX_ADD_IO_SEG(0x00011800800007F0ull)
+#define CVMX_L2C_BST2 \
+ CVMX_ADD_IO_SEG(0x00011800800007E8ull)
+#define CVMX_L2C_CFG \
+ CVMX_ADD_IO_SEG(0x0001180080000000ull)
+#define CVMX_L2C_DBG \
+ CVMX_ADD_IO_SEG(0x0001180080000030ull)
+#define CVMX_L2C_DUT \
+ CVMX_ADD_IO_SEG(0x0001180080000050ull)
+#define CVMX_L2C_GRPWRR0 \
+ CVMX_ADD_IO_SEG(0x00011800800000C8ull)
+#define CVMX_L2C_GRPWRR1 \
+ CVMX_ADD_IO_SEG(0x00011800800000D0ull)
+#define CVMX_L2C_INT_EN \
+ CVMX_ADD_IO_SEG(0x0001180080000100ull)
+#define CVMX_L2C_INT_STAT \
+ CVMX_ADD_IO_SEG(0x00011800800000F8ull)
+#define CVMX_L2C_LCKBASE \
+ CVMX_ADD_IO_SEG(0x0001180080000058ull)
+#define CVMX_L2C_LCKOFF \
+ CVMX_ADD_IO_SEG(0x0001180080000060ull)
+#define CVMX_L2C_LFB0 \
+ CVMX_ADD_IO_SEG(0x0001180080000038ull)
+#define CVMX_L2C_LFB1 \
+ CVMX_ADD_IO_SEG(0x0001180080000040ull)
+#define CVMX_L2C_LFB2 \
+ CVMX_ADD_IO_SEG(0x0001180080000048ull)
+#define CVMX_L2C_LFB3 \
+ CVMX_ADD_IO_SEG(0x00011800800000B8ull)
+#define CVMX_L2C_OOB \
+ CVMX_ADD_IO_SEG(0x00011800800000D8ull)
+#define CVMX_L2C_OOB1 \
+ CVMX_ADD_IO_SEG(0x00011800800000E0ull)
+#define CVMX_L2C_OOB2 \
+ CVMX_ADD_IO_SEG(0x00011800800000E8ull)
+#define CVMX_L2C_OOB3 \
+ CVMX_ADD_IO_SEG(0x00011800800000F0ull)
+#define CVMX_L2C_PFC0 \
+ CVMX_ADD_IO_SEG(0x0001180080000098ull)
+#define CVMX_L2C_PFC1 \
+ CVMX_ADD_IO_SEG(0x00011800800000A0ull)
+#define CVMX_L2C_PFC2 \
+ CVMX_ADD_IO_SEG(0x00011800800000A8ull)
+#define CVMX_L2C_PFC3 \
+ CVMX_ADD_IO_SEG(0x00011800800000B0ull)
+#define CVMX_L2C_PFCTL \
+ CVMX_ADD_IO_SEG(0x0001180080000090ull)
+#define CVMX_L2C_PFCX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180080000098ull + (((offset) & 3) * 8))
+#define CVMX_L2C_PPGRP \
+ CVMX_ADD_IO_SEG(0x00011800800000C0ull)
+#define CVMX_L2C_SPAR0 \
+ CVMX_ADD_IO_SEG(0x0001180080000068ull)
+#define CVMX_L2C_SPAR1 \
+ CVMX_ADD_IO_SEG(0x0001180080000070ull)
+#define CVMX_L2C_SPAR2 \
+ CVMX_ADD_IO_SEG(0x0001180080000078ull)
+#define CVMX_L2C_SPAR3 \
+ CVMX_ADD_IO_SEG(0x0001180080000080ull)
+#define CVMX_L2C_SPAR4 \
+ CVMX_ADD_IO_SEG(0x0001180080000088ull)
+
+union cvmx_l2c_bst0 {
+ uint64_t u64;
+ struct cvmx_l2c_bst0_s {
+ uint64_t reserved_24_63:40;
+ uint64_t dtbnk:1;
+ uint64_t wlb_msk:4;
+ uint64_t dtcnt:13;
+ uint64_t dt:1;
+ uint64_t stin_msk:1;
+ uint64_t wlb_dat:4;
+ } s;
+ struct cvmx_l2c_bst0_cn30xx {
+ uint64_t reserved_23_63:41;
+ uint64_t wlb_msk:4;
+ uint64_t reserved_15_18:4;
+ uint64_t dtcnt:9;
+ uint64_t dt:1;
+ uint64_t reserved_4_4:1;
+ uint64_t wlb_dat:4;
+ } cn30xx;
+ struct cvmx_l2c_bst0_cn31xx {
+ uint64_t reserved_23_63:41;
+ uint64_t wlb_msk:4;
+ uint64_t reserved_16_18:3;
+ uint64_t dtcnt:10;
+ uint64_t dt:1;
+ uint64_t stin_msk:1;
+ uint64_t wlb_dat:4;
+ } cn31xx;
+ struct cvmx_l2c_bst0_cn38xx {
+ uint64_t reserved_19_63:45;
+ uint64_t dtcnt:13;
+ uint64_t dt:1;
+ uint64_t stin_msk:1;
+ uint64_t wlb_dat:4;
+ } cn38xx;
+ struct cvmx_l2c_bst0_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst0_cn50xx {
+ uint64_t reserved_24_63:40;
+ uint64_t dtbnk:1;
+ uint64_t wlb_msk:4;
+ uint64_t reserved_16_18:3;
+ uint64_t dtcnt:10;
+ uint64_t dt:1;
+ uint64_t stin_msk:1;
+ uint64_t wlb_dat:4;
+ } cn50xx;
+ struct cvmx_l2c_bst0_cn50xx cn52xx;
+ struct cvmx_l2c_bst0_cn50xx cn52xxp1;
+ struct cvmx_l2c_bst0_s cn56xx;
+ struct cvmx_l2c_bst0_s cn56xxp1;
+ struct cvmx_l2c_bst0_s cn58xx;
+ struct cvmx_l2c_bst0_s cn58xxp1;
+};
+
+union cvmx_l2c_bst1 {
+ uint64_t u64;
+ struct cvmx_l2c_bst1_s {
+ uint64_t reserved_9_63:55;
+ uint64_t l2t:9;
+ } s;
+ struct cvmx_l2c_bst1_cn30xx {
+ uint64_t reserved_16_63:48;
+ uint64_t vwdf:4;
+ uint64_t lrf:2;
+ uint64_t vab_vwcf:1;
+ uint64_t reserved_5_8:4;
+ uint64_t l2t:5;
+ } cn30xx;
+ struct cvmx_l2c_bst1_cn30xx cn31xx;
+ struct cvmx_l2c_bst1_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t vwdf:4;
+ uint64_t lrf:2;
+ uint64_t vab_vwcf:1;
+ uint64_t l2t:9;
+ } cn38xx;
+ struct cvmx_l2c_bst1_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst1_cn38xx cn50xx;
+ struct cvmx_l2c_bst1_cn52xx {
+ uint64_t reserved_19_63:45;
+ uint64_t plc2:1;
+ uint64_t plc1:1;
+ uint64_t plc0:1;
+ uint64_t vwdf:4;
+ uint64_t reserved_11_11:1;
+ uint64_t ilc:1;
+ uint64_t vab_vwcf:1;
+ uint64_t l2t:9;
+ } cn52xx;
+ struct cvmx_l2c_bst1_cn52xx cn52xxp1;
+ struct cvmx_l2c_bst1_cn56xx {
+ uint64_t reserved_24_63:40;
+ uint64_t plc2:1;
+ uint64_t plc1:1;
+ uint64_t plc0:1;
+ uint64_t ilc:1;
+ uint64_t vwdf1:4;
+ uint64_t vwdf0:4;
+ uint64_t vab_vwcf1:1;
+ uint64_t reserved_10_10:1;
+ uint64_t vab_vwcf0:1;
+ uint64_t l2t:9;
+ } cn56xx;
+ struct cvmx_l2c_bst1_cn56xx cn56xxp1;
+ struct cvmx_l2c_bst1_cn38xx cn58xx;
+ struct cvmx_l2c_bst1_cn38xx cn58xxp1;
+};
+
+union cvmx_l2c_bst2 {
+ uint64_t u64;
+ struct cvmx_l2c_bst2_s {
+ uint64_t reserved_16_63:48;
+ uint64_t mrb:4;
+ uint64_t reserved_4_11:8;
+ uint64_t ipcbst:1;
+ uint64_t picbst:1;
+ uint64_t xrdmsk:1;
+ uint64_t xrddat:1;
+ } s;
+ struct cvmx_l2c_bst2_cn30xx {
+ uint64_t reserved_16_63:48;
+ uint64_t mrb:4;
+ uint64_t rmdf:4;
+ uint64_t reserved_4_7:4;
+ uint64_t ipcbst:1;
+ uint64_t reserved_2_2:1;
+ uint64_t xrdmsk:1;
+ uint64_t xrddat:1;
+ } cn30xx;
+ struct cvmx_l2c_bst2_cn30xx cn31xx;
+ struct cvmx_l2c_bst2_cn38xx {
+ uint64_t reserved_16_63:48;
+ uint64_t mrb:4;
+ uint64_t rmdf:4;
+ uint64_t rhdf:4;
+ uint64_t ipcbst:1;
+ uint64_t picbst:1;
+ uint64_t xrdmsk:1;
+ uint64_t xrddat:1;
+ } cn38xx;
+ struct cvmx_l2c_bst2_cn38xx cn38xxp2;
+ struct cvmx_l2c_bst2_cn30xx cn50xx;
+ struct cvmx_l2c_bst2_cn30xx cn52xx;
+ struct cvmx_l2c_bst2_cn30xx cn52xxp1;
+ struct cvmx_l2c_bst2_cn56xx {
+ uint64_t reserved_16_63:48;
+ uint64_t mrb:4;
+ uint64_t rmdb:4;
+ uint64_t rhdb:4;
+ uint64_t ipcbst:1;
+ uint64_t picbst:1;
+ uint64_t xrdmsk:1;
+ uint64_t xrddat:1;
+ } cn56xx;
+ struct cvmx_l2c_bst2_cn56xx cn56xxp1;
+ struct cvmx_l2c_bst2_cn56xx cn58xx;
+ struct cvmx_l2c_bst2_cn56xx cn58xxp1;
+};
+
+union cvmx_l2c_cfg {
+ uint64_t u64;
+ struct cvmx_l2c_cfg_s {
+ uint64_t reserved_20_63:44;
+ uint64_t bstrun:1;
+ uint64_t lbist:1;
+ uint64_t xor_bank:1;
+ uint64_t dpres1:1;
+ uint64_t dpres0:1;
+ uint64_t dfill_dis:1;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } s;
+ struct cvmx_l2c_cfg_cn30xx {
+ uint64_t reserved_14_63:50;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } cn30xx;
+ struct cvmx_l2c_cfg_cn30xx cn31xx;
+ struct cvmx_l2c_cfg_cn30xx cn38xx;
+ struct cvmx_l2c_cfg_cn30xx cn38xxp2;
+ struct cvmx_l2c_cfg_cn50xx {
+ uint64_t reserved_20_63:44;
+ uint64_t bstrun:1;
+ uint64_t lbist:1;
+ uint64_t reserved_14_17:4;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } cn50xx;
+ struct cvmx_l2c_cfg_cn50xx cn52xx;
+ struct cvmx_l2c_cfg_cn50xx cn52xxp1;
+ struct cvmx_l2c_cfg_s cn56xx;
+ struct cvmx_l2c_cfg_s cn56xxp1;
+ struct cvmx_l2c_cfg_cn58xx {
+ uint64_t reserved_20_63:44;
+ uint64_t bstrun:1;
+ uint64_t lbist:1;
+ uint64_t reserved_15_17:3;
+ uint64_t dfill_dis:1;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } cn58xx;
+ struct cvmx_l2c_cfg_cn58xxp1 {
+ uint64_t reserved_15_63:49;
+ uint64_t dfill_dis:1;
+ uint64_t fpexp:4;
+ uint64_t fpempty:1;
+ uint64_t fpen:1;
+ uint64_t idxalias:1;
+ uint64_t mwf_crd:4;
+ uint64_t rsp_arb_mode:1;
+ uint64_t rfb_arb_mode:1;
+ uint64_t lrf_arb_mode:1;
+ } cn58xxp1;
+};
+
+union cvmx_l2c_dbg {
+ uint64_t u64;
+ struct cvmx_l2c_dbg_s {
+ uint64_t reserved_15_63:49;
+ uint64_t lfb_enum:4;
+ uint64_t lfb_dmp:1;
+ uint64_t ppnum:4;
+ uint64_t set:3;
+ uint64_t finv:1;
+ uint64_t l2d:1;
+ uint64_t l2t:1;
+ } s;
+ struct cvmx_l2c_dbg_cn30xx {
+ uint64_t reserved_13_63:51;
+ uint64_t lfb_enum:2;
+ uint64_t lfb_dmp:1;
+ uint64_t reserved_5_9:5;
+ uint64_t set:2;
+ uint64_t finv:1;
+ uint64_t l2d:1;
+ uint64_t l2t:1;
+ } cn30xx;
+ struct cvmx_l2c_dbg_cn31xx {
+ uint64_t reserved_14_63:50;
+ uint64_t lfb_enum:3;
+ uint64_t lfb_dmp:1;
+ uint64_t reserved_7_9:3;
+ uint64_t ppnum:1;
+ uint64_t reserved_5_5:1;
+ uint64_t set:2;
+ uint64_t finv:1;
+ uint64_t l2d:1;
+ uint64_t l2t:1;
+ } cn31xx;
+ struct cvmx_l2c_dbg_s cn38xx;
+ struct cvmx_l2c_dbg_s cn38xxp2;
+ struct cvmx_l2c_dbg_cn50xx {
+ uint64_t reserved_14_63:50;
+ uint64_t lfb_enum:3;
+ uint64_t lfb_dmp:1;
+ uint64_t reserved_7_9:3;
+ uint64_t ppnum:1;
+ uint64_t set:3;
+ uint64_t finv:1;
+ uint64_t l2d:1;
+ uint64_t l2t:1;
+ } cn50xx;
+ struct cvmx_l2c_dbg_cn52xx {
+ uint64_t reserved_14_63:50;
+ uint64_t lfb_enum:3;
+ uint64_t lfb_dmp:1;
+ uint64_t reserved_8_9:2;
+ uint64_t ppnum:2;
+ uint64_t set:3;
+ uint64_t finv:1;
+ uint64_t l2d:1;
+ uint64_t l2t:1;
+ } cn52xx;
+ struct cvmx_l2c_dbg_cn52xx cn52xxp1;
+ struct cvmx_l2c_dbg_s cn56xx;
+ struct cvmx_l2c_dbg_s cn56xxp1;
+ struct cvmx_l2c_dbg_s cn58xx;
+ struct cvmx_l2c_dbg_s cn58xxp1;
+};
+
+union cvmx_l2c_dut {
+ uint64_t u64;
+ struct cvmx_l2c_dut_s {
+ uint64_t reserved_32_63:32;
+ uint64_t dtena:1;
+ uint64_t reserved_30_30:1;
+ uint64_t dt_vld:1;
+ uint64_t dt_tag:29;
+ } s;
+ struct cvmx_l2c_dut_s cn30xx;
+ struct cvmx_l2c_dut_s cn31xx;
+ struct cvmx_l2c_dut_s cn38xx;
+ struct cvmx_l2c_dut_s cn38xxp2;
+ struct cvmx_l2c_dut_s cn50xx;
+ struct cvmx_l2c_dut_s cn52xx;
+ struct cvmx_l2c_dut_s cn52xxp1;
+ struct cvmx_l2c_dut_s cn56xx;
+ struct cvmx_l2c_dut_s cn56xxp1;
+ struct cvmx_l2c_dut_s cn58xx;
+ struct cvmx_l2c_dut_s cn58xxp1;
+};
+
+union cvmx_l2c_grpwrr0 {
+ uint64_t u64;
+ struct cvmx_l2c_grpwrr0_s {
+ uint64_t plc1rmsk:32;
+ uint64_t plc0rmsk:32;
+ } s;
+ struct cvmx_l2c_grpwrr0_s cn52xx;
+ struct cvmx_l2c_grpwrr0_s cn52xxp1;
+ struct cvmx_l2c_grpwrr0_s cn56xx;
+ struct cvmx_l2c_grpwrr0_s cn56xxp1;
+};
+
+union cvmx_l2c_grpwrr1 {
+ uint64_t u64;
+ struct cvmx_l2c_grpwrr1_s {
+ uint64_t ilcrmsk:32;
+ uint64_t plc2rmsk:32;
+ } s;
+ struct cvmx_l2c_grpwrr1_s cn52xx;
+ struct cvmx_l2c_grpwrr1_s cn52xxp1;
+ struct cvmx_l2c_grpwrr1_s cn56xx;
+ struct cvmx_l2c_grpwrr1_s cn56xxp1;
+};
+
+union cvmx_l2c_int_en {
+ uint64_t u64;
+ struct cvmx_l2c_int_en_s {
+ uint64_t reserved_9_63:55;
+ uint64_t lck2ena:1;
+ uint64_t lckena:1;
+ uint64_t l2ddeden:1;
+ uint64_t l2dsecen:1;
+ uint64_t l2tdeden:1;
+ uint64_t l2tsecen:1;
+ uint64_t oob3en:1;
+ uint64_t oob2en:1;
+ uint64_t oob1en:1;
+ } s;
+ struct cvmx_l2c_int_en_s cn52xx;
+ struct cvmx_l2c_int_en_s cn52xxp1;
+ struct cvmx_l2c_int_en_s cn56xx;
+ struct cvmx_l2c_int_en_s cn56xxp1;
+};
+
+union cvmx_l2c_int_stat {
+ uint64_t u64;
+ struct cvmx_l2c_int_stat_s {
+ uint64_t reserved_9_63:55;
+ uint64_t lck2:1;
+ uint64_t lck:1;
+ uint64_t l2dded:1;
+ uint64_t l2dsec:1;
+ uint64_t l2tded:1;
+ uint64_t l2tsec:1;
+ uint64_t oob3:1;
+ uint64_t oob2:1;
+ uint64_t oob1:1;
+ } s;
+ struct cvmx_l2c_int_stat_s cn52xx;
+ struct cvmx_l2c_int_stat_s cn52xxp1;
+ struct cvmx_l2c_int_stat_s cn56xx;
+ struct cvmx_l2c_int_stat_s cn56xxp1;
+};
+
+union cvmx_l2c_lckbase {
+ uint64_t u64;
+ struct cvmx_l2c_lckbase_s {
+ uint64_t reserved_31_63:33;
+ uint64_t lck_base:27;
+ uint64_t reserved_1_3:3;
+ uint64_t lck_ena:1;
+ } s;
+ struct cvmx_l2c_lckbase_s cn30xx;
+ struct cvmx_l2c_lckbase_s cn31xx;
+ struct cvmx_l2c_lckbase_s cn38xx;
+ struct cvmx_l2c_lckbase_s cn38xxp2;
+ struct cvmx_l2c_lckbase_s cn50xx;
+ struct cvmx_l2c_lckbase_s cn52xx;
+ struct cvmx_l2c_lckbase_s cn52xxp1;
+ struct cvmx_l2c_lckbase_s cn56xx;
+ struct cvmx_l2c_lckbase_s cn56xxp1;
+ struct cvmx_l2c_lckbase_s cn58xx;
+ struct cvmx_l2c_lckbase_s cn58xxp1;
+};
+
+union cvmx_l2c_lckoff {
+ uint64_t u64;
+ struct cvmx_l2c_lckoff_s {
+ uint64_t reserved_10_63:54;
+ uint64_t lck_offset:10;
+ } s;
+ struct cvmx_l2c_lckoff_s cn30xx;
+ struct cvmx_l2c_lckoff_s cn31xx;
+ struct cvmx_l2c_lckoff_s cn38xx;
+ struct cvmx_l2c_lckoff_s cn38xxp2;
+ struct cvmx_l2c_lckoff_s cn50xx;
+ struct cvmx_l2c_lckoff_s cn52xx;
+ struct cvmx_l2c_lckoff_s cn52xxp1;
+ struct cvmx_l2c_lckoff_s cn56xx;
+ struct cvmx_l2c_lckoff_s cn56xxp1;
+ struct cvmx_l2c_lckoff_s cn58xx;
+ struct cvmx_l2c_lckoff_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb0 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb0_s {
+ uint64_t reserved_32_63:32;
+ uint64_t stcpnd:1;
+ uint64_t stpnd:1;
+ uint64_t stinv:1;
+ uint64_t stcfl:1;
+ uint64_t vam:1;
+ uint64_t inxt:4;
+ uint64_t itl:1;
+ uint64_t ihd:1;
+ uint64_t set:3;
+ uint64_t vabnum:4;
+ uint64_t sid:9;
+ uint64_t cmd:4;
+ uint64_t vld:1;
+ } s;
+ struct cvmx_l2c_lfb0_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t stcpnd:1;
+ uint64_t stpnd:1;
+ uint64_t stinv:1;
+ uint64_t stcfl:1;
+ uint64_t vam:1;
+ uint64_t reserved_25_26:2;
+ uint64_t inxt:2;
+ uint64_t itl:1;
+ uint64_t ihd:1;
+ uint64_t reserved_20_20:1;
+ uint64_t set:2;
+ uint64_t reserved_16_17:2;
+ uint64_t vabnum:2;
+ uint64_t sid:9;
+ uint64_t cmd:4;
+ uint64_t vld:1;
+ } cn30xx;
+ struct cvmx_l2c_lfb0_cn31xx {
+ uint64_t reserved_32_63:32;
+ uint64_t stcpnd:1;
+ uint64_t stpnd:1;
+ uint64_t stinv:1;
+ uint64_t stcfl:1;
+ uint64_t vam:1;
+ uint64_t reserved_26_26:1;
+ uint64_t inxt:3;
+ uint64_t itl:1;
+ uint64_t ihd:1;
+ uint64_t reserved_20_20:1;
+ uint64_t set:2;
+ uint64_t reserved_17_17:1;
+ uint64_t vabnum:3;
+ uint64_t sid:9;
+ uint64_t cmd:4;
+ uint64_t vld:1;
+ } cn31xx;
+ struct cvmx_l2c_lfb0_s cn38xx;
+ struct cvmx_l2c_lfb0_s cn38xxp2;
+ struct cvmx_l2c_lfb0_cn50xx {
+ uint64_t reserved_32_63:32;
+ uint64_t stcpnd:1;
+ uint64_t stpnd:1;
+ uint64_t stinv:1;
+ uint64_t stcfl:1;
+ uint64_t vam:1;
+ uint64_t reserved_26_26:1;
+ uint64_t inxt:3;
+ uint64_t itl:1;
+ uint64_t ihd:1;
+ uint64_t set:3;
+ uint64_t reserved_17_17:1;
+ uint64_t vabnum:3;
+ uint64_t sid:9;
+ uint64_t cmd:4;
+ uint64_t vld:1;
+ } cn50xx;
+ struct cvmx_l2c_lfb0_cn50xx cn52xx;
+ struct cvmx_l2c_lfb0_cn50xx cn52xxp1;
+ struct cvmx_l2c_lfb0_s cn56xx;
+ struct cvmx_l2c_lfb0_s cn56xxp1;
+ struct cvmx_l2c_lfb0_s cn58xx;
+ struct cvmx_l2c_lfb0_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb1 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb1_s {
+ uint64_t reserved_19_63:45;
+ uint64_t dsgoing:1;
+ uint64_t bid:2;
+ uint64_t wtrsp:1;
+ uint64_t wtdw:1;
+ uint64_t wtdq:1;
+ uint64_t wtwhp:1;
+ uint64_t wtwhf:1;
+ uint64_t wtwrm:1;
+ uint64_t wtstm:1;
+ uint64_t wtrda:1;
+ uint64_t wtstdt:1;
+ uint64_t wtstrsp:1;
+ uint64_t wtstrsc:1;
+ uint64_t wtvtm:1;
+ uint64_t wtmfl:1;
+ uint64_t prbrty:1;
+ uint64_t wtprb:1;
+ uint64_t vld:1;
+ } s;
+ struct cvmx_l2c_lfb1_s cn30xx;
+ struct cvmx_l2c_lfb1_s cn31xx;
+ struct cvmx_l2c_lfb1_s cn38xx;
+ struct cvmx_l2c_lfb1_s cn38xxp2;
+ struct cvmx_l2c_lfb1_s cn50xx;
+ struct cvmx_l2c_lfb1_s cn52xx;
+ struct cvmx_l2c_lfb1_s cn52xxp1;
+ struct cvmx_l2c_lfb1_s cn56xx;
+ struct cvmx_l2c_lfb1_s cn56xxp1;
+ struct cvmx_l2c_lfb1_s cn58xx;
+ struct cvmx_l2c_lfb1_s cn58xxp1;
+};
+
+union cvmx_l2c_lfb2 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb2_s {
+ uint64_t reserved_0_63:64;
+ } s;
+ struct cvmx_l2c_lfb2_cn30xx {
+ uint64_t reserved_27_63:37;
+ uint64_t lfb_tag:19;
+ uint64_t lfb_idx:8;
+ } cn30xx;
+ struct cvmx_l2c_lfb2_cn31xx {
+ uint64_t reserved_27_63:37;
+ uint64_t lfb_tag:17;
+ uint64_t lfb_idx:10;
+ } cn31xx;
+ struct cvmx_l2c_lfb2_cn31xx cn38xx;
+ struct cvmx_l2c_lfb2_cn31xx cn38xxp2;
+ struct cvmx_l2c_lfb2_cn50xx {
+ uint64_t reserved_27_63:37;
+ uint64_t lfb_tag:20;
+ uint64_t lfb_idx:7;
+ } cn50xx;
+ struct cvmx_l2c_lfb2_cn52xx {
+ uint64_t reserved_27_63:37;
+ uint64_t lfb_tag:18;
+ uint64_t lfb_idx:9;
+ } cn52xx;
+ struct cvmx_l2c_lfb2_cn52xx cn52xxp1;
+ struct cvmx_l2c_lfb2_cn56xx {
+ uint64_t reserved_27_63:37;
+ uint64_t lfb_tag:16;
+ uint64_t lfb_idx:11;
+ } cn56xx;
+ struct cvmx_l2c_lfb2_cn56xx cn56xxp1;
+ struct cvmx_l2c_lfb2_cn56xx cn58xx;
+ struct cvmx_l2c_lfb2_cn56xx cn58xxp1;
+};
+
+union cvmx_l2c_lfb3 {
+ uint64_t u64;
+ struct cvmx_l2c_lfb3_s {
+ uint64_t reserved_5_63:59;
+ uint64_t stpartdis:1;
+ uint64_t lfb_hwm:4;
+ } s;
+ struct cvmx_l2c_lfb3_cn30xx {
+ uint64_t reserved_5_63:59;
+ uint64_t stpartdis:1;
+ uint64_t reserved_2_3:2;
+ uint64_t lfb_hwm:2;
+ } cn30xx;
+ struct cvmx_l2c_lfb3_cn31xx {
+ uint64_t reserved_5_63:59;
+ uint64_t stpartdis:1;
+ uint64_t reserved_3_3:1;
+ uint64_t lfb_hwm:3;
+ } cn31xx;
+ struct cvmx_l2c_lfb3_s cn38xx;
+ struct cvmx_l2c_lfb3_s cn38xxp2;
+ struct cvmx_l2c_lfb3_cn31xx cn50xx;
+ struct cvmx_l2c_lfb3_cn31xx cn52xx;
+ struct cvmx_l2c_lfb3_cn31xx cn52xxp1;
+ struct cvmx_l2c_lfb3_s cn56xx;
+ struct cvmx_l2c_lfb3_s cn56xxp1;
+ struct cvmx_l2c_lfb3_s cn58xx;
+ struct cvmx_l2c_lfb3_s cn58xxp1;
+};
+
+union cvmx_l2c_oob {
+ uint64_t u64;
+ struct cvmx_l2c_oob_s {
+ uint64_t reserved_2_63:62;
+ uint64_t dwbena:1;
+ uint64_t stena:1;
+ } s;
+ struct cvmx_l2c_oob_s cn52xx;
+ struct cvmx_l2c_oob_s cn52xxp1;
+ struct cvmx_l2c_oob_s cn56xx;
+ struct cvmx_l2c_oob_s cn56xxp1;
+};
+
+union cvmx_l2c_oob1 {
+ uint64_t u64;
+ struct cvmx_l2c_oob1_s {
+ uint64_t fadr:27;
+ uint64_t fsrc:1;
+ uint64_t reserved_34_35:2;
+ uint64_t sadr:14;
+ uint64_t reserved_14_19:6;
+ uint64_t size:14;
+ } s;
+ struct cvmx_l2c_oob1_s cn52xx;
+ struct cvmx_l2c_oob1_s cn52xxp1;
+ struct cvmx_l2c_oob1_s cn56xx;
+ struct cvmx_l2c_oob1_s cn56xxp1;
+};
+
+union cvmx_l2c_oob2 {
+ uint64_t u64;
+ struct cvmx_l2c_oob2_s {
+ uint64_t fadr:27;
+ uint64_t fsrc:1;
+ uint64_t reserved_34_35:2;
+ uint64_t sadr:14;
+ uint64_t reserved_14_19:6;
+ uint64_t size:14;
+ } s;
+ struct cvmx_l2c_oob2_s cn52xx;
+ struct cvmx_l2c_oob2_s cn52xxp1;
+ struct cvmx_l2c_oob2_s cn56xx;
+ struct cvmx_l2c_oob2_s cn56xxp1;
+};
+
+union cvmx_l2c_oob3 {
+ uint64_t u64;
+ struct cvmx_l2c_oob3_s {
+ uint64_t fadr:27;
+ uint64_t fsrc:1;
+ uint64_t reserved_34_35:2;
+ uint64_t sadr:14;
+ uint64_t reserved_14_19:6;
+ uint64_t size:14;
+ } s;
+ struct cvmx_l2c_oob3_s cn52xx;
+ struct cvmx_l2c_oob3_s cn52xxp1;
+ struct cvmx_l2c_oob3_s cn56xx;
+ struct cvmx_l2c_oob3_s cn56xxp1;
+};
+
+union cvmx_l2c_pfcx {
+ uint64_t u64;
+ struct cvmx_l2c_pfcx_s {
+ uint64_t reserved_36_63:28;
+ uint64_t pfcnt0:36;
+ } s;
+ struct cvmx_l2c_pfcx_s cn30xx;
+ struct cvmx_l2c_pfcx_s cn31xx;
+ struct cvmx_l2c_pfcx_s cn38xx;
+ struct cvmx_l2c_pfcx_s cn38xxp2;
+ struct cvmx_l2c_pfcx_s cn50xx;
+ struct cvmx_l2c_pfcx_s cn52xx;
+ struct cvmx_l2c_pfcx_s cn52xxp1;
+ struct cvmx_l2c_pfcx_s cn56xx;
+ struct cvmx_l2c_pfcx_s cn56xxp1;
+ struct cvmx_l2c_pfcx_s cn58xx;
+ struct cvmx_l2c_pfcx_s cn58xxp1;
+};
+
+union cvmx_l2c_pfctl {
+ uint64_t u64;
+ struct cvmx_l2c_pfctl_s {
+ uint64_t reserved_36_63:28;
+ uint64_t cnt3rdclr:1;
+ uint64_t cnt2rdclr:1;
+ uint64_t cnt1rdclr:1;
+ uint64_t cnt0rdclr:1;
+ uint64_t cnt3ena:1;
+ uint64_t cnt3clr:1;
+ uint64_t cnt3sel:6;
+ uint64_t cnt2ena:1;
+ uint64_t cnt2clr:1;
+ uint64_t cnt2sel:6;
+ uint64_t cnt1ena:1;
+ uint64_t cnt1clr:1;
+ uint64_t cnt1sel:6;
+ uint64_t cnt0ena:1;
+ uint64_t cnt0clr:1;
+ uint64_t cnt0sel:6;
+ } s;
+ struct cvmx_l2c_pfctl_s cn30xx;
+ struct cvmx_l2c_pfctl_s cn31xx;
+ struct cvmx_l2c_pfctl_s cn38xx;
+ struct cvmx_l2c_pfctl_s cn38xxp2;
+ struct cvmx_l2c_pfctl_s cn50xx;
+ struct cvmx_l2c_pfctl_s cn52xx;
+ struct cvmx_l2c_pfctl_s cn52xxp1;
+ struct cvmx_l2c_pfctl_s cn56xx;
+ struct cvmx_l2c_pfctl_s cn56xxp1;
+ struct cvmx_l2c_pfctl_s cn58xx;
+ struct cvmx_l2c_pfctl_s cn58xxp1;
+};
+
+union cvmx_l2c_ppgrp {
+ uint64_t u64;
+ struct cvmx_l2c_ppgrp_s {
+ uint64_t reserved_24_63:40;
+ uint64_t pp11grp:2;
+ uint64_t pp10grp:2;
+ uint64_t pp9grp:2;
+ uint64_t pp8grp:2;
+ uint64_t pp7grp:2;
+ uint64_t pp6grp:2;
+ uint64_t pp5grp:2;
+ uint64_t pp4grp:2;
+ uint64_t pp3grp:2;
+ uint64_t pp2grp:2;
+ uint64_t pp1grp:2;
+ uint64_t pp0grp:2;
+ } s;
+ struct cvmx_l2c_ppgrp_cn52xx {
+ uint64_t reserved_8_63:56;
+ uint64_t pp3grp:2;
+ uint64_t pp2grp:2;
+ uint64_t pp1grp:2;
+ uint64_t pp0grp:2;
+ } cn52xx;
+ struct cvmx_l2c_ppgrp_cn52xx cn52xxp1;
+ struct cvmx_l2c_ppgrp_s cn56xx;
+ struct cvmx_l2c_ppgrp_s cn56xxp1;
+};
+
+union cvmx_l2c_spar0 {
+ uint64_t u64;
+ struct cvmx_l2c_spar0_s {
+ uint64_t reserved_32_63:32;
+ uint64_t umsk3:8;
+ uint64_t umsk2:8;
+ uint64_t umsk1:8;
+ uint64_t umsk0:8;
+ } s;
+ struct cvmx_l2c_spar0_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t umsk0:4;
+ } cn30xx;
+ struct cvmx_l2c_spar0_cn31xx {
+ uint64_t reserved_12_63:52;
+ uint64_t umsk1:4;
+ uint64_t reserved_4_7:4;
+ uint64_t umsk0:4;
+ } cn31xx;
+ struct cvmx_l2c_spar0_s cn38xx;
+ struct cvmx_l2c_spar0_s cn38xxp2;
+ struct cvmx_l2c_spar0_cn50xx {
+ uint64_t reserved_16_63:48;
+ uint64_t umsk1:8;
+ uint64_t umsk0:8;
+ } cn50xx;
+ struct cvmx_l2c_spar0_s cn52xx;
+ struct cvmx_l2c_spar0_s cn52xxp1;
+ struct cvmx_l2c_spar0_s cn56xx;
+ struct cvmx_l2c_spar0_s cn56xxp1;
+ struct cvmx_l2c_spar0_s cn58xx;
+ struct cvmx_l2c_spar0_s cn58xxp1;
+};
+
+union cvmx_l2c_spar1 {
+ uint64_t u64;
+ struct cvmx_l2c_spar1_s {
+ uint64_t reserved_32_63:32;
+ uint64_t umsk7:8;
+ uint64_t umsk6:8;
+ uint64_t umsk5:8;
+ uint64_t umsk4:8;
+ } s;
+ struct cvmx_l2c_spar1_s cn38xx;
+ struct cvmx_l2c_spar1_s cn38xxp2;
+ struct cvmx_l2c_spar1_s cn56xx;
+ struct cvmx_l2c_spar1_s cn56xxp1;
+ struct cvmx_l2c_spar1_s cn58xx;
+ struct cvmx_l2c_spar1_s cn58xxp1;
+};
+
+union cvmx_l2c_spar2 {
+ uint64_t u64;
+ struct cvmx_l2c_spar2_s {
+ uint64_t reserved_32_63:32;
+ uint64_t umsk11:8;
+ uint64_t umsk10:8;
+ uint64_t umsk9:8;
+ uint64_t umsk8:8;
+ } s;
+ struct cvmx_l2c_spar2_s cn38xx;
+ struct cvmx_l2c_spar2_s cn38xxp2;
+ struct cvmx_l2c_spar2_s cn56xx;
+ struct cvmx_l2c_spar2_s cn56xxp1;
+ struct cvmx_l2c_spar2_s cn58xx;
+ struct cvmx_l2c_spar2_s cn58xxp1;
+};
+
+union cvmx_l2c_spar3 {
+ uint64_t u64;
+ struct cvmx_l2c_spar3_s {
+ uint64_t reserved_32_63:32;
+ uint64_t umsk15:8;
+ uint64_t umsk14:8;
+ uint64_t umsk13:8;
+ uint64_t umsk12:8;
+ } s;
+ struct cvmx_l2c_spar3_s cn38xx;
+ struct cvmx_l2c_spar3_s cn38xxp2;
+ struct cvmx_l2c_spar3_s cn58xx;
+ struct cvmx_l2c_spar3_s cn58xxp1;
+};
+
+union cvmx_l2c_spar4 {
+ uint64_t u64;
+ struct cvmx_l2c_spar4_s {
+ uint64_t reserved_8_63:56;
+ uint64_t umskiob:8;
+ } s;
+ struct cvmx_l2c_spar4_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t umskiob:4;
+ } cn30xx;
+ struct cvmx_l2c_spar4_cn30xx cn31xx;
+ struct cvmx_l2c_spar4_s cn38xx;
+ struct cvmx_l2c_spar4_s cn38xxp2;
+ struct cvmx_l2c_spar4_s cn50xx;
+ struct cvmx_l2c_spar4_s cn52xx;
+ struct cvmx_l2c_spar4_s cn52xxp1;
+ struct cvmx_l2c_spar4_s cn56xx;
+ struct cvmx_l2c_spar4_s cn56xxp1;
+ struct cvmx_l2c_spar4_s cn58xx;
+ struct cvmx_l2c_spar4_s cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
new file mode 100644
index 000000000000..2a8c0902ea50
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -0,0 +1,325 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * Interface to the Level 2 Cache (L2C) control, measurement, and debugging
+ * facilities.
+ */
+
+#ifndef __CVMX_L2C_H__
+#define __CVMX_L2C_H__
+
+/* Deprecated macro, use function */
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc()
+
+/* Deprecated macro, use function */
+#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits()
+
+/* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets()
+
+#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+
+/* Defines for index aliasing computations */
+#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT \
+ (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
+
+#define CVMX_L2C_ALIAS_MASK \
+ (CVMX_L2C_IDX_MASK << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT)
+
+union cvmx_l2c_tag {
+ uint64_t u64;
+ struct {
+ uint64_t reserved:28;
+ uint64_t V:1; /* Line valid */
+ uint64_t D:1; /* Line dirty */
+ uint64_t L:1; /* Line locked */
+ uint64_t U:1; /* Use, LRU eviction */
+ uint64_t addr:32; /* Phys mem (not all bits valid) */
+ } s;
+};
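[Editorial aside, not part of the patch] A minimal sketch of how the tag layout above could be decoded once a raw 64-bit tag word has been obtained by whatever means the SDK provides; the helper name and the use of printk are illustrative assumptions:

	#include <linux/kernel.h>	/* printk() -- assumed kernel context */

	/* Decode one raw L2C tag word via the union above (hypothetical helper). */
	static void cvmx_l2c_dump_tag(uint64_t raw)
	{
		union cvmx_l2c_tag tag;

		tag.u64 = raw;
		if (!tag.s.V)
			return;			/* line not valid */
		printk("L2 tag: addr=0x%08x dirty=%u locked=%u used=%u\n",
		       (unsigned int)tag.s.addr, (unsigned int)tag.s.D,
		       (unsigned int)tag.s.L, (unsigned int)tag.s.U);
	}
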
+
+ /* L2C Performance Counter events. */
+enum cvmx_l2c_event {
+ CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53
+};
+
+/**
+ * Configure one of the four L2 Cache performance counters to capture event
+ * occurrences.
+ *
+ * @counter: The counter to configure. Range 0..3.
+ * @event: The type of L2 Cache event occurrence to count.
+ * @clear_on_read: When asserted, any read of the performance counter
+ * clears the counter.
+ *
+ * The routine does not clear the counter.
+ */
+void cvmx_l2c_config_perf(uint32_t counter,
+ enum cvmx_l2c_event event, uint32_t clear_on_read);
+/**
+ * Read the given L2 Cache performance counter. The counter must be configured
+ * before reading, but this routine does not enforce this requirement.
+ *
+ * @counter: The counter to read. Range 0..3.
+ *
+ * Returns The current counter value.
+ */
+uint64_t cvmx_l2c_read_perf(uint32_t counter);
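
A minimal usage sketch (not part of the patch) for the two prototypes above: count L2 data misses around a workload. It assumes this header is available at the path it is added under and that the caller runs on an Octeon part with L2C support built in.

#include <asm/octeon/cvmx-l2c.h>

/* Count L2 data-cache misses caused by @workload using counter 0. */
static uint64_t count_l2_data_misses(void (*workload)(void))
{
	uint64_t before, after;

	/* Program counter 0 for data misses; do not clear it on read. */
	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_DATA_MISS, 0);

	before = cvmx_l2c_read_perf(0);
	workload();
	after = cvmx_l2c_read_perf(0);

	return after - before;
}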
+
+/**
+ * Return the L2 Cache way partitioning for a given core.
+ *
+ * @core: The core processor of interest.
+ *
+ * Returns The mask specifying the partitioning. 0 bits in the mask indicate
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_core_way_partition(uint32_t core);
+
+/**
+ * Partitions the L2 cache for a core
+ *
+ * @core: The core that the partitioning applies to.
+ *
+ * @mask: The partitioning of the ways expressed as a binary mask. A 0
+ * bit allows the core to evict cache lines from a way, while a
+ * 1 bit blocks the core from evicting any lines from that
+ * way. There must be at least one allowed way (0 bit) in the
+ * mask.
+ *
+ * If any ways are blocked for all cores and the HW blocks, then those
+ * ways will never have any cache lines evicted from them. All cores
+ * and the hardware blocks are free to read from all ways regardless
+ * of the partitioning.
+ */
+int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
+
+/**
+ * Return the L2 Cache way partitioning for the hw blocks.
+ *
+ * Returns The mask specifying the reserved ways. 0 bits in the mask indicate
+ * the cache 'ways' that the hardware blocks can evict from.
+ * -1 on error
+ */
+int cvmx_l2c_get_hw_way_partition(void);
+
+/**
+ * Partitions the L2 cache for the hardware blocks.
+ *
+ * @mask: The partitioning of the ways expressed as a binary mask. A 0
+ * bit allows the hardware blocks to evict cache lines from a way,
+ * while a 1 bit blocks them from evicting any lines from that
+ * way. There must be at least one allowed way (0 bit) in the
+ * mask.
+ *
+ * If any ways are blocked for all cores and the HW blocks, then those
+ * ways will never have any cache lines evicted from them. All cores
+ * and the hardware blocks are free to read from all ways regardless
+ * of the partitioning.
+ */
+int cvmx_l2c_set_hw_way_partition(uint32_t mask);
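
A minimal sketch of the partitioning calls above, assuming an L2 with at least three ways so that blocking ways 0 and 1 still leaves an allowed way for the core.

#include <asm/octeon/cvmx-l2c.h>

/* Prevent core 2 from evicting lines out of ways 0 and 1. */
static int block_low_ways_for_core2(void)
{
	int mask = cvmx_l2c_get_core_way_partition(2);

	if (mask < 0)
		return mask;	/* -1 on error */

	/* Set bits block eviction; at least one bit must stay clear. */
	mask |= 0x3;
	return cvmx_l2c_set_core_way_partition(2, (uint32_t)mask);
}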
+
+/**
+ * Locks a line in the L2 cache at the specified physical address
+ *
+ * @addr: physical address of line to lock
+ *
+ * Returns 0 on success,
+ * 1 if line not locked.
+ */
+int cvmx_l2c_lock_line(uint64_t addr);
+
+/**
+ * Locks a specified memory region in the L2 cache.
+ *
+ * Note that if not all lines can be locked, it is because all but
+ * one of the ways (associations) available to the locking core are
+ * already locked. Having only one association available for normal
+ * caching may have a significant adverse effect on performance.
+ * Care should be taken to ensure that enough of the L2 cache is left
+ * unlocked to allow for normal caching of DRAM.
+ *
+ * @start: Physical address of the start of the region to lock
+ * @len: Length (in bytes) of region to lock
+ *
+ * Returns Number of requested lines that were not locked.
+ * 0 on success (all locked)
+ */
+int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
+
+/**
+ * Unlock and flush a cache line from the L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ * Note that this function will flush a matching but unlocked cache line.
+ * (If address is not in L2, no lines are flushed.)
+ *
+ * @address: Physical address to unlock
+ *
+ * Returns 0: line not unlocked
+ * 1: line unlocked
+ */
+int cvmx_l2c_unlock_line(uint64_t address);
+
+/**
+ * Unlocks a region of memory that is locked in the L2 cache
+ *
+ * @start: start physical address
+ * @len: length (in bytes) to unlock
+ *
+ * Returns Number of locked lines that the call unlocked
+ */
+int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
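
A sketch of the lock/unlock pairing described above; @phys must already be a physical address (obtaining it is outside the scope of this header).

#include <asm/octeon/cvmx-l2c.h>

/* Pin a buffer in L2 while it is accessed, then release it. */
static int with_buffer_pinned(uint64_t phys, uint64_t len)
{
	/* Returns the number of requested lines that were NOT locked. */
	int missed = cvmx_l2c_lock_mem_region(phys, len);

	/* ... access the buffer while (most of) it is pinned ... */

	cvmx_l2c_unlock_mem_region(phys, len);
	return missed;
}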
+
+/**
+ * Read the L2 controller tag for a given location in L2
+ *
+ * @association:
+ * Which association to read line from
+ * @index: Which index (set) to read from.
+ *
+ * Returns l2c tag structure for line requested.
+ */
+union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index);
+
+/* Wrapper around deprecated old function name */
+static inline union cvmx_l2c_tag cvmx_get_l2c_tag(uint32_t association,
+ uint32_t index)
+{
+ return cvmx_l2c_get_tag(association, index);
+}
+
+/**
+ * Returns the cache index for a given physical address
+ *
+ * @addr: physical address
+ *
+ * Returns L2 cache index
+ */
+uint32_t cvmx_l2c_address_to_index(uint64_t addr);
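
The tag accessor and the index helper above combine as in the sketch below. This only illustrates the lookup pattern: the tag's addr field holds a partial physical address, so an exact hit test needs more care.

#include <asm/octeon/cvmx-l2c.h>

/* Report whether any association at @phys's index holds a valid line. */
static int l2_index_has_valid_line(uint64_t phys)
{
	uint32_t index = cvmx_l2c_address_to_index(phys);
	int assoc;

	for (assoc = 0; assoc < cvmx_l2c_get_num_assoc(); assoc++) {
		union cvmx_l2c_tag tag = cvmx_l2c_get_tag(assoc, index);

		if (tag.s.V)
			return 1;
	}
	return 0;
}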
+
+/**
+ * Flushes (and unlocks) the entire L2 cache.
+ * IMPORTANT: Must only be run by one core at a time due to use
+ * of L2C debug features.
+ */
+void cvmx_l2c_flush(void);
+
+/**
+ * Return the size of the L2 Cache.
+ *
+ * Returns The size of the L2 cache in bytes,
+ * -1 on error (unrecognized model)
+ */
+int cvmx_l2c_get_cache_size_bytes(void);
+
+/**
+ * Return the number of sets in the L2 Cache
+ *
+ * Returns The number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_num_sets(void);
+
+/**
+ * Return log base 2 of the number of sets in the L2 cache.
+ *
+ * Returns Log base 2 of the number of sets in the L2 cache.
+ */
+int cvmx_l2c_get_set_bits(void);
+
+/**
+ * Return the number of associations in the L2 Cache
+ *
+ * Returns The number of associations (ways) in the L2 cache.
+ */
+int cvmx_l2c_get_num_assoc(void);
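
The geometry accessors above relate through the 128-byte line size (CVMX_L2C_IDX_ADDR_SHIFT == 7); a small consistency sketch:

#include <asm/octeon/cvmx-l2c.h>

/* Lines per way; on known models this equals cvmx_l2c_get_num_sets(). */
static int l2_lines_per_way(void)
{
	int bytes = cvmx_l2c_get_cache_size_bytes();
	int assoc = cvmx_l2c_get_num_assoc();

	if (bytes < 0 || assoc <= 0)
		return -1;	/* unrecognized model */

	return bytes / assoc / 128;
}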
+
+/**
+ * Flush a line from the L2 cache
+ * This should only be called from one core at a time, as this routine
+ * sets the core to the 'debug' core in order to flush the line.
+ *
+ * @assoc: Association (or way) to flush
+ * @index: Index to flush
+ */
+void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index);
+
+#endif /* __CVMX_L2C_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-l2d-defs.h b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
new file mode 100644
index 000000000000..d7102d455e1b
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2d-defs.h
@@ -0,0 +1,369 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2D_DEFS_H__
+#define __CVMX_L2D_DEFS_H__
+
+#define CVMX_L2D_BST0 \
+ CVMX_ADD_IO_SEG(0x0001180080000780ull)
+#define CVMX_L2D_BST1 \
+ CVMX_ADD_IO_SEG(0x0001180080000788ull)
+#define CVMX_L2D_BST2 \
+ CVMX_ADD_IO_SEG(0x0001180080000790ull)
+#define CVMX_L2D_BST3 \
+ CVMX_ADD_IO_SEG(0x0001180080000798ull)
+#define CVMX_L2D_ERR \
+ CVMX_ADD_IO_SEG(0x0001180080000010ull)
+#define CVMX_L2D_FADR \
+ CVMX_ADD_IO_SEG(0x0001180080000018ull)
+#define CVMX_L2D_FSYN0 \
+ CVMX_ADD_IO_SEG(0x0001180080000020ull)
+#define CVMX_L2D_FSYN1 \
+ CVMX_ADD_IO_SEG(0x0001180080000028ull)
+#define CVMX_L2D_FUS0 \
+ CVMX_ADD_IO_SEG(0x00011800800007A0ull)
+#define CVMX_L2D_FUS1 \
+ CVMX_ADD_IO_SEG(0x00011800800007A8ull)
+#define CVMX_L2D_FUS2 \
+ CVMX_ADD_IO_SEG(0x00011800800007B0ull)
+#define CVMX_L2D_FUS3 \
+ CVMX_ADD_IO_SEG(0x00011800800007B8ull)
+
+union cvmx_l2d_bst0 {
+ uint64_t u64;
+ struct cvmx_l2d_bst0_s {
+ uint64_t reserved_35_63:29;
+ uint64_t ftl:1;
+ uint64_t q0stat:34;
+ } s;
+ struct cvmx_l2d_bst0_s cn30xx;
+ struct cvmx_l2d_bst0_s cn31xx;
+ struct cvmx_l2d_bst0_s cn38xx;
+ struct cvmx_l2d_bst0_s cn38xxp2;
+ struct cvmx_l2d_bst0_s cn50xx;
+ struct cvmx_l2d_bst0_s cn52xx;
+ struct cvmx_l2d_bst0_s cn52xxp1;
+ struct cvmx_l2d_bst0_s cn56xx;
+ struct cvmx_l2d_bst0_s cn56xxp1;
+ struct cvmx_l2d_bst0_s cn58xx;
+ struct cvmx_l2d_bst0_s cn58xxp1;
+};
+
+union cvmx_l2d_bst1 {
+ uint64_t u64;
+ struct cvmx_l2d_bst1_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q1stat:34;
+ } s;
+ struct cvmx_l2d_bst1_s cn30xx;
+ struct cvmx_l2d_bst1_s cn31xx;
+ struct cvmx_l2d_bst1_s cn38xx;
+ struct cvmx_l2d_bst1_s cn38xxp2;
+ struct cvmx_l2d_bst1_s cn50xx;
+ struct cvmx_l2d_bst1_s cn52xx;
+ struct cvmx_l2d_bst1_s cn52xxp1;
+ struct cvmx_l2d_bst1_s cn56xx;
+ struct cvmx_l2d_bst1_s cn56xxp1;
+ struct cvmx_l2d_bst1_s cn58xx;
+ struct cvmx_l2d_bst1_s cn58xxp1;
+};
+
+union cvmx_l2d_bst2 {
+ uint64_t u64;
+ struct cvmx_l2d_bst2_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q2stat:34;
+ } s;
+ struct cvmx_l2d_bst2_s cn30xx;
+ struct cvmx_l2d_bst2_s cn31xx;
+ struct cvmx_l2d_bst2_s cn38xx;
+ struct cvmx_l2d_bst2_s cn38xxp2;
+ struct cvmx_l2d_bst2_s cn50xx;
+ struct cvmx_l2d_bst2_s cn52xx;
+ struct cvmx_l2d_bst2_s cn52xxp1;
+ struct cvmx_l2d_bst2_s cn56xx;
+ struct cvmx_l2d_bst2_s cn56xxp1;
+ struct cvmx_l2d_bst2_s cn58xx;
+ struct cvmx_l2d_bst2_s cn58xxp1;
+};
+
+union cvmx_l2d_bst3 {
+ uint64_t u64;
+ struct cvmx_l2d_bst3_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q3stat:34;
+ } s;
+ struct cvmx_l2d_bst3_s cn30xx;
+ struct cvmx_l2d_bst3_s cn31xx;
+ struct cvmx_l2d_bst3_s cn38xx;
+ struct cvmx_l2d_bst3_s cn38xxp2;
+ struct cvmx_l2d_bst3_s cn50xx;
+ struct cvmx_l2d_bst3_s cn52xx;
+ struct cvmx_l2d_bst3_s cn52xxp1;
+ struct cvmx_l2d_bst3_s cn56xx;
+ struct cvmx_l2d_bst3_s cn56xxp1;
+ struct cvmx_l2d_bst3_s cn58xx;
+ struct cvmx_l2d_bst3_s cn58xxp1;
+};
+
+union cvmx_l2d_err {
+ uint64_t u64;
+ struct cvmx_l2d_err_s {
+ uint64_t reserved_6_63:58;
+ uint64_t bmhclsel:1;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } s;
+ struct cvmx_l2d_err_s cn30xx;
+ struct cvmx_l2d_err_s cn31xx;
+ struct cvmx_l2d_err_s cn38xx;
+ struct cvmx_l2d_err_s cn38xxp2;
+ struct cvmx_l2d_err_s cn50xx;
+ struct cvmx_l2d_err_s cn52xx;
+ struct cvmx_l2d_err_s cn52xxp1;
+ struct cvmx_l2d_err_s cn56xx;
+ struct cvmx_l2d_err_s cn56xxp1;
+ struct cvmx_l2d_err_s cn58xx;
+ struct cvmx_l2d_err_s cn58xxp1;
+};
+
+union cvmx_l2d_fadr {
+ uint64_t u64;
+ struct cvmx_l2d_fadr_s {
+ uint64_t reserved_19_63:45;
+ uint64_t fadru:1;
+ uint64_t fowmsk:4;
+ uint64_t fset:3;
+ uint64_t fadr:11;
+ } s;
+ struct cvmx_l2d_fadr_cn30xx {
+ uint64_t reserved_18_63:46;
+ uint64_t fowmsk:4;
+ uint64_t reserved_13_13:1;
+ uint64_t fset:2;
+ uint64_t reserved_9_10:2;
+ uint64_t fadr:9;
+ } cn30xx;
+ struct cvmx_l2d_fadr_cn31xx {
+ uint64_t reserved_18_63:46;
+ uint64_t fowmsk:4;
+ uint64_t reserved_13_13:1;
+ uint64_t fset:2;
+ uint64_t reserved_10_10:1;
+ uint64_t fadr:10;
+ } cn31xx;
+ struct cvmx_l2d_fadr_cn38xx {
+ uint64_t reserved_18_63:46;
+ uint64_t fowmsk:4;
+ uint64_t fset:3;
+ uint64_t fadr:11;
+ } cn38xx;
+ struct cvmx_l2d_fadr_cn38xx cn38xxp2;
+ struct cvmx_l2d_fadr_cn50xx {
+ uint64_t reserved_18_63:46;
+ uint64_t fowmsk:4;
+ uint64_t fset:3;
+ uint64_t reserved_8_10:3;
+ uint64_t fadr:8;
+ } cn50xx;
+ struct cvmx_l2d_fadr_cn52xx {
+ uint64_t reserved_18_63:46;
+ uint64_t fowmsk:4;
+ uint64_t fset:3;
+ uint64_t reserved_10_10:1;
+ uint64_t fadr:10;
+ } cn52xx;
+ struct cvmx_l2d_fadr_cn52xx cn52xxp1;
+ struct cvmx_l2d_fadr_s cn56xx;
+ struct cvmx_l2d_fadr_s cn56xxp1;
+ struct cvmx_l2d_fadr_s cn58xx;
+ struct cvmx_l2d_fadr_s cn58xxp1;
+};
+
+union cvmx_l2d_fsyn0 {
+ uint64_t u64;
+ struct cvmx_l2d_fsyn0_s {
+ uint64_t reserved_20_63:44;
+ uint64_t fsyn_ow1:10;
+ uint64_t fsyn_ow0:10;
+ } s;
+ struct cvmx_l2d_fsyn0_s cn30xx;
+ struct cvmx_l2d_fsyn0_s cn31xx;
+ struct cvmx_l2d_fsyn0_s cn38xx;
+ struct cvmx_l2d_fsyn0_s cn38xxp2;
+ struct cvmx_l2d_fsyn0_s cn50xx;
+ struct cvmx_l2d_fsyn0_s cn52xx;
+ struct cvmx_l2d_fsyn0_s cn52xxp1;
+ struct cvmx_l2d_fsyn0_s cn56xx;
+ struct cvmx_l2d_fsyn0_s cn56xxp1;
+ struct cvmx_l2d_fsyn0_s cn58xx;
+ struct cvmx_l2d_fsyn0_s cn58xxp1;
+};
+
+union cvmx_l2d_fsyn1 {
+ uint64_t u64;
+ struct cvmx_l2d_fsyn1_s {
+ uint64_t reserved_20_63:44;
+ uint64_t fsyn_ow3:10;
+ uint64_t fsyn_ow2:10;
+ } s;
+ struct cvmx_l2d_fsyn1_s cn30xx;
+ struct cvmx_l2d_fsyn1_s cn31xx;
+ struct cvmx_l2d_fsyn1_s cn38xx;
+ struct cvmx_l2d_fsyn1_s cn38xxp2;
+ struct cvmx_l2d_fsyn1_s cn50xx;
+ struct cvmx_l2d_fsyn1_s cn52xx;
+ struct cvmx_l2d_fsyn1_s cn52xxp1;
+ struct cvmx_l2d_fsyn1_s cn56xx;
+ struct cvmx_l2d_fsyn1_s cn56xxp1;
+ struct cvmx_l2d_fsyn1_s cn58xx;
+ struct cvmx_l2d_fsyn1_s cn58xxp1;
+};
+
+union cvmx_l2d_fus0 {
+ uint64_t u64;
+ struct cvmx_l2d_fus0_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q0fus:34;
+ } s;
+ struct cvmx_l2d_fus0_s cn30xx;
+ struct cvmx_l2d_fus0_s cn31xx;
+ struct cvmx_l2d_fus0_s cn38xx;
+ struct cvmx_l2d_fus0_s cn38xxp2;
+ struct cvmx_l2d_fus0_s cn50xx;
+ struct cvmx_l2d_fus0_s cn52xx;
+ struct cvmx_l2d_fus0_s cn52xxp1;
+ struct cvmx_l2d_fus0_s cn56xx;
+ struct cvmx_l2d_fus0_s cn56xxp1;
+ struct cvmx_l2d_fus0_s cn58xx;
+ struct cvmx_l2d_fus0_s cn58xxp1;
+};
+
+union cvmx_l2d_fus1 {
+ uint64_t u64;
+ struct cvmx_l2d_fus1_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q1fus:34;
+ } s;
+ struct cvmx_l2d_fus1_s cn30xx;
+ struct cvmx_l2d_fus1_s cn31xx;
+ struct cvmx_l2d_fus1_s cn38xx;
+ struct cvmx_l2d_fus1_s cn38xxp2;
+ struct cvmx_l2d_fus1_s cn50xx;
+ struct cvmx_l2d_fus1_s cn52xx;
+ struct cvmx_l2d_fus1_s cn52xxp1;
+ struct cvmx_l2d_fus1_s cn56xx;
+ struct cvmx_l2d_fus1_s cn56xxp1;
+ struct cvmx_l2d_fus1_s cn58xx;
+ struct cvmx_l2d_fus1_s cn58xxp1;
+};
+
+union cvmx_l2d_fus2 {
+ uint64_t u64;
+ struct cvmx_l2d_fus2_s {
+ uint64_t reserved_34_63:30;
+ uint64_t q2fus:34;
+ } s;
+ struct cvmx_l2d_fus2_s cn30xx;
+ struct cvmx_l2d_fus2_s cn31xx;
+ struct cvmx_l2d_fus2_s cn38xx;
+ struct cvmx_l2d_fus2_s cn38xxp2;
+ struct cvmx_l2d_fus2_s cn50xx;
+ struct cvmx_l2d_fus2_s cn52xx;
+ struct cvmx_l2d_fus2_s cn52xxp1;
+ struct cvmx_l2d_fus2_s cn56xx;
+ struct cvmx_l2d_fus2_s cn56xxp1;
+ struct cvmx_l2d_fus2_s cn58xx;
+ struct cvmx_l2d_fus2_s cn58xxp1;
+};
+
+union cvmx_l2d_fus3 {
+ uint64_t u64;
+ struct cvmx_l2d_fus3_s {
+ uint64_t reserved_40_63:24;
+ uint64_t ema_ctl:3;
+ uint64_t reserved_34_36:3;
+ uint64_t q3fus:34;
+ } s;
+ struct cvmx_l2d_fus3_cn30xx {
+ uint64_t reserved_35_63:29;
+ uint64_t crip_64k:1;
+ uint64_t q3fus:34;
+ } cn30xx;
+ struct cvmx_l2d_fus3_cn31xx {
+ uint64_t reserved_35_63:29;
+ uint64_t crip_128k:1;
+ uint64_t q3fus:34;
+ } cn31xx;
+ struct cvmx_l2d_fus3_cn38xx {
+ uint64_t reserved_36_63:28;
+ uint64_t crip_256k:1;
+ uint64_t crip_512k:1;
+ uint64_t q3fus:34;
+ } cn38xx;
+ struct cvmx_l2d_fus3_cn38xx cn38xxp2;
+ struct cvmx_l2d_fus3_cn50xx {
+ uint64_t reserved_40_63:24;
+ uint64_t ema_ctl:3;
+ uint64_t reserved_36_36:1;
+ uint64_t crip_32k:1;
+ uint64_t crip_64k:1;
+ uint64_t q3fus:34;
+ } cn50xx;
+ struct cvmx_l2d_fus3_cn52xx {
+ uint64_t reserved_40_63:24;
+ uint64_t ema_ctl:3;
+ uint64_t reserved_36_36:1;
+ uint64_t crip_128k:1;
+ uint64_t crip_256k:1;
+ uint64_t q3fus:34;
+ } cn52xx;
+ struct cvmx_l2d_fus3_cn52xx cn52xxp1;
+ struct cvmx_l2d_fus3_cn56xx {
+ uint64_t reserved_40_63:24;
+ uint64_t ema_ctl:3;
+ uint64_t reserved_36_36:1;
+ uint64_t crip_512k:1;
+ uint64_t crip_1024k:1;
+ uint64_t q3fus:34;
+ } cn56xx;
+ struct cvmx_l2d_fus3_cn56xx cn56xxp1;
+ struct cvmx_l2d_fus3_cn58xx {
+ uint64_t reserved_39_63:25;
+ uint64_t ema_ctl:2;
+ uint64_t reserved_36_36:1;
+ uint64_t crip_512k:1;
+ uint64_t crip_1024k:1;
+ uint64_t q3fus:34;
+ } cn58xx;
+ struct cvmx_l2d_fus3_cn58xx cn58xxp1;
+};
+
+#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
new file mode 100644
index 000000000000..2639a3f5ffc2
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -0,0 +1,141 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_L2T_DEFS_H__
+#define __CVMX_L2T_DEFS_H__
+
+#define CVMX_L2T_ERR \
+ CVMX_ADD_IO_SEG(0x0001180080000008ull)
+
+union cvmx_l2t_err {
+ uint64_t u64;
+ struct cvmx_l2t_err_s {
+ uint64_t reserved_29_63:35;
+ uint64_t fadru:1;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t fset:3;
+ uint64_t fadr:10;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } s;
+ struct cvmx_l2t_err_cn30xx {
+ uint64_t reserved_28_63:36;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t reserved_23_23:1;
+ uint64_t fset:2;
+ uint64_t reserved_19_20:2;
+ uint64_t fadr:8;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } cn30xx;
+ struct cvmx_l2t_err_cn31xx {
+ uint64_t reserved_28_63:36;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t reserved_23_23:1;
+ uint64_t fset:2;
+ uint64_t reserved_20_20:1;
+ uint64_t fadr:9;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } cn31xx;
+ struct cvmx_l2t_err_cn38xx {
+ uint64_t reserved_28_63:36;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t fset:3;
+ uint64_t fadr:10;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } cn38xx;
+ struct cvmx_l2t_err_cn38xx cn38xxp2;
+ struct cvmx_l2t_err_cn50xx {
+ uint64_t reserved_28_63:36;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t fset:3;
+ uint64_t reserved_18_20:3;
+ uint64_t fadr:7;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } cn50xx;
+ struct cvmx_l2t_err_cn52xx {
+ uint64_t reserved_28_63:36;
+ uint64_t lck_intena2:1;
+ uint64_t lckerr2:1;
+ uint64_t lck_intena:1;
+ uint64_t lckerr:1;
+ uint64_t fset:3;
+ uint64_t reserved_20_20:1;
+ uint64_t fadr:9;
+ uint64_t fsyn:6;
+ uint64_t ded_err:1;
+ uint64_t sec_err:1;
+ uint64_t ded_intena:1;
+ uint64_t sec_intena:1;
+ uint64_t ecc_ena:1;
+ } cn52xx;
+ struct cvmx_l2t_err_cn52xx cn52xxp1;
+ struct cvmx_l2t_err_s cn56xx;
+ struct cvmx_l2t_err_s cn56xxp1;
+ struct cvmx_l2t_err_s cn58xx;
+ struct cvmx_l2t_err_s cn58xxp1;
+};
+
+#endif
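
These *-defs.h layouts all follow the same pattern: read the raw 64-bit CSR into the union's u64 member, then pick fields through the common `s` view or a model-specific view. A sketch, assuming the cvmx_read_csr() accessor provided elsewhere in the Octeon headers:

#include <asm/octeon/cvmx.h>		/* cvmx_read_csr() (assumed) */
#include <asm/octeon/cvmx-l2t-defs.h>

/* Nonzero if an L2 tag ECC error (single or double bit) is latched. */
static int l2t_ecc_error_seen(void)
{
	union cvmx_l2t_err err;

	err.u64 = cvmx_read_csr(CVMX_L2T_ERR);

	return err.s.ded_err || err.s.sec_err;
}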
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
new file mode 100644
index 000000000000..16f174a4dadf
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -0,0 +1,240 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_LED_DEFS_H__
+#define __CVMX_LED_DEFS_H__
+
+#define CVMX_LED_BLINK \
+ CVMX_ADD_IO_SEG(0x0001180000001A48ull)
+#define CVMX_LED_CLK_PHASE \
+ CVMX_ADD_IO_SEG(0x0001180000001A08ull)
+#define CVMX_LED_CYLON \
+ CVMX_ADD_IO_SEG(0x0001180000001AF8ull)
+#define CVMX_LED_DBG \
+ CVMX_ADD_IO_SEG(0x0001180000001A18ull)
+#define CVMX_LED_EN \
+ CVMX_ADD_IO_SEG(0x0001180000001A00ull)
+#define CVMX_LED_POLARITY \
+ CVMX_ADD_IO_SEG(0x0001180000001A50ull)
+#define CVMX_LED_PRT \
+ CVMX_ADD_IO_SEG(0x0001180000001A10ull)
+#define CVMX_LED_PRT_FMT \
+ CVMX_ADD_IO_SEG(0x0001180000001A30ull)
+#define CVMX_LED_PRT_STATUSX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001A80ull + (((offset) & 7) * 8))
+#define CVMX_LED_UDD_CNTX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001A20ull + (((offset) & 1) * 8))
+#define CVMX_LED_UDD_DATX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001A38ull + (((offset) & 1) * 8))
+#define CVMX_LED_UDD_DAT_CLRX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001AC8ull + (((offset) & 1) * 16))
+#define CVMX_LED_UDD_DAT_SETX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001AC0ull + (((offset) & 1) * 16))
+
+union cvmx_led_blink {
+ uint64_t u64;
+ struct cvmx_led_blink_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rate:8;
+ } s;
+ struct cvmx_led_blink_s cn38xx;
+ struct cvmx_led_blink_s cn38xxp2;
+ struct cvmx_led_blink_s cn56xx;
+ struct cvmx_led_blink_s cn56xxp1;
+ struct cvmx_led_blink_s cn58xx;
+ struct cvmx_led_blink_s cn58xxp1;
+};
+
+union cvmx_led_clk_phase {
+ uint64_t u64;
+ struct cvmx_led_clk_phase_s {
+ uint64_t reserved_7_63:57;
+ uint64_t phase:7;
+ } s;
+ struct cvmx_led_clk_phase_s cn38xx;
+ struct cvmx_led_clk_phase_s cn38xxp2;
+ struct cvmx_led_clk_phase_s cn56xx;
+ struct cvmx_led_clk_phase_s cn56xxp1;
+ struct cvmx_led_clk_phase_s cn58xx;
+ struct cvmx_led_clk_phase_s cn58xxp1;
+};
+
+union cvmx_led_cylon {
+ uint64_t u64;
+ struct cvmx_led_cylon_s {
+ uint64_t reserved_16_63:48;
+ uint64_t rate:16;
+ } s;
+ struct cvmx_led_cylon_s cn38xx;
+ struct cvmx_led_cylon_s cn38xxp2;
+ struct cvmx_led_cylon_s cn56xx;
+ struct cvmx_led_cylon_s cn56xxp1;
+ struct cvmx_led_cylon_s cn58xx;
+ struct cvmx_led_cylon_s cn58xxp1;
+};
+
+union cvmx_led_dbg {
+ uint64_t u64;
+ struct cvmx_led_dbg_s {
+ uint64_t reserved_1_63:63;
+ uint64_t dbg_en:1;
+ } s;
+ struct cvmx_led_dbg_s cn38xx;
+ struct cvmx_led_dbg_s cn38xxp2;
+ struct cvmx_led_dbg_s cn56xx;
+ struct cvmx_led_dbg_s cn56xxp1;
+ struct cvmx_led_dbg_s cn58xx;
+ struct cvmx_led_dbg_s cn58xxp1;
+};
+
+union cvmx_led_en {
+ uint64_t u64;
+ struct cvmx_led_en_s {
+ uint64_t reserved_1_63:63;
+ uint64_t en:1;
+ } s;
+ struct cvmx_led_en_s cn38xx;
+ struct cvmx_led_en_s cn38xxp2;
+ struct cvmx_led_en_s cn56xx;
+ struct cvmx_led_en_s cn56xxp1;
+ struct cvmx_led_en_s cn58xx;
+ struct cvmx_led_en_s cn58xxp1;
+};
+
+union cvmx_led_polarity {
+ uint64_t u64;
+ struct cvmx_led_polarity_s {
+ uint64_t reserved_1_63:63;
+ uint64_t polarity:1;
+ } s;
+ struct cvmx_led_polarity_s cn38xx;
+ struct cvmx_led_polarity_s cn38xxp2;
+ struct cvmx_led_polarity_s cn56xx;
+ struct cvmx_led_polarity_s cn56xxp1;
+ struct cvmx_led_polarity_s cn58xx;
+ struct cvmx_led_polarity_s cn58xxp1;
+};
+
+union cvmx_led_prt {
+ uint64_t u64;
+ struct cvmx_led_prt_s {
+ uint64_t reserved_8_63:56;
+ uint64_t prt_en:8;
+ } s;
+ struct cvmx_led_prt_s cn38xx;
+ struct cvmx_led_prt_s cn38xxp2;
+ struct cvmx_led_prt_s cn56xx;
+ struct cvmx_led_prt_s cn56xxp1;
+ struct cvmx_led_prt_s cn58xx;
+ struct cvmx_led_prt_s cn58xxp1;
+};
+
+union cvmx_led_prt_fmt {
+ uint64_t u64;
+ struct cvmx_led_prt_fmt_s {
+ uint64_t reserved_4_63:60;
+ uint64_t format:4;
+ } s;
+ struct cvmx_led_prt_fmt_s cn38xx;
+ struct cvmx_led_prt_fmt_s cn38xxp2;
+ struct cvmx_led_prt_fmt_s cn56xx;
+ struct cvmx_led_prt_fmt_s cn56xxp1;
+ struct cvmx_led_prt_fmt_s cn58xx;
+ struct cvmx_led_prt_fmt_s cn58xxp1;
+};
+
+union cvmx_led_prt_statusx {
+ uint64_t u64;
+ struct cvmx_led_prt_statusx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t status:6;
+ } s;
+ struct cvmx_led_prt_statusx_s cn38xx;
+ struct cvmx_led_prt_statusx_s cn38xxp2;
+ struct cvmx_led_prt_statusx_s cn56xx;
+ struct cvmx_led_prt_statusx_s cn56xxp1;
+ struct cvmx_led_prt_statusx_s cn58xx;
+ struct cvmx_led_prt_statusx_s cn58xxp1;
+};
+
+union cvmx_led_udd_cntx {
+ uint64_t u64;
+ struct cvmx_led_udd_cntx_s {
+ uint64_t reserved_6_63:58;
+ uint64_t cnt:6;
+ } s;
+ struct cvmx_led_udd_cntx_s cn38xx;
+ struct cvmx_led_udd_cntx_s cn38xxp2;
+ struct cvmx_led_udd_cntx_s cn56xx;
+ struct cvmx_led_udd_cntx_s cn56xxp1;
+ struct cvmx_led_udd_cntx_s cn58xx;
+ struct cvmx_led_udd_cntx_s cn58xxp1;
+};
+
+union cvmx_led_udd_datx {
+ uint64_t u64;
+ struct cvmx_led_udd_datx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t dat:32;
+ } s;
+ struct cvmx_led_udd_datx_s cn38xx;
+ struct cvmx_led_udd_datx_s cn38xxp2;
+ struct cvmx_led_udd_datx_s cn56xx;
+ struct cvmx_led_udd_datx_s cn56xxp1;
+ struct cvmx_led_udd_datx_s cn58xx;
+ struct cvmx_led_udd_datx_s cn58xxp1;
+};
+
+union cvmx_led_udd_dat_clrx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_clrx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t clr:32;
+ } s;
+ struct cvmx_led_udd_dat_clrx_s cn38xx;
+ struct cvmx_led_udd_dat_clrx_s cn38xxp2;
+ struct cvmx_led_udd_dat_clrx_s cn56xx;
+ struct cvmx_led_udd_dat_clrx_s cn56xxp1;
+ struct cvmx_led_udd_dat_clrx_s cn58xx;
+ struct cvmx_led_udd_dat_clrx_s cn58xxp1;
+};
+
+union cvmx_led_udd_dat_setx {
+ uint64_t u64;
+ struct cvmx_led_udd_dat_setx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t set:32;
+ } s;
+ struct cvmx_led_udd_dat_setx_s cn38xx;
+ struct cvmx_led_udd_dat_setx_s cn38xxp2;
+ struct cvmx_led_udd_dat_setx_s cn56xx;
+ struct cvmx_led_udd_dat_setx_s cn56xxp1;
+ struct cvmx_led_udd_dat_setx_s cn58xx;
+ struct cvmx_led_udd_dat_setx_s cn58xxp1;
+};
+
+#endif
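
A sketch of driving these LED registers, again assuming the cvmx_write_csr() accessor from the Octeon support headers:

#include <asm/octeon/cvmx.h>		/* cvmx_write_csr() (assumed) */
#include <asm/octeon/cvmx-led-defs.h>

/* Enable the LED controller and the first eight per-port LEDs. */
static void led_enable_ports(void)
{
	union cvmx_led_en en;
	union cvmx_led_prt prt;

	en.u64 = 0;
	en.s.en = 1;
	cvmx_write_csr(CVMX_LED_EN, en.u64);

	prt.u64 = 0;
	prt.s.prt_en = 0xff;
	cvmx_write_csr(CVMX_LED_PRT, prt.u64);
}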
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
new file mode 100644
index 000000000000..6555f0530988
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -0,0 +1,2004 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_MIO_DEFS_H__
+#define __CVMX_MIO_DEFS_H__
+
+#define CVMX_MIO_BOOT_BIST_STAT \
+ CVMX_ADD_IO_SEG(0x00011800000000F8ull)
+#define CVMX_MIO_BOOT_COMP \
+ CVMX_ADD_IO_SEG(0x00011800000000B8ull)
+#define CVMX_MIO_BOOT_DMA_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000100ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_INTX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000138ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_INT_ENX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000150ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_DMA_TIMX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000120ull + (((offset) & 3) * 8))
+#define CVMX_MIO_BOOT_ERR \
+ CVMX_ADD_IO_SEG(0x00011800000000A0ull)
+#define CVMX_MIO_BOOT_INT \
+ CVMX_ADD_IO_SEG(0x00011800000000A8ull)
+#define CVMX_MIO_BOOT_LOC_ADR \
+ CVMX_ADD_IO_SEG(0x0001180000000090ull)
+#define CVMX_MIO_BOOT_LOC_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000080ull + (((offset) & 1) * 8))
+#define CVMX_MIO_BOOT_LOC_DAT \
+ CVMX_ADD_IO_SEG(0x0001180000000098ull)
+#define CVMX_MIO_BOOT_PIN_DEFS \
+ CVMX_ADD_IO_SEG(0x00011800000000C0ull)
+#define CVMX_MIO_BOOT_REG_CFGX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000000ull + (((offset) & 7) * 8))
+#define CVMX_MIO_BOOT_REG_TIMX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000040ull + (((offset) & 7) * 8))
+#define CVMX_MIO_BOOT_THR \
+ CVMX_ADD_IO_SEG(0x00011800000000B0ull)
+#define CVMX_MIO_FUS_BNK_DATX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001520ull + (((offset) & 3) * 8))
+#define CVMX_MIO_FUS_DAT0 \
+ CVMX_ADD_IO_SEG(0x0001180000001400ull)
+#define CVMX_MIO_FUS_DAT1 \
+ CVMX_ADD_IO_SEG(0x0001180000001408ull)
+#define CVMX_MIO_FUS_DAT2 \
+ CVMX_ADD_IO_SEG(0x0001180000001410ull)
+#define CVMX_MIO_FUS_DAT3 \
+ CVMX_ADD_IO_SEG(0x0001180000001418ull)
+#define CVMX_MIO_FUS_EMA \
+ CVMX_ADD_IO_SEG(0x0001180000001550ull)
+#define CVMX_MIO_FUS_PDF \
+ CVMX_ADD_IO_SEG(0x0001180000001420ull)
+#define CVMX_MIO_FUS_PLL \
+ CVMX_ADD_IO_SEG(0x0001180000001580ull)
+#define CVMX_MIO_FUS_PROG \
+ CVMX_ADD_IO_SEG(0x0001180000001510ull)
+#define CVMX_MIO_FUS_PROG_TIMES \
+ CVMX_ADD_IO_SEG(0x0001180000001518ull)
+#define CVMX_MIO_FUS_RCMD \
+ CVMX_ADD_IO_SEG(0x0001180000001500ull)
+#define CVMX_MIO_FUS_SPR_REPAIR_RES \
+ CVMX_ADD_IO_SEG(0x0001180000001548ull)
+#define CVMX_MIO_FUS_SPR_REPAIR_SUM \
+ CVMX_ADD_IO_SEG(0x0001180000001540ull)
+#define CVMX_MIO_FUS_UNLOCK \
+ CVMX_ADD_IO_SEG(0x0001180000001578ull)
+#define CVMX_MIO_FUS_WADR \
+ CVMX_ADD_IO_SEG(0x0001180000001508ull)
+#define CVMX_MIO_NDF_DMA_CFG \
+ CVMX_ADD_IO_SEG(0x0001180000000168ull)
+#define CVMX_MIO_NDF_DMA_INT \
+ CVMX_ADD_IO_SEG(0x0001180000000170ull)
+#define CVMX_MIO_NDF_DMA_INT_EN \
+ CVMX_ADD_IO_SEG(0x0001180000000178ull)
+#define CVMX_MIO_PLL_CTL \
+ CVMX_ADD_IO_SEG(0x0001180000001448ull)
+#define CVMX_MIO_PLL_SETTING \
+ CVMX_ADD_IO_SEG(0x0001180000001440ull)
+#define CVMX_MIO_TWSX_INT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001010ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_SW_TWSI(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001000ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_SW_TWSI_EXT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001018ull + (((offset) & 1) * 512))
+#define CVMX_MIO_TWSX_TWSI_SW(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000001008ull + (((offset) & 1) * 512))
+#define CVMX_MIO_UART2_DLH \
+ CVMX_ADD_IO_SEG(0x0001180000000488ull)
+#define CVMX_MIO_UART2_DLL \
+ CVMX_ADD_IO_SEG(0x0001180000000480ull)
+#define CVMX_MIO_UART2_FAR \
+ CVMX_ADD_IO_SEG(0x0001180000000520ull)
+#define CVMX_MIO_UART2_FCR \
+ CVMX_ADD_IO_SEG(0x0001180000000450ull)
+#define CVMX_MIO_UART2_HTX \
+ CVMX_ADD_IO_SEG(0x0001180000000708ull)
+#define CVMX_MIO_UART2_IER \
+ CVMX_ADD_IO_SEG(0x0001180000000408ull)
+#define CVMX_MIO_UART2_IIR \
+ CVMX_ADD_IO_SEG(0x0001180000000410ull)
+#define CVMX_MIO_UART2_LCR \
+ CVMX_ADD_IO_SEG(0x0001180000000418ull)
+#define CVMX_MIO_UART2_LSR \
+ CVMX_ADD_IO_SEG(0x0001180000000428ull)
+#define CVMX_MIO_UART2_MCR \
+ CVMX_ADD_IO_SEG(0x0001180000000420ull)
+#define CVMX_MIO_UART2_MSR \
+ CVMX_ADD_IO_SEG(0x0001180000000430ull)
+#define CVMX_MIO_UART2_RBR \
+ CVMX_ADD_IO_SEG(0x0001180000000400ull)
+#define CVMX_MIO_UART2_RFL \
+ CVMX_ADD_IO_SEG(0x0001180000000608ull)
+#define CVMX_MIO_UART2_RFW \
+ CVMX_ADD_IO_SEG(0x0001180000000530ull)
+#define CVMX_MIO_UART2_SBCR \
+ CVMX_ADD_IO_SEG(0x0001180000000620ull)
+#define CVMX_MIO_UART2_SCR \
+ CVMX_ADD_IO_SEG(0x0001180000000438ull)
+#define CVMX_MIO_UART2_SFE \
+ CVMX_ADD_IO_SEG(0x0001180000000630ull)
+#define CVMX_MIO_UART2_SRR \
+ CVMX_ADD_IO_SEG(0x0001180000000610ull)
+#define CVMX_MIO_UART2_SRT \
+ CVMX_ADD_IO_SEG(0x0001180000000638ull)
+#define CVMX_MIO_UART2_SRTS \
+ CVMX_ADD_IO_SEG(0x0001180000000618ull)
+#define CVMX_MIO_UART2_STT \
+ CVMX_ADD_IO_SEG(0x0001180000000700ull)
+#define CVMX_MIO_UART2_TFL \
+ CVMX_ADD_IO_SEG(0x0001180000000600ull)
+#define CVMX_MIO_UART2_TFR \
+ CVMX_ADD_IO_SEG(0x0001180000000528ull)
+#define CVMX_MIO_UART2_THR \
+ CVMX_ADD_IO_SEG(0x0001180000000440ull)
+#define CVMX_MIO_UART2_USR \
+ CVMX_ADD_IO_SEG(0x0001180000000538ull)
+#define CVMX_MIO_UARTX_DLH(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000888ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_DLL(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000880ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_FAR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000920ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_FCR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000850ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_HTX(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000B08ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_IER(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000808ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_IIR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000810ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_LCR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000818ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_LSR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000828ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_MCR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000820ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_MSR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000830ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RBR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000800ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RFL(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A08ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_RFW(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000930ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SBCR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A20ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SCR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000838ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SFE(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A30ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A10ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A38ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_SRTS(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A18ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_STT(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000B00ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_TFL(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000A00ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_TFR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000928ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_THR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000840ull + (((offset) & 1) * 1024))
+#define CVMX_MIO_UARTX_USR(offset) \
+ CVMX_ADD_IO_SEG(0x0001180000000938ull + (((offset) & 1) * 1024))
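
The per-instance UART macros above (offset 0 or 1 selects the UART) mirror the standard 16550 register set; a sketch that polls UART0's LSR for transmit-holding-register-empty, assuming cvmx_read_csr() from the Octeon headers:

#include <asm/octeon/cvmx.h>		/* cvmx_read_csr() (assumed) */
#include <asm/octeon/cvmx-mio-defs.h>

/* Spin until UART0 can accept another byte (LSR bit 5, THRE). */
static void uart0_wait_tx_ready(void)
{
	while (!(cvmx_read_csr(CVMX_MIO_UARTX_LSR(0)) & 0x20))
		;
}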
+
+union cvmx_mio_boot_bist_stat {
+ uint64_t u64;
+ struct cvmx_mio_boot_bist_stat_s {
+ uint64_t reserved_2_63:62;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } s;
+ struct cvmx_mio_boot_bist_stat_cn30xx {
+ uint64_t reserved_4_63:60;
+ uint64_t ncbo_1:1;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } cn30xx;
+ struct cvmx_mio_boot_bist_stat_cn30xx cn31xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx {
+ uint64_t reserved_3_63:61;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } cn38xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_bist_stat_cn50xx {
+ uint64_t reserved_6_63:58;
+ uint64_t pcm_1:1;
+ uint64_t pcm_0:1;
+ uint64_t ncbo_1:1;
+ uint64_t ncbo_0:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } cn50xx;
+ struct cvmx_mio_boot_bist_stat_cn52xx {
+ uint64_t reserved_6_63:58;
+ uint64_t ndf:2;
+ uint64_t ncbo_0:1;
+ uint64_t dma:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } cn52xx;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 {
+ uint64_t reserved_4_63:60;
+ uint64_t ncbo_0:1;
+ uint64_t dma:1;
+ uint64_t loc:1;
+ uint64_t ncbi:1;
+ } cn52xxp1;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx;
+ struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
+ struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
+};
+
+union cvmx_mio_boot_comp {
+ uint64_t u64;
+ struct cvmx_mio_boot_comp_s {
+ uint64_t reserved_10_63:54;
+ uint64_t pctl:5;
+ uint64_t nctl:5;
+ } s;
+ struct cvmx_mio_boot_comp_s cn50xx;
+ struct cvmx_mio_boot_comp_s cn52xx;
+ struct cvmx_mio_boot_comp_s cn52xxp1;
+ struct cvmx_mio_boot_comp_s cn56xx;
+ struct cvmx_mio_boot_comp_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_cfgx_s {
+ uint64_t en:1;
+ uint64_t rw:1;
+ uint64_t clr:1;
+ uint64_t reserved_60_60:1;
+ uint64_t swap32:1;
+ uint64_t swap16:1;
+ uint64_t swap8:1;
+ uint64_t endian:1;
+ uint64_t size:20;
+ uint64_t adr:36;
+ } s;
+ struct cvmx_mio_boot_dma_cfgx_s cn52xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_cfgx_s cn56xx;
+ struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_intx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_intx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t dmarq:1;
+ uint64_t done:1;
+ } s;
+ struct cvmx_mio_boot_dma_intx_s cn52xx;
+ struct cvmx_mio_boot_dma_intx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_intx_s cn56xx;
+ struct cvmx_mio_boot_dma_intx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_int_enx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_int_enx_s {
+ uint64_t reserved_2_63:62;
+ uint64_t dmarq:1;
+ uint64_t done:1;
+ } s;
+ struct cvmx_mio_boot_dma_int_enx_s cn52xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_int_enx_s cn56xx;
+ struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_dma_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_dma_timx_s {
+ uint64_t dmack_pi:1;
+ uint64_t dmarq_pi:1;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t ddr:1;
+ uint64_t width:1;
+ uint64_t reserved_48_54:7;
+ uint64_t pause:6;
+ uint64_t dmack_h:6;
+ uint64_t we_n:6;
+ uint64_t we_a:6;
+ uint64_t oe_n:6;
+ uint64_t oe_a:6;
+ uint64_t dmack_s:6;
+ uint64_t dmarq:6;
+ } s;
+ struct cvmx_mio_boot_dma_timx_s cn52xx;
+ struct cvmx_mio_boot_dma_timx_s cn52xxp1;
+ struct cvmx_mio_boot_dma_timx_s cn56xx;
+ struct cvmx_mio_boot_dma_timx_s cn56xxp1;
+};
+
+union cvmx_mio_boot_err {
+ uint64_t u64;
+ struct cvmx_mio_boot_err_s {
+ uint64_t reserved_2_63:62;
+ uint64_t wait_err:1;
+ uint64_t adr_err:1;
+ } s;
+ struct cvmx_mio_boot_err_s cn30xx;
+ struct cvmx_mio_boot_err_s cn31xx;
+ struct cvmx_mio_boot_err_s cn38xx;
+ struct cvmx_mio_boot_err_s cn38xxp2;
+ struct cvmx_mio_boot_err_s cn50xx;
+ struct cvmx_mio_boot_err_s cn52xx;
+ struct cvmx_mio_boot_err_s cn52xxp1;
+ struct cvmx_mio_boot_err_s cn56xx;
+ struct cvmx_mio_boot_err_s cn56xxp1;
+ struct cvmx_mio_boot_err_s cn58xx;
+ struct cvmx_mio_boot_err_s cn58xxp1;
+};
+
+union cvmx_mio_boot_int {
+ uint64_t u64;
+ struct cvmx_mio_boot_int_s {
+ uint64_t reserved_2_63:62;
+ uint64_t wait_int:1;
+ uint64_t adr_int:1;
+ } s;
+ struct cvmx_mio_boot_int_s cn30xx;
+ struct cvmx_mio_boot_int_s cn31xx;
+ struct cvmx_mio_boot_int_s cn38xx;
+ struct cvmx_mio_boot_int_s cn38xxp2;
+ struct cvmx_mio_boot_int_s cn50xx;
+ struct cvmx_mio_boot_int_s cn52xx;
+ struct cvmx_mio_boot_int_s cn52xxp1;
+ struct cvmx_mio_boot_int_s cn56xx;
+ struct cvmx_mio_boot_int_s cn56xxp1;
+ struct cvmx_mio_boot_int_s cn58xx;
+ struct cvmx_mio_boot_int_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_adr {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_adr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t adr:5;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mio_boot_loc_adr_s cn30xx;
+ struct cvmx_mio_boot_loc_adr_s cn31xx;
+ struct cvmx_mio_boot_loc_adr_s cn38xx;
+ struct cvmx_mio_boot_loc_adr_s cn38xxp2;
+ struct cvmx_mio_boot_loc_adr_s cn50xx;
+ struct cvmx_mio_boot_loc_adr_s cn52xx;
+ struct cvmx_mio_boot_loc_adr_s cn52xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn56xx;
+ struct cvmx_mio_boot_loc_adr_s cn56xxp1;
+ struct cvmx_mio_boot_loc_adr_s cn58xx;
+ struct cvmx_mio_boot_loc_adr_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_cfgx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t en:1;
+ uint64_t reserved_28_30:3;
+ uint64_t base:25;
+ uint64_t reserved_0_2:3;
+ } s;
+ struct cvmx_mio_boot_loc_cfgx_s cn30xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn31xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn38xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn38xxp2;
+ struct cvmx_mio_boot_loc_cfgx_s cn50xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn52xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn56xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_loc_cfgx_s cn58xx;
+ struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
+};
+
+union cvmx_mio_boot_loc_dat {
+ uint64_t u64;
+ struct cvmx_mio_boot_loc_dat_s {
+ uint64_t data:64;
+ } s;
+ struct cvmx_mio_boot_loc_dat_s cn30xx;
+ struct cvmx_mio_boot_loc_dat_s cn31xx;
+ struct cvmx_mio_boot_loc_dat_s cn38xx;
+ struct cvmx_mio_boot_loc_dat_s cn38xxp2;
+ struct cvmx_mio_boot_loc_dat_s cn50xx;
+ struct cvmx_mio_boot_loc_dat_s cn52xx;
+ struct cvmx_mio_boot_loc_dat_s cn52xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn56xx;
+ struct cvmx_mio_boot_loc_dat_s cn56xxp1;
+ struct cvmx_mio_boot_loc_dat_s cn58xx;
+ struct cvmx_mio_boot_loc_dat_s cn58xxp1;
+};
+
+union cvmx_mio_boot_pin_defs {
+ uint64_t u64;
+ struct cvmx_mio_boot_pin_defs_s {
+ uint64_t reserved_16_63:48;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t dmack_p2:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t nand:1;
+ uint64_t reserved_0_7:8;
+ } s;
+ struct cvmx_mio_boot_pin_defs_cn52xx {
+ uint64_t reserved_16_63:48;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t reserved_13_13:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t nand:1;
+ uint64_t reserved_0_7:8;
+ } cn52xx;
+ struct cvmx_mio_boot_pin_defs_cn56xx {
+ uint64_t reserved_16_63:48;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t dmack_p2:1;
+ uint64_t dmack_p1:1;
+ uint64_t dmack_p0:1;
+ uint64_t term:2;
+ uint64_t reserved_0_8:9;
+ } cn56xx;
+};
+
+union cvmx_mio_boot_reg_cfgx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_cfgx_s {
+ uint64_t reserved_44_63:20;
+ uint64_t dmack:2;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+ } s;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx {
+ uint64_t reserved_37_63:27;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+ } cn30xx;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx;
+ struct cvmx_mio_boot_reg_cfgx_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t reserved_28_29:2;
+ uint64_t size:12;
+ uint64_t base:16;
+ } cn38xx;
+ struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_reg_cfgx_cn50xx {
+ uint64_t reserved_42_63:22;
+ uint64_t tim_mult:2;
+ uint64_t rd_dly:3;
+ uint64_t sam:1;
+ uint64_t we_ext:2;
+ uint64_t oe_ext:2;
+ uint64_t en:1;
+ uint64_t orbit:1;
+ uint64_t ale:1;
+ uint64_t width:1;
+ uint64_t size:12;
+ uint64_t base:16;
+ } cn50xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn52xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn52xxp1;
+ struct cvmx_mio_boot_reg_cfgx_s cn56xx;
+ struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
+ struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_boot_reg_timx {
+ uint64_t u64;
+ struct cvmx_mio_boot_reg_timx_s {
+ uint64_t pagem:1;
+ uint64_t waitm:1;
+ uint64_t pages:2;
+ uint64_t ale:6;
+ uint64_t page:6;
+ uint64_t wait:6;
+ uint64_t pause:6;
+ uint64_t wr_hld:6;
+ uint64_t rd_hld:6;
+ uint64_t we:6;
+ uint64_t oe:6;
+ uint64_t ce:6;
+ uint64_t adr:6;
+ } s;
+ struct cvmx_mio_boot_reg_timx_s cn30xx;
+ struct cvmx_mio_boot_reg_timx_s cn31xx;
+ struct cvmx_mio_boot_reg_timx_cn38xx {
+ uint64_t pagem:1;
+ uint64_t waitm:1;
+ uint64_t pages:2;
+ uint64_t reserved_54_59:6;
+ uint64_t page:6;
+ uint64_t wait:6;
+ uint64_t pause:6;
+ uint64_t wr_hld:6;
+ uint64_t rd_hld:6;
+ uint64_t we:6;
+ uint64_t oe:6;
+ uint64_t ce:6;
+ uint64_t adr:6;
+ } cn38xx;
+ struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2;
+ struct cvmx_mio_boot_reg_timx_s cn50xx;
+ struct cvmx_mio_boot_reg_timx_s cn52xx;
+ struct cvmx_mio_boot_reg_timx_s cn52xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn56xx;
+ struct cvmx_mio_boot_reg_timx_s cn56xxp1;
+ struct cvmx_mio_boot_reg_timx_s cn58xx;
+ struct cvmx_mio_boot_reg_timx_s cn58xxp1;
+};
+
+union cvmx_mio_boot_thr {
+ uint64_t u64;
+ struct cvmx_mio_boot_thr_s {
+ uint64_t reserved_22_63:42;
+ uint64_t dma_thr:6;
+ uint64_t reserved_14_15:2;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_thr:6;
+ } s;
+ struct cvmx_mio_boot_thr_cn30xx {
+ uint64_t reserved_14_63:50;
+ uint64_t fif_cnt:6;
+ uint64_t reserved_6_7:2;
+ uint64_t fif_thr:6;
+ } cn30xx;
+ struct cvmx_mio_boot_thr_cn30xx cn31xx;
+ struct cvmx_mio_boot_thr_cn30xx cn38xx;
+ struct cvmx_mio_boot_thr_cn30xx cn38xxp2;
+ struct cvmx_mio_boot_thr_cn30xx cn50xx;
+ struct cvmx_mio_boot_thr_s cn52xx;
+ struct cvmx_mio_boot_thr_s cn52xxp1;
+ struct cvmx_mio_boot_thr_s cn56xx;
+ struct cvmx_mio_boot_thr_s cn56xxp1;
+ struct cvmx_mio_boot_thr_cn30xx cn58xx;
+ struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_fus_bnk_datx {
+ uint64_t u64;
+ struct cvmx_mio_fus_bnk_datx_s {
+ uint64_t dat:64;
+ } s;
+ struct cvmx_mio_fus_bnk_datx_s cn50xx;
+ struct cvmx_mio_fus_bnk_datx_s cn52xx;
+ struct cvmx_mio_fus_bnk_datx_s cn52xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn56xx;
+ struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
+ struct cvmx_mio_fus_bnk_datx_s cn58xx;
+ struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat0 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat0_s {
+ uint64_t reserved_32_63:32;
+ uint64_t man_info:32;
+ } s;
+ struct cvmx_mio_fus_dat0_s cn30xx;
+ struct cvmx_mio_fus_dat0_s cn31xx;
+ struct cvmx_mio_fus_dat0_s cn38xx;
+ struct cvmx_mio_fus_dat0_s cn38xxp2;
+ struct cvmx_mio_fus_dat0_s cn50xx;
+ struct cvmx_mio_fus_dat0_s cn52xx;
+ struct cvmx_mio_fus_dat0_s cn52xxp1;
+ struct cvmx_mio_fus_dat0_s cn56xx;
+ struct cvmx_mio_fus_dat0_s cn56xxp1;
+ struct cvmx_mio_fus_dat0_s cn58xx;
+ struct cvmx_mio_fus_dat0_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat1 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat1_s {
+ uint64_t reserved_32_63:32;
+ uint64_t man_info:32;
+ } s;
+ struct cvmx_mio_fus_dat1_s cn30xx;
+ struct cvmx_mio_fus_dat1_s cn31xx;
+ struct cvmx_mio_fus_dat1_s cn38xx;
+ struct cvmx_mio_fus_dat1_s cn38xxp2;
+ struct cvmx_mio_fus_dat1_s cn50xx;
+ struct cvmx_mio_fus_dat1_s cn52xx;
+ struct cvmx_mio_fus_dat1_s cn52xxp1;
+ struct cvmx_mio_fus_dat1_s cn56xx;
+ struct cvmx_mio_fus_dat1_s cn56xxp1;
+ struct cvmx_mio_fus_dat1_s cn58xx;
+ struct cvmx_mio_fus_dat1_s cn58xxp1;
+};
+
+union cvmx_mio_fus_dat2 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat2_s {
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+ } s;
+ struct cvmx_mio_fus_dat2_cn30xx {
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pll_off:4;
+ uint64_t reserved_1_11:11;
+ uint64_t pp_dis:1;
+ } cn30xx;
+ struct cvmx_mio_fus_dat2_cn31xx {
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pll_off:4;
+ uint64_t reserved_2_11:10;
+ uint64_t pp_dis:2;
+ } cn31xx;
+ struct cvmx_mio_fus_dat2_cn38xx {
+ uint64_t reserved_29_63:35;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pp_dis:16;
+ } cn38xx;
+ struct cvmx_mio_fus_dat2_cn38xx cn38xxp2;
+ struct cvmx_mio_fus_dat2_cn50xx {
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_2_15:14;
+ uint64_t pp_dis:2;
+ } cn50xx;
+ struct cvmx_mio_fus_dat2_cn52xx {
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_4_15:12;
+ uint64_t pp_dis:4;
+ } cn52xx;
+ struct cvmx_mio_fus_dat2_cn52xx cn52xxp1;
+ struct cvmx_mio_fus_dat2_cn56xx {
+ uint64_t reserved_34_63:30;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_30_31:2;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t reserved_12_15:4;
+ uint64_t pp_dis:12;
+ } cn56xx;
+ struct cvmx_mio_fus_dat2_cn56xx cn56xxp1;
+ struct cvmx_mio_fus_dat2_cn58xx {
+ uint64_t reserved_30_63:34;
+ uint64_t nokasu:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t rst_sht:1;
+ uint64_t bist_dis:1;
+ uint64_t chip_id:8;
+ uint64_t pp_dis:16;
+ } cn58xx;
+ struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
+};
+
+union cvmx_mio_fus_dat3 {
+ uint64_t u64;
+ struct cvmx_mio_fus_dat3_s {
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } s;
+ struct cvmx_mio_fus_dat3_cn30xx {
+ uint64_t reserved_32_63:32;
+ uint64_t pll_div4:1;
+ uint64_t reserved_29_30:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } cn30xx;
+ struct cvmx_mio_fus_dat3_s cn31xx;
+ struct cvmx_mio_fus_dat3_cn38xx {
+ uint64_t reserved_31_63:33;
+ uint64_t zip_crip:2;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } cn38xx;
+ struct cvmx_mio_fus_dat3_cn38xxp2 {
+ uint64_t reserved_29_63:35;
+ uint64_t bar2_en:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t icache:24;
+ } cn38xxp2;
+ struct cvmx_mio_fus_dat3_cn38xx cn50xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn52xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn52xxp1;
+ struct cvmx_mio_fus_dat3_cn38xx cn56xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
+ struct cvmx_mio_fus_dat3_cn38xx cn58xx;
+ struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
+};
+
+union cvmx_mio_fus_ema {
+ uint64_t u64;
+ struct cvmx_mio_fus_ema_s {
+ uint64_t reserved_7_63:57;
+ uint64_t eff_ema:3;
+ uint64_t reserved_3_3:1;
+ uint64_t ema:3;
+ } s;
+ struct cvmx_mio_fus_ema_s cn50xx;
+ struct cvmx_mio_fus_ema_s cn52xx;
+ struct cvmx_mio_fus_ema_s cn52xxp1;
+ struct cvmx_mio_fus_ema_s cn56xx;
+ struct cvmx_mio_fus_ema_s cn56xxp1;
+ struct cvmx_mio_fus_ema_cn58xx {
+ uint64_t reserved_2_63:62;
+ uint64_t ema:2;
+ } cn58xx;
+ struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
+};
+
+union cvmx_mio_fus_pdf {
+ uint64_t u64;
+ struct cvmx_mio_fus_pdf_s {
+ uint64_t pdf:64;
+ } s;
+ struct cvmx_mio_fus_pdf_s cn50xx;
+ struct cvmx_mio_fus_pdf_s cn52xx;
+ struct cvmx_mio_fus_pdf_s cn52xxp1;
+ struct cvmx_mio_fus_pdf_s cn56xx;
+ struct cvmx_mio_fus_pdf_s cn56xxp1;
+ struct cvmx_mio_fus_pdf_s cn58xx;
+};
+
+union cvmx_mio_fus_pll {
+ uint64_t u64;
+ struct cvmx_mio_fus_pll_s {
+ uint64_t reserved_2_63:62;
+ uint64_t rfslip:1;
+ uint64_t fbslip:1;
+ } s;
+ struct cvmx_mio_fus_pll_s cn50xx;
+ struct cvmx_mio_fus_pll_s cn52xx;
+ struct cvmx_mio_fus_pll_s cn52xxp1;
+ struct cvmx_mio_fus_pll_s cn56xx;
+ struct cvmx_mio_fus_pll_s cn56xxp1;
+ struct cvmx_mio_fus_pll_s cn58xx;
+ struct cvmx_mio_fus_pll_s cn58xxp1;
+};
+
+union cvmx_mio_fus_prog {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_s {
+ uint64_t reserved_1_63:63;
+ uint64_t prog:1;
+ } s;
+ struct cvmx_mio_fus_prog_s cn30xx;
+ struct cvmx_mio_fus_prog_s cn31xx;
+ struct cvmx_mio_fus_prog_s cn38xx;
+ struct cvmx_mio_fus_prog_s cn38xxp2;
+ struct cvmx_mio_fus_prog_s cn50xx;
+ struct cvmx_mio_fus_prog_s cn52xx;
+ struct cvmx_mio_fus_prog_s cn52xxp1;
+ struct cvmx_mio_fus_prog_s cn56xx;
+ struct cvmx_mio_fus_prog_s cn56xxp1;
+ struct cvmx_mio_fus_prog_s cn58xx;
+ struct cvmx_mio_fus_prog_s cn58xxp1;
+};
+
+union cvmx_mio_fus_prog_times {
+ uint64_t u64;
+ struct cvmx_mio_fus_prog_times_s {
+ uint64_t reserved_33_63:31;
+ uint64_t prog_pin:1;
+ uint64_t out:8;
+ uint64_t sclk_lo:4;
+ uint64_t sclk_hi:12;
+ uint64_t setup:8;
+ } s;
+ struct cvmx_mio_fus_prog_times_s cn50xx;
+ struct cvmx_mio_fus_prog_times_s cn52xx;
+ struct cvmx_mio_fus_prog_times_s cn52xxp1;
+ struct cvmx_mio_fus_prog_times_s cn56xx;
+ struct cvmx_mio_fus_prog_times_s cn56xxp1;
+ struct cvmx_mio_fus_prog_times_s cn58xx;
+ struct cvmx_mio_fus_prog_times_s cn58xxp1;
+};
+
+union cvmx_mio_fus_rcmd {
+ uint64_t u64;
+ struct cvmx_mio_fus_rcmd_s {
+ uint64_t reserved_24_63:40;
+ uint64_t dat:8;
+ uint64_t reserved_13_15:3;
+ uint64_t pend:1;
+ uint64_t reserved_9_11:3;
+ uint64_t efuse:1;
+ uint64_t addr:8;
+ } s;
+ struct cvmx_mio_fus_rcmd_cn30xx {
+ uint64_t reserved_24_63:40;
+ uint64_t dat:8;
+ uint64_t reserved_13_15:3;
+ uint64_t pend:1;
+ uint64_t reserved_9_11:3;
+ uint64_t efuse:1;
+ uint64_t reserved_7_7:1;
+ uint64_t addr:7;
+ } cn30xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn31xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn38xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2;
+ struct cvmx_mio_fus_rcmd_cn30xx cn50xx;
+ struct cvmx_mio_fus_rcmd_s cn52xx;
+ struct cvmx_mio_fus_rcmd_s cn52xxp1;
+ struct cvmx_mio_fus_rcmd_s cn56xx;
+ struct cvmx_mio_fus_rcmd_s cn56xxp1;
+ struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
+ struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
+};
+
+union cvmx_mio_fus_spr_repair_res {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_res_s {
+ uint64_t reserved_42_63:22;
+ uint64_t repair2:14;
+ uint64_t repair1:14;
+ uint64_t repair0:14;
+ } s;
+ struct cvmx_mio_fus_spr_repair_res_s cn30xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn31xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn38xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn50xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn52xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn52xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn56xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
+ struct cvmx_mio_fus_spr_repair_res_s cn58xx;
+ struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
+};
+
+union cvmx_mio_fus_spr_repair_sum {
+ uint64_t u64;
+ struct cvmx_mio_fus_spr_repair_sum_s {
+ uint64_t reserved_1_63:63;
+ uint64_t too_many:1;
+ } s;
+ struct cvmx_mio_fus_spr_repair_sum_s cn30xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn31xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn38xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn50xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn52xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn56xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
+ struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
+ struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
+};
+
+union cvmx_mio_fus_unlock {
+ uint64_t u64;
+ struct cvmx_mio_fus_unlock_s {
+ uint64_t reserved_24_63:40;
+ uint64_t key:24;
+ } s;
+ struct cvmx_mio_fus_unlock_s cn30xx;
+ struct cvmx_mio_fus_unlock_s cn31xx;
+};
+
+union cvmx_mio_fus_wadr {
+ uint64_t u64;
+ struct cvmx_mio_fus_wadr_s {
+ uint64_t reserved_10_63:54;
+ uint64_t addr:10;
+ } s;
+ struct cvmx_mio_fus_wadr_s cn30xx;
+ struct cvmx_mio_fus_wadr_s cn31xx;
+ struct cvmx_mio_fus_wadr_s cn38xx;
+ struct cvmx_mio_fus_wadr_s cn38xxp2;
+ struct cvmx_mio_fus_wadr_cn50xx {
+ uint64_t reserved_2_63:62;
+ uint64_t addr:2;
+ } cn50xx;
+ struct cvmx_mio_fus_wadr_cn52xx {
+ uint64_t reserved_3_63:61;
+ uint64_t addr:3;
+ } cn52xx;
+ struct cvmx_mio_fus_wadr_cn52xx cn52xxp1;
+ struct cvmx_mio_fus_wadr_cn52xx cn56xx;
+ struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
+ struct cvmx_mio_fus_wadr_cn50xx cn58xx;
+ struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
+};
+
+union cvmx_mio_ndf_dma_cfg {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_cfg_s {
+ uint64_t en:1;
+ uint64_t rw:1;
+ uint64_t clr:1;
+ uint64_t reserved_60_60:1;
+ uint64_t swap32:1;
+ uint64_t swap16:1;
+ uint64_t swap8:1;
+ uint64_t endian:1;
+ uint64_t size:20;
+ uint64_t adr:36;
+ } s;
+ struct cvmx_mio_ndf_dma_cfg_s cn52xx;
+};
+
+union cvmx_mio_ndf_dma_int {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_s {
+ uint64_t reserved_1_63:63;
+ uint64_t done:1;
+ } s;
+ struct cvmx_mio_ndf_dma_int_s cn52xx;
+};
+
+union cvmx_mio_ndf_dma_int_en {
+ uint64_t u64;
+ struct cvmx_mio_ndf_dma_int_en_s {
+ uint64_t reserved_1_63:63;
+ uint64_t done:1;
+ } s;
+ struct cvmx_mio_ndf_dma_int_en_s cn52xx;
+};
+
+union cvmx_mio_pll_ctl {
+ uint64_t u64;
+ struct cvmx_mio_pll_ctl_s {
+ uint64_t reserved_5_63:59;
+ uint64_t bw_ctl:5;
+ } s;
+ struct cvmx_mio_pll_ctl_s cn30xx;
+ struct cvmx_mio_pll_ctl_s cn31xx;
+};
+
+union cvmx_mio_pll_setting {
+ uint64_t u64;
+ struct cvmx_mio_pll_setting_s {
+ uint64_t reserved_17_63:47;
+ uint64_t setting:17;
+ } s;
+ struct cvmx_mio_pll_setting_s cn30xx;
+ struct cvmx_mio_pll_setting_s cn31xx;
+};
+
+union cvmx_mio_twsx_int {
+ uint64_t u64;
+ struct cvmx_mio_twsx_int_s {
+ uint64_t reserved_12_63:52;
+ uint64_t scl:1;
+ uint64_t sda:1;
+ uint64_t scl_ovr:1;
+ uint64_t sda_ovr:1;
+ uint64_t reserved_7_7:1;
+ uint64_t core_en:1;
+ uint64_t ts_en:1;
+ uint64_t st_en:1;
+ uint64_t reserved_3_3:1;
+ uint64_t core_int:1;
+ uint64_t ts_int:1;
+ uint64_t st_int:1;
+ } s;
+ struct cvmx_mio_twsx_int_s cn30xx;
+ struct cvmx_mio_twsx_int_s cn31xx;
+ struct cvmx_mio_twsx_int_s cn38xx;
+ struct cvmx_mio_twsx_int_cn38xxp2 {
+ uint64_t reserved_7_63:57;
+ uint64_t core_en:1;
+ uint64_t ts_en:1;
+ uint64_t st_en:1;
+ uint64_t reserved_3_3:1;
+ uint64_t core_int:1;
+ uint64_t ts_int:1;
+ uint64_t st_int:1;
+ } cn38xxp2;
+ struct cvmx_mio_twsx_int_s cn50xx;
+ struct cvmx_mio_twsx_int_s cn52xx;
+ struct cvmx_mio_twsx_int_s cn52xxp1;
+ struct cvmx_mio_twsx_int_s cn56xx;
+ struct cvmx_mio_twsx_int_s cn56xxp1;
+ struct cvmx_mio_twsx_int_s cn58xx;
+ struct cvmx_mio_twsx_int_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_sw_twsi {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_s {
+ uint64_t v:1;
+ uint64_t slonly:1;
+ uint64_t eia:1;
+ uint64_t op:4;
+ uint64_t r:1;
+ uint64_t sovr:1;
+ uint64_t size:3;
+ uint64_t scr:2;
+ uint64_t a:10;
+ uint64_t ia:5;
+ uint64_t eop_ia:3;
+ uint64_t d:32;
+ } s;
+ struct cvmx_mio_twsx_sw_twsi_s cn30xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn31xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn38xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn38xxp2;
+ struct cvmx_mio_twsx_sw_twsi_s cn50xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn52xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn52xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn56xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
+ struct cvmx_mio_twsx_sw_twsi_s cn58xx;
+ struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_sw_twsi_ext {
+ uint64_t u64;
+ struct cvmx_mio_twsx_sw_twsi_ext_s {
+ uint64_t reserved_40_63:24;
+ uint64_t ia:8;
+ uint64_t d:32;
+ } s;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
+ struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
+};
+
+union cvmx_mio_twsx_twsi_sw {
+ uint64_t u64;
+ struct cvmx_mio_twsx_twsi_sw_s {
+ uint64_t v:2;
+ uint64_t reserved_32_61:30;
+ uint64_t d:32;
+ } s;
+ struct cvmx_mio_twsx_twsi_sw_s cn30xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn31xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn38xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn38xxp2;
+ struct cvmx_mio_twsx_twsi_sw_s cn50xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn52xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn52xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn56xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
+ struct cvmx_mio_twsx_twsi_sw_s cn58xx;
+ struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dlh_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dlh:8;
+ } s;
+ struct cvmx_mio_uartx_dlh_s cn30xx;
+ struct cvmx_mio_uartx_dlh_s cn31xx;
+ struct cvmx_mio_uartx_dlh_s cn38xx;
+ struct cvmx_mio_uartx_dlh_s cn38xxp2;
+ struct cvmx_mio_uartx_dlh_s cn50xx;
+ struct cvmx_mio_uartx_dlh_s cn52xx;
+ struct cvmx_mio_uartx_dlh_s cn52xxp1;
+ struct cvmx_mio_uartx_dlh_s cn56xx;
+ struct cvmx_mio_uartx_dlh_s cn56xxp1;
+ struct cvmx_mio_uartx_dlh_s cn58xx;
+ struct cvmx_mio_uartx_dlh_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_dll {
+ uint64_t u64;
+ struct cvmx_mio_uartx_dll_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dll:8;
+ } s;
+ struct cvmx_mio_uartx_dll_s cn30xx;
+ struct cvmx_mio_uartx_dll_s cn31xx;
+ struct cvmx_mio_uartx_dll_s cn38xx;
+ struct cvmx_mio_uartx_dll_s cn38xxp2;
+ struct cvmx_mio_uartx_dll_s cn50xx;
+ struct cvmx_mio_uartx_dll_s cn52xx;
+ struct cvmx_mio_uartx_dll_s cn52xxp1;
+ struct cvmx_mio_uartx_dll_s cn56xx;
+ struct cvmx_mio_uartx_dll_s cn56xxp1;
+ struct cvmx_mio_uartx_dll_s cn58xx;
+ struct cvmx_mio_uartx_dll_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_far {
+ uint64_t u64;
+ struct cvmx_mio_uartx_far_s {
+ uint64_t reserved_1_63:63;
+ uint64_t far:1;
+ } s;
+ struct cvmx_mio_uartx_far_s cn30xx;
+ struct cvmx_mio_uartx_far_s cn31xx;
+ struct cvmx_mio_uartx_far_s cn38xx;
+ struct cvmx_mio_uartx_far_s cn38xxp2;
+ struct cvmx_mio_uartx_far_s cn50xx;
+ struct cvmx_mio_uartx_far_s cn52xx;
+ struct cvmx_mio_uartx_far_s cn52xxp1;
+ struct cvmx_mio_uartx_far_s cn56xx;
+ struct cvmx_mio_uartx_far_s cn56xxp1;
+ struct cvmx_mio_uartx_far_s cn58xx;
+ struct cvmx_mio_uartx_far_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_fcr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rxtrig:2;
+ uint64_t txtrig:2;
+ uint64_t reserved_3_3:1;
+ uint64_t txfr:1;
+ uint64_t rxfr:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_mio_uartx_fcr_s cn30xx;
+ struct cvmx_mio_uartx_fcr_s cn31xx;
+ struct cvmx_mio_uartx_fcr_s cn38xx;
+ struct cvmx_mio_uartx_fcr_s cn38xxp2;
+ struct cvmx_mio_uartx_fcr_s cn50xx;
+ struct cvmx_mio_uartx_fcr_s cn52xx;
+ struct cvmx_mio_uartx_fcr_s cn52xxp1;
+ struct cvmx_mio_uartx_fcr_s cn56xx;
+ struct cvmx_mio_uartx_fcr_s cn56xxp1;
+ struct cvmx_mio_uartx_fcr_s cn58xx;
+ struct cvmx_mio_uartx_fcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_htx {
+ uint64_t u64;
+ struct cvmx_mio_uartx_htx_s {
+ uint64_t reserved_1_63:63;
+ uint64_t htx:1;
+ } s;
+ struct cvmx_mio_uartx_htx_s cn30xx;
+ struct cvmx_mio_uartx_htx_s cn31xx;
+ struct cvmx_mio_uartx_htx_s cn38xx;
+ struct cvmx_mio_uartx_htx_s cn38xxp2;
+ struct cvmx_mio_uartx_htx_s cn50xx;
+ struct cvmx_mio_uartx_htx_s cn52xx;
+ struct cvmx_mio_uartx_htx_s cn52xxp1;
+ struct cvmx_mio_uartx_htx_s cn56xx;
+ struct cvmx_mio_uartx_htx_s cn56xxp1;
+ struct cvmx_mio_uartx_htx_s cn58xx;
+ struct cvmx_mio_uartx_htx_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_ier {
+ uint64_t u64;
+ struct cvmx_mio_uartx_ier_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ptime:1;
+ uint64_t reserved_4_6:3;
+ uint64_t edssi:1;
+ uint64_t elsi:1;
+ uint64_t etbei:1;
+ uint64_t erbfi:1;
+ } s;
+ struct cvmx_mio_uartx_ier_s cn30xx;
+ struct cvmx_mio_uartx_ier_s cn31xx;
+ struct cvmx_mio_uartx_ier_s cn38xx;
+ struct cvmx_mio_uartx_ier_s cn38xxp2;
+ struct cvmx_mio_uartx_ier_s cn50xx;
+ struct cvmx_mio_uartx_ier_s cn52xx;
+ struct cvmx_mio_uartx_ier_s cn52xxp1;
+ struct cvmx_mio_uartx_ier_s cn56xx;
+ struct cvmx_mio_uartx_ier_s cn56xxp1;
+ struct cvmx_mio_uartx_ier_s cn58xx;
+ struct cvmx_mio_uartx_ier_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_iir {
+ uint64_t u64;
+ struct cvmx_mio_uartx_iir_s {
+ uint64_t reserved_8_63:56;
+ uint64_t fen:2;
+ uint64_t reserved_4_5:2;
+ uint64_t iid:4;
+ } s;
+ struct cvmx_mio_uartx_iir_s cn30xx;
+ struct cvmx_mio_uartx_iir_s cn31xx;
+ struct cvmx_mio_uartx_iir_s cn38xx;
+ struct cvmx_mio_uartx_iir_s cn38xxp2;
+ struct cvmx_mio_uartx_iir_s cn50xx;
+ struct cvmx_mio_uartx_iir_s cn52xx;
+ struct cvmx_mio_uartx_iir_s cn52xxp1;
+ struct cvmx_mio_uartx_iir_s cn56xx;
+ struct cvmx_mio_uartx_iir_s cn56xxp1;
+ struct cvmx_mio_uartx_iir_s cn58xx;
+ struct cvmx_mio_uartx_iir_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lcr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dlab:1;
+ uint64_t brk:1;
+ uint64_t reserved_5_5:1;
+ uint64_t eps:1;
+ uint64_t pen:1;
+ uint64_t stop:1;
+ uint64_t cls:2;
+ } s;
+ struct cvmx_mio_uartx_lcr_s cn30xx;
+ struct cvmx_mio_uartx_lcr_s cn31xx;
+ struct cvmx_mio_uartx_lcr_s cn38xx;
+ struct cvmx_mio_uartx_lcr_s cn38xxp2;
+ struct cvmx_mio_uartx_lcr_s cn50xx;
+ struct cvmx_mio_uartx_lcr_s cn52xx;
+ struct cvmx_mio_uartx_lcr_s cn52xxp1;
+ struct cvmx_mio_uartx_lcr_s cn56xx;
+ struct cvmx_mio_uartx_lcr_s cn56xxp1;
+ struct cvmx_mio_uartx_lcr_s cn58xx;
+ struct cvmx_mio_uartx_lcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_lsr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ferr:1;
+ uint64_t temt:1;
+ uint64_t thre:1;
+ uint64_t bi:1;
+ uint64_t fe:1;
+ uint64_t pe:1;
+ uint64_t oe:1;
+ uint64_t dr:1;
+ } s;
+ struct cvmx_mio_uartx_lsr_s cn30xx;
+ struct cvmx_mio_uartx_lsr_s cn31xx;
+ struct cvmx_mio_uartx_lsr_s cn38xx;
+ struct cvmx_mio_uartx_lsr_s cn38xxp2;
+ struct cvmx_mio_uartx_lsr_s cn50xx;
+ struct cvmx_mio_uartx_lsr_s cn52xx;
+ struct cvmx_mio_uartx_lsr_s cn52xxp1;
+ struct cvmx_mio_uartx_lsr_s cn56xx;
+ struct cvmx_mio_uartx_lsr_s cn56xxp1;
+ struct cvmx_mio_uartx_lsr_s cn58xx;
+ struct cvmx_mio_uartx_lsr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_mcr_s {
+ uint64_t reserved_6_63:58;
+ uint64_t afce:1;
+ uint64_t loop:1;
+ uint64_t out2:1;
+ uint64_t out1:1;
+ uint64_t rts:1;
+ uint64_t dtr:1;
+ } s;
+ struct cvmx_mio_uartx_mcr_s cn30xx;
+ struct cvmx_mio_uartx_mcr_s cn31xx;
+ struct cvmx_mio_uartx_mcr_s cn38xx;
+ struct cvmx_mio_uartx_mcr_s cn38xxp2;
+ struct cvmx_mio_uartx_mcr_s cn50xx;
+ struct cvmx_mio_uartx_mcr_s cn52xx;
+ struct cvmx_mio_uartx_mcr_s cn52xxp1;
+ struct cvmx_mio_uartx_mcr_s cn56xx;
+ struct cvmx_mio_uartx_mcr_s cn56xxp1;
+ struct cvmx_mio_uartx_mcr_s cn58xx;
+ struct cvmx_mio_uartx_mcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_msr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_msr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dcd:1;
+ uint64_t ri:1;
+ uint64_t dsr:1;
+ uint64_t cts:1;
+ uint64_t ddcd:1;
+ uint64_t teri:1;
+ uint64_t ddsr:1;
+ uint64_t dcts:1;
+ } s;
+ struct cvmx_mio_uartx_msr_s cn30xx;
+ struct cvmx_mio_uartx_msr_s cn31xx;
+ struct cvmx_mio_uartx_msr_s cn38xx;
+ struct cvmx_mio_uartx_msr_s cn38xxp2;
+ struct cvmx_mio_uartx_msr_s cn50xx;
+ struct cvmx_mio_uartx_msr_s cn52xx;
+ struct cvmx_mio_uartx_msr_s cn52xxp1;
+ struct cvmx_mio_uartx_msr_s cn56xx;
+ struct cvmx_mio_uartx_msr_s cn56xxp1;
+ struct cvmx_mio_uartx_msr_s cn58xx;
+ struct cvmx_mio_uartx_msr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rbr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rbr:8;
+ } s;
+ struct cvmx_mio_uartx_rbr_s cn30xx;
+ struct cvmx_mio_uartx_rbr_s cn31xx;
+ struct cvmx_mio_uartx_rbr_s cn38xx;
+ struct cvmx_mio_uartx_rbr_s cn38xxp2;
+ struct cvmx_mio_uartx_rbr_s cn50xx;
+ struct cvmx_mio_uartx_rbr_s cn52xx;
+ struct cvmx_mio_uartx_rbr_s cn52xxp1;
+ struct cvmx_mio_uartx_rbr_s cn56xx;
+ struct cvmx_mio_uartx_rbr_s cn56xxp1;
+ struct cvmx_mio_uartx_rbr_s cn58xx;
+ struct cvmx_mio_uartx_rbr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfl_s {
+ uint64_t reserved_7_63:57;
+ uint64_t rfl:7;
+ } s;
+ struct cvmx_mio_uartx_rfl_s cn30xx;
+ struct cvmx_mio_uartx_rfl_s cn31xx;
+ struct cvmx_mio_uartx_rfl_s cn38xx;
+ struct cvmx_mio_uartx_rfl_s cn38xxp2;
+ struct cvmx_mio_uartx_rfl_s cn50xx;
+ struct cvmx_mio_uartx_rfl_s cn52xx;
+ struct cvmx_mio_uartx_rfl_s cn52xxp1;
+ struct cvmx_mio_uartx_rfl_s cn56xx;
+ struct cvmx_mio_uartx_rfl_s cn56xxp1;
+ struct cvmx_mio_uartx_rfl_s cn58xx;
+ struct cvmx_mio_uartx_rfl_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uartx_rfw_s {
+ uint64_t reserved_10_63:54;
+ uint64_t rffe:1;
+ uint64_t rfpe:1;
+ uint64_t rfwd:8;
+ } s;
+ struct cvmx_mio_uartx_rfw_s cn30xx;
+ struct cvmx_mio_uartx_rfw_s cn31xx;
+ struct cvmx_mio_uartx_rfw_s cn38xx;
+ struct cvmx_mio_uartx_rfw_s cn38xxp2;
+ struct cvmx_mio_uartx_rfw_s cn50xx;
+ struct cvmx_mio_uartx_rfw_s cn52xx;
+ struct cvmx_mio_uartx_rfw_s cn52xxp1;
+ struct cvmx_mio_uartx_rfw_s cn56xx;
+ struct cvmx_mio_uartx_rfw_s cn56xxp1;
+ struct cvmx_mio_uartx_rfw_s cn58xx;
+ struct cvmx_mio_uartx_rfw_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sbcr_s {
+ uint64_t reserved_1_63:63;
+ uint64_t sbcr:1;
+ } s;
+ struct cvmx_mio_uartx_sbcr_s cn30xx;
+ struct cvmx_mio_uartx_sbcr_s cn31xx;
+ struct cvmx_mio_uartx_sbcr_s cn38xx;
+ struct cvmx_mio_uartx_sbcr_s cn38xxp2;
+ struct cvmx_mio_uartx_sbcr_s cn50xx;
+ struct cvmx_mio_uartx_sbcr_s cn52xx;
+ struct cvmx_mio_uartx_sbcr_s cn52xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn56xx;
+ struct cvmx_mio_uartx_sbcr_s cn56xxp1;
+ struct cvmx_mio_uartx_sbcr_s cn58xx;
+ struct cvmx_mio_uartx_sbcr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_scr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_scr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t scr:8;
+ } s;
+ struct cvmx_mio_uartx_scr_s cn30xx;
+ struct cvmx_mio_uartx_scr_s cn31xx;
+ struct cvmx_mio_uartx_scr_s cn38xx;
+ struct cvmx_mio_uartx_scr_s cn38xxp2;
+ struct cvmx_mio_uartx_scr_s cn50xx;
+ struct cvmx_mio_uartx_scr_s cn52xx;
+ struct cvmx_mio_uartx_scr_s cn52xxp1;
+ struct cvmx_mio_uartx_scr_s cn56xx;
+ struct cvmx_mio_uartx_scr_s cn56xxp1;
+ struct cvmx_mio_uartx_scr_s cn58xx;
+ struct cvmx_mio_uartx_scr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uartx_sfe_s {
+ uint64_t reserved_1_63:63;
+ uint64_t sfe:1;
+ } s;
+ struct cvmx_mio_uartx_sfe_s cn30xx;
+ struct cvmx_mio_uartx_sfe_s cn31xx;
+ struct cvmx_mio_uartx_sfe_s cn38xx;
+ struct cvmx_mio_uartx_sfe_s cn38xxp2;
+ struct cvmx_mio_uartx_sfe_s cn50xx;
+ struct cvmx_mio_uartx_sfe_s cn52xx;
+ struct cvmx_mio_uartx_sfe_s cn52xxp1;
+ struct cvmx_mio_uartx_sfe_s cn56xx;
+ struct cvmx_mio_uartx_sfe_s cn56xxp1;
+ struct cvmx_mio_uartx_sfe_s cn58xx;
+ struct cvmx_mio_uartx_sfe_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srr_s {
+ uint64_t reserved_3_63:61;
+ uint64_t stfr:1;
+ uint64_t srfr:1;
+ uint64_t usr:1;
+ } s;
+ struct cvmx_mio_uartx_srr_s cn30xx;
+ struct cvmx_mio_uartx_srr_s cn31xx;
+ struct cvmx_mio_uartx_srr_s cn38xx;
+ struct cvmx_mio_uartx_srr_s cn38xxp2;
+ struct cvmx_mio_uartx_srr_s cn50xx;
+ struct cvmx_mio_uartx_srr_s cn52xx;
+ struct cvmx_mio_uartx_srr_s cn52xxp1;
+ struct cvmx_mio_uartx_srr_s cn56xx;
+ struct cvmx_mio_uartx_srr_s cn56xxp1;
+ struct cvmx_mio_uartx_srr_s cn58xx;
+ struct cvmx_mio_uartx_srr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srt_s {
+ uint64_t reserved_2_63:62;
+ uint64_t srt:2;
+ } s;
+ struct cvmx_mio_uartx_srt_s cn30xx;
+ struct cvmx_mio_uartx_srt_s cn31xx;
+ struct cvmx_mio_uartx_srt_s cn38xx;
+ struct cvmx_mio_uartx_srt_s cn38xxp2;
+ struct cvmx_mio_uartx_srt_s cn50xx;
+ struct cvmx_mio_uartx_srt_s cn52xx;
+ struct cvmx_mio_uartx_srt_s cn52xxp1;
+ struct cvmx_mio_uartx_srt_s cn56xx;
+ struct cvmx_mio_uartx_srt_s cn56xxp1;
+ struct cvmx_mio_uartx_srt_s cn58xx;
+ struct cvmx_mio_uartx_srt_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_srts {
+ uint64_t u64;
+ struct cvmx_mio_uartx_srts_s {
+ uint64_t reserved_1_63:63;
+ uint64_t srts:1;
+ } s;
+ struct cvmx_mio_uartx_srts_s cn30xx;
+ struct cvmx_mio_uartx_srts_s cn31xx;
+ struct cvmx_mio_uartx_srts_s cn38xx;
+ struct cvmx_mio_uartx_srts_s cn38xxp2;
+ struct cvmx_mio_uartx_srts_s cn50xx;
+ struct cvmx_mio_uartx_srts_s cn52xx;
+ struct cvmx_mio_uartx_srts_s cn52xxp1;
+ struct cvmx_mio_uartx_srts_s cn56xx;
+ struct cvmx_mio_uartx_srts_s cn56xxp1;
+ struct cvmx_mio_uartx_srts_s cn58xx;
+ struct cvmx_mio_uartx_srts_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_stt {
+ uint64_t u64;
+ struct cvmx_mio_uartx_stt_s {
+ uint64_t reserved_2_63:62;
+ uint64_t stt:2;
+ } s;
+ struct cvmx_mio_uartx_stt_s cn30xx;
+ struct cvmx_mio_uartx_stt_s cn31xx;
+ struct cvmx_mio_uartx_stt_s cn38xx;
+ struct cvmx_mio_uartx_stt_s cn38xxp2;
+ struct cvmx_mio_uartx_stt_s cn50xx;
+ struct cvmx_mio_uartx_stt_s cn52xx;
+ struct cvmx_mio_uartx_stt_s cn52xxp1;
+ struct cvmx_mio_uartx_stt_s cn56xx;
+ struct cvmx_mio_uartx_stt_s cn56xxp1;
+ struct cvmx_mio_uartx_stt_s cn58xx;
+ struct cvmx_mio_uartx_stt_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfl_s {
+ uint64_t reserved_7_63:57;
+ uint64_t tfl:7;
+ } s;
+ struct cvmx_mio_uartx_tfl_s cn30xx;
+ struct cvmx_mio_uartx_tfl_s cn31xx;
+ struct cvmx_mio_uartx_tfl_s cn38xx;
+ struct cvmx_mio_uartx_tfl_s cn38xxp2;
+ struct cvmx_mio_uartx_tfl_s cn50xx;
+ struct cvmx_mio_uartx_tfl_s cn52xx;
+ struct cvmx_mio_uartx_tfl_s cn52xxp1;
+ struct cvmx_mio_uartx_tfl_s cn56xx;
+ struct cvmx_mio_uartx_tfl_s cn56xxp1;
+ struct cvmx_mio_uartx_tfl_s cn58xx;
+ struct cvmx_mio_uartx_tfl_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_tfr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t tfr:8;
+ } s;
+ struct cvmx_mio_uartx_tfr_s cn30xx;
+ struct cvmx_mio_uartx_tfr_s cn31xx;
+ struct cvmx_mio_uartx_tfr_s cn38xx;
+ struct cvmx_mio_uartx_tfr_s cn38xxp2;
+ struct cvmx_mio_uartx_tfr_s cn50xx;
+ struct cvmx_mio_uartx_tfr_s cn52xx;
+ struct cvmx_mio_uartx_tfr_s cn52xxp1;
+ struct cvmx_mio_uartx_tfr_s cn56xx;
+ struct cvmx_mio_uartx_tfr_s cn56xxp1;
+ struct cvmx_mio_uartx_tfr_s cn58xx;
+ struct cvmx_mio_uartx_tfr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_thr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_thr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t thr:8;
+ } s;
+ struct cvmx_mio_uartx_thr_s cn30xx;
+ struct cvmx_mio_uartx_thr_s cn31xx;
+ struct cvmx_mio_uartx_thr_s cn38xx;
+ struct cvmx_mio_uartx_thr_s cn38xxp2;
+ struct cvmx_mio_uartx_thr_s cn50xx;
+ struct cvmx_mio_uartx_thr_s cn52xx;
+ struct cvmx_mio_uartx_thr_s cn52xxp1;
+ struct cvmx_mio_uartx_thr_s cn56xx;
+ struct cvmx_mio_uartx_thr_s cn56xxp1;
+ struct cvmx_mio_uartx_thr_s cn58xx;
+ struct cvmx_mio_uartx_thr_s cn58xxp1;
+};
+
+union cvmx_mio_uartx_usr {
+ uint64_t u64;
+ struct cvmx_mio_uartx_usr_s {
+ uint64_t reserved_5_63:59;
+ uint64_t rff:1;
+ uint64_t rfne:1;
+ uint64_t tfe:1;
+ uint64_t tfnf:1;
+ uint64_t busy:1;
+ } s;
+ struct cvmx_mio_uartx_usr_s cn30xx;
+ struct cvmx_mio_uartx_usr_s cn31xx;
+ struct cvmx_mio_uartx_usr_s cn38xx;
+ struct cvmx_mio_uartx_usr_s cn38xxp2;
+ struct cvmx_mio_uartx_usr_s cn50xx;
+ struct cvmx_mio_uartx_usr_s cn52xx;
+ struct cvmx_mio_uartx_usr_s cn52xxp1;
+ struct cvmx_mio_uartx_usr_s cn56xx;
+ struct cvmx_mio_uartx_usr_s cn56xxp1;
+ struct cvmx_mio_uartx_usr_s cn58xx;
+ struct cvmx_mio_uartx_usr_s cn58xxp1;
+};
+
+union cvmx_mio_uart2_dlh {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dlh_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dlh:8;
+ } s;
+ struct cvmx_mio_uart2_dlh_s cn52xx;
+ struct cvmx_mio_uart2_dlh_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_dll {
+ uint64_t u64;
+ struct cvmx_mio_uart2_dll_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dll:8;
+ } s;
+ struct cvmx_mio_uart2_dll_s cn52xx;
+ struct cvmx_mio_uart2_dll_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_far {
+ uint64_t u64;
+ struct cvmx_mio_uart2_far_s {
+ uint64_t reserved_1_63:63;
+ uint64_t far:1;
+ } s;
+ struct cvmx_mio_uart2_far_s cn52xx;
+ struct cvmx_mio_uart2_far_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_fcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_fcr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rxtrig:2;
+ uint64_t txtrig:2;
+ uint64_t reserved_3_3:1;
+ uint64_t txfr:1;
+ uint64_t rxfr:1;
+ uint64_t en:1;
+ } s;
+ struct cvmx_mio_uart2_fcr_s cn52xx;
+ struct cvmx_mio_uart2_fcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_htx {
+ uint64_t u64;
+ struct cvmx_mio_uart2_htx_s {
+ uint64_t reserved_1_63:63;
+ uint64_t htx:1;
+ } s;
+ struct cvmx_mio_uart2_htx_s cn52xx;
+ struct cvmx_mio_uart2_htx_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_ier {
+ uint64_t u64;
+ struct cvmx_mio_uart2_ier_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ptime:1;
+ uint64_t reserved_4_6:3;
+ uint64_t edssi:1;
+ uint64_t elsi:1;
+ uint64_t etbei:1;
+ uint64_t erbfi:1;
+ } s;
+ struct cvmx_mio_uart2_ier_s cn52xx;
+ struct cvmx_mio_uart2_ier_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_iir {
+ uint64_t u64;
+ struct cvmx_mio_uart2_iir_s {
+ uint64_t reserved_8_63:56;
+ uint64_t fen:2;
+ uint64_t reserved_4_5:2;
+ uint64_t iid:4;
+ } s;
+ struct cvmx_mio_uart2_iir_s cn52xx;
+ struct cvmx_mio_uart2_iir_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_lcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lcr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dlab:1;
+ uint64_t brk:1;
+ uint64_t reserved_5_5:1;
+ uint64_t eps:1;
+ uint64_t pen:1;
+ uint64_t stop:1;
+ uint64_t cls:2;
+ } s;
+ struct cvmx_mio_uart2_lcr_s cn52xx;
+ struct cvmx_mio_uart2_lcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_lsr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_lsr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t ferr:1;
+ uint64_t temt:1;
+ uint64_t thre:1;
+ uint64_t bi:1;
+ uint64_t fe:1;
+ uint64_t pe:1;
+ uint64_t oe:1;
+ uint64_t dr:1;
+ } s;
+ struct cvmx_mio_uart2_lsr_s cn52xx;
+ struct cvmx_mio_uart2_lsr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_mcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_mcr_s {
+ uint64_t reserved_6_63:58;
+ uint64_t afce:1;
+ uint64_t loop:1;
+ uint64_t out2:1;
+ uint64_t out1:1;
+ uint64_t rts:1;
+ uint64_t dtr:1;
+ } s;
+ struct cvmx_mio_uart2_mcr_s cn52xx;
+ struct cvmx_mio_uart2_mcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_msr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_msr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t dcd:1;
+ uint64_t ri:1;
+ uint64_t dsr:1;
+ uint64_t cts:1;
+ uint64_t ddcd:1;
+ uint64_t teri:1;
+ uint64_t ddsr:1;
+ uint64_t dcts:1;
+ } s;
+ struct cvmx_mio_uart2_msr_s cn52xx;
+ struct cvmx_mio_uart2_msr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rbr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rbr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rbr:8;
+ } s;
+ struct cvmx_mio_uart2_rbr_s cn52xx;
+ struct cvmx_mio_uart2_rbr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfl_s {
+ uint64_t reserved_7_63:57;
+ uint64_t rfl:7;
+ } s;
+ struct cvmx_mio_uart2_rfl_s cn52xx;
+ struct cvmx_mio_uart2_rfl_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_rfw {
+ uint64_t u64;
+ struct cvmx_mio_uart2_rfw_s {
+ uint64_t reserved_10_63:54;
+ uint64_t rffe:1;
+ uint64_t rfpe:1;
+ uint64_t rfwd:8;
+ } s;
+ struct cvmx_mio_uart2_rfw_s cn52xx;
+ struct cvmx_mio_uart2_rfw_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_sbcr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sbcr_s {
+ uint64_t reserved_1_63:63;
+ uint64_t sbcr:1;
+ } s;
+ struct cvmx_mio_uart2_sbcr_s cn52xx;
+ struct cvmx_mio_uart2_sbcr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_scr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_scr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t scr:8;
+ } s;
+ struct cvmx_mio_uart2_scr_s cn52xx;
+ struct cvmx_mio_uart2_scr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_sfe {
+ uint64_t u64;
+ struct cvmx_mio_uart2_sfe_s {
+ uint64_t reserved_1_63:63;
+ uint64_t sfe:1;
+ } s;
+ struct cvmx_mio_uart2_sfe_s cn52xx;
+ struct cvmx_mio_uart2_sfe_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srr_s {
+ uint64_t reserved_3_63:61;
+ uint64_t stfr:1;
+ uint64_t srfr:1;
+ uint64_t usr:1;
+ } s;
+ struct cvmx_mio_uart2_srr_s cn52xx;
+ struct cvmx_mio_uart2_srr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srt_s {
+ uint64_t reserved_2_63:62;
+ uint64_t srt:2;
+ } s;
+ struct cvmx_mio_uart2_srt_s cn52xx;
+ struct cvmx_mio_uart2_srt_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_srts {
+ uint64_t u64;
+ struct cvmx_mio_uart2_srts_s {
+ uint64_t reserved_1_63:63;
+ uint64_t srts:1;
+ } s;
+ struct cvmx_mio_uart2_srts_s cn52xx;
+ struct cvmx_mio_uart2_srts_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_stt {
+ uint64_t u64;
+ struct cvmx_mio_uart2_stt_s {
+ uint64_t reserved_2_63:62;
+ uint64_t stt:2;
+ } s;
+ struct cvmx_mio_uart2_stt_s cn52xx;
+ struct cvmx_mio_uart2_stt_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_tfl {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfl_s {
+ uint64_t reserved_7_63:57;
+ uint64_t tfl:7;
+ } s;
+ struct cvmx_mio_uart2_tfl_s cn52xx;
+ struct cvmx_mio_uart2_tfl_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_tfr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_tfr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t tfr:8;
+ } s;
+ struct cvmx_mio_uart2_tfr_s cn52xx;
+ struct cvmx_mio_uart2_tfr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_thr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_thr_s {
+ uint64_t reserved_8_63:56;
+ uint64_t thr:8;
+ } s;
+ struct cvmx_mio_uart2_thr_s cn52xx;
+ struct cvmx_mio_uart2_thr_s cn52xxp1;
+};
+
+union cvmx_mio_uart2_usr {
+ uint64_t u64;
+ struct cvmx_mio_uart2_usr_s {
+ uint64_t reserved_5_63:59;
+ uint64_t rff:1;
+ uint64_t rfne:1;
+ uint64_t tfe:1;
+ uint64_t tfnf:1;
+ uint64_t busy:1;
+ } s;
+ struct cvmx_mio_uart2_usr_s cn52xx;
+ struct cvmx_mio_uart2_usr_s cn52xxp1;
+};
+
+#endif
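
The register unions above are intended to be paired with the CSR address macros from this header in a read-modify-write sequence. The following is only an illustrative sketch: it assumes the CVMX_MIO_UARTX_LCR() address macro defined earlier in this header and the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h, and the wrapper function name is hypothetical.

    /* Illustrative sketch only; assumes CVMX_MIO_UARTX_LCR() and the
     * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h. */
    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-mio-defs.h>

    static void uart_set_8n1(int uart)
    {
            union cvmx_mio_uartx_lcr lcr;

            /* Read the whole 64-bit CSR into the union ... */
            lcr.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LCR(uart));
            /* ... adjust the individual bit fields ... */
            lcr.s.cls = 3;          /* 8 data bits */
            lcr.s.stop = 0;         /* 1 stop bit */
            lcr.s.pen = 0;          /* parity disabled */
            /* ... and write the full word back. */
            cvmx_write_csr(CVMX_MIO_UARTX_LCR(uart), lcr.u64);
    }
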
diff --git a/arch/mips/include/asm/octeon/cvmx-packet.h b/arch/mips/include/asm/octeon/cvmx-packet.h
new file mode 100644
index 000000000000..38aefa1bab9d
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-packet.h
@@ -0,0 +1,61 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * Packet buffer defines.
+ */
+
+#ifndef __CVMX_PACKET_H__
+#define __CVMX_PACKET_H__
+
+/**
+ * This structure defines a buffer pointer on Octeon
+ */
+union cvmx_buf_ptr {
+ void *ptr;
+ uint64_t u64;
+ struct {
+ /* if set, invert the "free" pick of the overall
+ * packet. HW always sets this bit to 0 on inbound
+ * packet */
+ uint64_t i:1;
+
+ /* Indicates the amount to back up to get to the
+ * buffer start in cache lines. In most cases this is
+ * less than one complete cache line, so the value is
+ * zero */
+ uint64_t back:4;
+ /* The pool that the buffer came from / goes to */
+ uint64_t pool:3;
+ /* The size of the segment pointed to by addr (in bytes) */
+ uint64_t size:16;
+ /* Pointer to the first byte of the data, NOT buffer */
+ uint64_t addr:40;
+ } s;
+};
+
+#endif /* __CVMX_PACKET_H__ */
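
As a hedged illustration of how the fields above combine, the data pointer and the start of the underlying buffer could be recovered as sketched below. The helper names are hypothetical; cvmx_phys_to_ptr() from cvmx.h and Octeon's 128-byte cache line size are assumptions, not part of this header.

    /* Illustrative sketch only; assumes cvmx_phys_to_ptr() from cvmx.h
     * and 128-byte cache lines. */
    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-packet.h>

    static void *buf_data(union cvmx_buf_ptr buf)
    {
            /* addr is the physical address of the first data byte */
            return cvmx_phys_to_ptr(buf.s.addr);
    }

    static void *buf_start(union cvmx_buf_ptr buf)
    {
            /*
             * The buffer begins "back" cache lines before the cache
             * line that contains the data pointer.
             */
            return cvmx_phys_to_ptr((buf.s.addr & ~127ull) -
                                    buf.s.back * 128ull);
    }
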
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
new file mode 100644
index 000000000000..2d82e24be51c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -0,0 +1,698 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_POW_DEFS_H__
+#define __CVMX_POW_DEFS_H__
+
+#define CVMX_POW_BIST_STAT \
+ CVMX_ADD_IO_SEG(0x00016700000003F8ull)
+#define CVMX_POW_DS_PC \
+ CVMX_ADD_IO_SEG(0x0001670000000398ull)
+#define CVMX_POW_ECC_ERR \
+ CVMX_ADD_IO_SEG(0x0001670000000218ull)
+#define CVMX_POW_INT_CTL \
+ CVMX_ADD_IO_SEG(0x0001670000000220ull)
+#define CVMX_POW_IQ_CNTX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000340ull + (((offset) & 7) * 8))
+#define CVMX_POW_IQ_COM_CNT \
+ CVMX_ADD_IO_SEG(0x0001670000000388ull)
+#define CVMX_POW_IQ_INT \
+ CVMX_ADD_IO_SEG(0x0001670000000238ull)
+#define CVMX_POW_IQ_INT_EN \
+ CVMX_ADD_IO_SEG(0x0001670000000240ull)
+#define CVMX_POW_IQ_THRX(offset) \
+ CVMX_ADD_IO_SEG(0x00016700000003A0ull + (((offset) & 7) * 8))
+#define CVMX_POW_NOS_CNT \
+ CVMX_ADD_IO_SEG(0x0001670000000228ull)
+#define CVMX_POW_NW_TIM \
+ CVMX_ADD_IO_SEG(0x0001670000000210ull)
+#define CVMX_POW_PF_RST_MSK \
+ CVMX_ADD_IO_SEG(0x0001670000000230ull)
+#define CVMX_POW_PP_GRP_MSKX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000000ull + (((offset) & 15) * 8))
+#define CVMX_POW_QOS_RNDX(offset) \
+ CVMX_ADD_IO_SEG(0x00016700000001C0ull + (((offset) & 7) * 8))
+#define CVMX_POW_QOS_THRX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000180ull + (((offset) & 7) * 8))
+#define CVMX_POW_TS_PC \
+ CVMX_ADD_IO_SEG(0x0001670000000390ull)
+#define CVMX_POW_WA_COM_PC \
+ CVMX_ADD_IO_SEG(0x0001670000000380ull)
+#define CVMX_POW_WA_PCX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000300ull + (((offset) & 7) * 8))
+#define CVMX_POW_WQ_INT \
+ CVMX_ADD_IO_SEG(0x0001670000000200ull)
+#define CVMX_POW_WQ_INT_CNTX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000100ull + (((offset) & 15) * 8))
+#define CVMX_POW_WQ_INT_PC \
+ CVMX_ADD_IO_SEG(0x0001670000000208ull)
+#define CVMX_POW_WQ_INT_THRX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000080ull + (((offset) & 15) * 8))
+#define CVMX_POW_WS_PCX(offset) \
+ CVMX_ADD_IO_SEG(0x0001670000000280ull + (((offset) & 15) * 8))
+
+union cvmx_pow_bist_stat {
+ uint64_t u64;
+ struct cvmx_pow_bist_stat_s {
+ uint64_t reserved_32_63:32;
+ uint64_t pp:16;
+ uint64_t reserved_0_15:16;
+ } s;
+ struct cvmx_pow_bist_stat_cn30xx {
+ uint64_t reserved_17_63:47;
+ uint64_t pp:1;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+ } cn30xx;
+ struct cvmx_pow_bist_stat_cn31xx {
+ uint64_t reserved_18_63:46;
+ uint64_t pp:2;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+ } cn31xx;
+ struct cvmx_pow_bist_stat_cn38xx {
+ uint64_t reserved_32_63:32;
+ uint64_t pp:16;
+ uint64_t reserved_10_15:6;
+ uint64_t cam:1;
+ uint64_t nbt:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend1:1;
+ uint64_t pend0:1;
+ uint64_t adr1:1;
+ uint64_t adr0:1;
+ } cn38xx;
+ struct cvmx_pow_bist_stat_cn38xx cn38xxp2;
+ struct cvmx_pow_bist_stat_cn31xx cn50xx;
+ struct cvmx_pow_bist_stat_cn52xx {
+ uint64_t reserved_20_63:44;
+ uint64_t pp:4;
+ uint64_t reserved_9_15:7;
+ uint64_t cam:1;
+ uint64_t nbt1:1;
+ uint64_t nbt0:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend:1;
+ uint64_t adr:1;
+ } cn52xx;
+ struct cvmx_pow_bist_stat_cn52xx cn52xxp1;
+ struct cvmx_pow_bist_stat_cn56xx {
+ uint64_t reserved_28_63:36;
+ uint64_t pp:12;
+ uint64_t reserved_10_15:6;
+ uint64_t cam:1;
+ uint64_t nbt:1;
+ uint64_t index:1;
+ uint64_t fidx:1;
+ uint64_t nbr1:1;
+ uint64_t nbr0:1;
+ uint64_t pend1:1;
+ uint64_t pend0:1;
+ uint64_t adr1:1;
+ uint64_t adr0:1;
+ } cn56xx;
+ struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
+ struct cvmx_pow_bist_stat_cn38xx cn58xx;
+ struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
+};
+
+union cvmx_pow_ds_pc {
+ uint64_t u64;
+ struct cvmx_pow_ds_pc_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ds_pc:32;
+ } s;
+ struct cvmx_pow_ds_pc_s cn30xx;
+ struct cvmx_pow_ds_pc_s cn31xx;
+ struct cvmx_pow_ds_pc_s cn38xx;
+ struct cvmx_pow_ds_pc_s cn38xxp2;
+ struct cvmx_pow_ds_pc_s cn50xx;
+ struct cvmx_pow_ds_pc_s cn52xx;
+ struct cvmx_pow_ds_pc_s cn52xxp1;
+ struct cvmx_pow_ds_pc_s cn56xx;
+ struct cvmx_pow_ds_pc_s cn56xxp1;
+ struct cvmx_pow_ds_pc_s cn58xx;
+ struct cvmx_pow_ds_pc_s cn58xxp1;
+};
+
+union cvmx_pow_ecc_err {
+ uint64_t u64;
+ struct cvmx_pow_ecc_err_s {
+ uint64_t reserved_45_63:19;
+ uint64_t iop_ie:13;
+ uint64_t reserved_29_31:3;
+ uint64_t iop:13;
+ uint64_t reserved_14_15:2;
+ uint64_t rpe_ie:1;
+ uint64_t rpe:1;
+ uint64_t reserved_9_11:3;
+ uint64_t syn:5;
+ uint64_t dbe_ie:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ } s;
+ struct cvmx_pow_ecc_err_s cn30xx;
+ struct cvmx_pow_ecc_err_cn31xx {
+ uint64_t reserved_14_63:50;
+ uint64_t rpe_ie:1;
+ uint64_t rpe:1;
+ uint64_t reserved_9_11:3;
+ uint64_t syn:5;
+ uint64_t dbe_ie:1;
+ uint64_t sbe_ie:1;
+ uint64_t dbe:1;
+ uint64_t sbe:1;
+ } cn31xx;
+ struct cvmx_pow_ecc_err_s cn38xx;
+ struct cvmx_pow_ecc_err_cn31xx cn38xxp2;
+ struct cvmx_pow_ecc_err_s cn50xx;
+ struct cvmx_pow_ecc_err_s cn52xx;
+ struct cvmx_pow_ecc_err_s cn52xxp1;
+ struct cvmx_pow_ecc_err_s cn56xx;
+ struct cvmx_pow_ecc_err_s cn56xxp1;
+ struct cvmx_pow_ecc_err_s cn58xx;
+ struct cvmx_pow_ecc_err_s cn58xxp1;
+};
+
+union cvmx_pow_int_ctl {
+ uint64_t u64;
+ struct cvmx_pow_int_ctl_s {
+ uint64_t reserved_6_63:58;
+ uint64_t pfr_dis:1;
+ uint64_t nbr_thr:5;
+ } s;
+ struct cvmx_pow_int_ctl_s cn30xx;
+ struct cvmx_pow_int_ctl_s cn31xx;
+ struct cvmx_pow_int_ctl_s cn38xx;
+ struct cvmx_pow_int_ctl_s cn38xxp2;
+ struct cvmx_pow_int_ctl_s cn50xx;
+ struct cvmx_pow_int_ctl_s cn52xx;
+ struct cvmx_pow_int_ctl_s cn52xxp1;
+ struct cvmx_pow_int_ctl_s cn56xx;
+ struct cvmx_pow_int_ctl_s cn56xxp1;
+ struct cvmx_pow_int_ctl_s cn58xx;
+ struct cvmx_pow_int_ctl_s cn58xxp1;
+};
+
+union cvmx_pow_iq_cntx {
+ uint64_t u64;
+ struct cvmx_pow_iq_cntx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iq_cnt:32;
+ } s;
+ struct cvmx_pow_iq_cntx_s cn30xx;
+ struct cvmx_pow_iq_cntx_s cn31xx;
+ struct cvmx_pow_iq_cntx_s cn38xx;
+ struct cvmx_pow_iq_cntx_s cn38xxp2;
+ struct cvmx_pow_iq_cntx_s cn50xx;
+ struct cvmx_pow_iq_cntx_s cn52xx;
+ struct cvmx_pow_iq_cntx_s cn52xxp1;
+ struct cvmx_pow_iq_cntx_s cn56xx;
+ struct cvmx_pow_iq_cntx_s cn56xxp1;
+ struct cvmx_pow_iq_cntx_s cn58xx;
+ struct cvmx_pow_iq_cntx_s cn58xxp1;
+};
+
+union cvmx_pow_iq_com_cnt {
+ uint64_t u64;
+ struct cvmx_pow_iq_com_cnt_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iq_cnt:32;
+ } s;
+ struct cvmx_pow_iq_com_cnt_s cn30xx;
+ struct cvmx_pow_iq_com_cnt_s cn31xx;
+ struct cvmx_pow_iq_com_cnt_s cn38xx;
+ struct cvmx_pow_iq_com_cnt_s cn38xxp2;
+ struct cvmx_pow_iq_com_cnt_s cn50xx;
+ struct cvmx_pow_iq_com_cnt_s cn52xx;
+ struct cvmx_pow_iq_com_cnt_s cn52xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn56xx;
+ struct cvmx_pow_iq_com_cnt_s cn56xxp1;
+ struct cvmx_pow_iq_com_cnt_s cn58xx;
+ struct cvmx_pow_iq_com_cnt_s cn58xxp1;
+};
+
+union cvmx_pow_iq_int {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_s {
+ uint64_t reserved_8_63:56;
+ uint64_t iq_int:8;
+ } s;
+ struct cvmx_pow_iq_int_s cn52xx;
+ struct cvmx_pow_iq_int_s cn52xxp1;
+ struct cvmx_pow_iq_int_s cn56xx;
+ struct cvmx_pow_iq_int_s cn56xxp1;
+};
+
+union cvmx_pow_iq_int_en {
+ uint64_t u64;
+ struct cvmx_pow_iq_int_en_s {
+ uint64_t reserved_8_63:56;
+ uint64_t int_en:8;
+ } s;
+ struct cvmx_pow_iq_int_en_s cn52xx;
+ struct cvmx_pow_iq_int_en_s cn52xxp1;
+ struct cvmx_pow_iq_int_en_s cn56xx;
+ struct cvmx_pow_iq_int_en_s cn56xxp1;
+};
+
+union cvmx_pow_iq_thrx {
+ uint64_t u64;
+ struct cvmx_pow_iq_thrx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iq_thr:32;
+ } s;
+ struct cvmx_pow_iq_thrx_s cn52xx;
+ struct cvmx_pow_iq_thrx_s cn52xxp1;
+ struct cvmx_pow_iq_thrx_s cn56xx;
+ struct cvmx_pow_iq_thrx_s cn56xxp1;
+};
+
+union cvmx_pow_nos_cnt {
+ uint64_t u64;
+ struct cvmx_pow_nos_cnt_s {
+ uint64_t reserved_12_63:52;
+ uint64_t nos_cnt:12;
+ } s;
+ struct cvmx_pow_nos_cnt_cn30xx {
+ uint64_t reserved_7_63:57;
+ uint64_t nos_cnt:7;
+ } cn30xx;
+ struct cvmx_pow_nos_cnt_cn31xx {
+ uint64_t reserved_9_63:55;
+ uint64_t nos_cnt:9;
+ } cn31xx;
+ struct cvmx_pow_nos_cnt_s cn38xx;
+ struct cvmx_pow_nos_cnt_s cn38xxp2;
+ struct cvmx_pow_nos_cnt_cn31xx cn50xx;
+ struct cvmx_pow_nos_cnt_cn52xx {
+ uint64_t reserved_10_63:54;
+ uint64_t nos_cnt:10;
+ } cn52xx;
+ struct cvmx_pow_nos_cnt_cn52xx cn52xxp1;
+ struct cvmx_pow_nos_cnt_s cn56xx;
+ struct cvmx_pow_nos_cnt_s cn56xxp1;
+ struct cvmx_pow_nos_cnt_s cn58xx;
+ struct cvmx_pow_nos_cnt_s cn58xxp1;
+};
+
+union cvmx_pow_nw_tim {
+ uint64_t u64;
+ struct cvmx_pow_nw_tim_s {
+ uint64_t reserved_10_63:54;
+ uint64_t nw_tim:10;
+ } s;
+ struct cvmx_pow_nw_tim_s cn30xx;
+ struct cvmx_pow_nw_tim_s cn31xx;
+ struct cvmx_pow_nw_tim_s cn38xx;
+ struct cvmx_pow_nw_tim_s cn38xxp2;
+ struct cvmx_pow_nw_tim_s cn50xx;
+ struct cvmx_pow_nw_tim_s cn52xx;
+ struct cvmx_pow_nw_tim_s cn52xxp1;
+ struct cvmx_pow_nw_tim_s cn56xx;
+ struct cvmx_pow_nw_tim_s cn56xxp1;
+ struct cvmx_pow_nw_tim_s cn58xx;
+ struct cvmx_pow_nw_tim_s cn58xxp1;
+};
+
+union cvmx_pow_pf_rst_msk {
+ uint64_t u64;
+ struct cvmx_pow_pf_rst_msk_s {
+ uint64_t reserved_8_63:56;
+ uint64_t rst_msk:8;
+ } s;
+ struct cvmx_pow_pf_rst_msk_s cn50xx;
+ struct cvmx_pow_pf_rst_msk_s cn52xx;
+ struct cvmx_pow_pf_rst_msk_s cn52xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn56xx;
+ struct cvmx_pow_pf_rst_msk_s cn56xxp1;
+ struct cvmx_pow_pf_rst_msk_s cn58xx;
+ struct cvmx_pow_pf_rst_msk_s cn58xxp1;
+};
+
+union cvmx_pow_pp_grp_mskx {
+ uint64_t u64;
+ struct cvmx_pow_pp_grp_mskx_s {
+ uint64_t reserved_48_63:16;
+ uint64_t qos7_pri:4;
+ uint64_t qos6_pri:4;
+ uint64_t qos5_pri:4;
+ uint64_t qos4_pri:4;
+ uint64_t qos3_pri:4;
+ uint64_t qos2_pri:4;
+ uint64_t qos1_pri:4;
+ uint64_t qos0_pri:4;
+ uint64_t grp_msk:16;
+ } s;
+ struct cvmx_pow_pp_grp_mskx_cn30xx {
+ uint64_t reserved_16_63:48;
+ uint64_t grp_msk:16;
+ } cn30xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx;
+ struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2;
+ struct cvmx_pow_pp_grp_mskx_s cn50xx;
+ struct cvmx_pow_pp_grp_mskx_s cn52xx;
+ struct cvmx_pow_pp_grp_mskx_s cn52xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn56xx;
+ struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
+ struct cvmx_pow_pp_grp_mskx_s cn58xx;
+ struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
+};
+
+union cvmx_pow_qos_rndx {
+ uint64_t u64;
+ struct cvmx_pow_qos_rndx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t rnd_p3:8;
+ uint64_t rnd_p2:8;
+ uint64_t rnd_p1:8;
+ uint64_t rnd:8;
+ } s;
+ struct cvmx_pow_qos_rndx_s cn30xx;
+ struct cvmx_pow_qos_rndx_s cn31xx;
+ struct cvmx_pow_qos_rndx_s cn38xx;
+ struct cvmx_pow_qos_rndx_s cn38xxp2;
+ struct cvmx_pow_qos_rndx_s cn50xx;
+ struct cvmx_pow_qos_rndx_s cn52xx;
+ struct cvmx_pow_qos_rndx_s cn52xxp1;
+ struct cvmx_pow_qos_rndx_s cn56xx;
+ struct cvmx_pow_qos_rndx_s cn56xxp1;
+ struct cvmx_pow_qos_rndx_s cn58xx;
+ struct cvmx_pow_qos_rndx_s cn58xxp1;
+};
+
+union cvmx_pow_qos_thrx {
+ uint64_t u64;
+ struct cvmx_pow_qos_thrx_s {
+ uint64_t reserved_60_63:4;
+ uint64_t des_cnt:12;
+ uint64_t buf_cnt:12;
+ uint64_t free_cnt:12;
+ uint64_t reserved_23_23:1;
+ uint64_t max_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t min_thr:11;
+ } s;
+ struct cvmx_pow_qos_thrx_cn30xx {
+ uint64_t reserved_55_63:9;
+ uint64_t des_cnt:7;
+ uint64_t reserved_43_47:5;
+ uint64_t buf_cnt:7;
+ uint64_t reserved_31_35:5;
+ uint64_t free_cnt:7;
+ uint64_t reserved_18_23:6;
+ uint64_t max_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t min_thr:6;
+ } cn30xx;
+ struct cvmx_pow_qos_thrx_cn31xx {
+ uint64_t reserved_57_63:7;
+ uint64_t des_cnt:9;
+ uint64_t reserved_45_47:3;
+ uint64_t buf_cnt:9;
+ uint64_t reserved_33_35:3;
+ uint64_t free_cnt:9;
+ uint64_t reserved_20_23:4;
+ uint64_t max_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t min_thr:8;
+ } cn31xx;
+ struct cvmx_pow_qos_thrx_s cn38xx;
+ struct cvmx_pow_qos_thrx_s cn38xxp2;
+ struct cvmx_pow_qos_thrx_cn31xx cn50xx;
+ struct cvmx_pow_qos_thrx_cn52xx {
+ uint64_t reserved_58_63:6;
+ uint64_t des_cnt:10;
+ uint64_t reserved_46_47:2;
+ uint64_t buf_cnt:10;
+ uint64_t reserved_34_35:2;
+ uint64_t free_cnt:10;
+ uint64_t reserved_21_23:3;
+ uint64_t max_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t min_thr:9;
+ } cn52xx;
+ struct cvmx_pow_qos_thrx_cn52xx cn52xxp1;
+ struct cvmx_pow_qos_thrx_s cn56xx;
+ struct cvmx_pow_qos_thrx_s cn56xxp1;
+ struct cvmx_pow_qos_thrx_s cn58xx;
+ struct cvmx_pow_qos_thrx_s cn58xxp1;
+};
+
+union cvmx_pow_ts_pc {
+ uint64_t u64;
+ struct cvmx_pow_ts_pc_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ts_pc:32;
+ } s;
+ struct cvmx_pow_ts_pc_s cn30xx;
+ struct cvmx_pow_ts_pc_s cn31xx;
+ struct cvmx_pow_ts_pc_s cn38xx;
+ struct cvmx_pow_ts_pc_s cn38xxp2;
+ struct cvmx_pow_ts_pc_s cn50xx;
+ struct cvmx_pow_ts_pc_s cn52xx;
+ struct cvmx_pow_ts_pc_s cn52xxp1;
+ struct cvmx_pow_ts_pc_s cn56xx;
+ struct cvmx_pow_ts_pc_s cn56xxp1;
+ struct cvmx_pow_ts_pc_s cn58xx;
+ struct cvmx_pow_ts_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wa_com_pc {
+ uint64_t u64;
+ struct cvmx_pow_wa_com_pc_s {
+ uint64_t reserved_32_63:32;
+ uint64_t wa_pc:32;
+ } s;
+ struct cvmx_pow_wa_com_pc_s cn30xx;
+ struct cvmx_pow_wa_com_pc_s cn31xx;
+ struct cvmx_pow_wa_com_pc_s cn38xx;
+ struct cvmx_pow_wa_com_pc_s cn38xxp2;
+ struct cvmx_pow_wa_com_pc_s cn50xx;
+ struct cvmx_pow_wa_com_pc_s cn52xx;
+ struct cvmx_pow_wa_com_pc_s cn52xxp1;
+ struct cvmx_pow_wa_com_pc_s cn56xx;
+ struct cvmx_pow_wa_com_pc_s cn56xxp1;
+ struct cvmx_pow_wa_com_pc_s cn58xx;
+ struct cvmx_pow_wa_com_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wa_pcx {
+ uint64_t u64;
+ struct cvmx_pow_wa_pcx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t wa_pc:32;
+ } s;
+ struct cvmx_pow_wa_pcx_s cn30xx;
+ struct cvmx_pow_wa_pcx_s cn31xx;
+ struct cvmx_pow_wa_pcx_s cn38xx;
+ struct cvmx_pow_wa_pcx_s cn38xxp2;
+ struct cvmx_pow_wa_pcx_s cn50xx;
+ struct cvmx_pow_wa_pcx_s cn52xx;
+ struct cvmx_pow_wa_pcx_s cn52xxp1;
+ struct cvmx_pow_wa_pcx_s cn56xx;
+ struct cvmx_pow_wa_pcx_s cn56xxp1;
+ struct cvmx_pow_wa_pcx_s cn58xx;
+ struct cvmx_pow_wa_pcx_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_s {
+ uint64_t reserved_32_63:32;
+ uint64_t iq_dis:16;
+ uint64_t wq_int:16;
+ } s;
+ struct cvmx_pow_wq_int_s cn30xx;
+ struct cvmx_pow_wq_int_s cn31xx;
+ struct cvmx_pow_wq_int_s cn38xx;
+ struct cvmx_pow_wq_int_s cn38xxp2;
+ struct cvmx_pow_wq_int_s cn50xx;
+ struct cvmx_pow_wq_int_s cn52xx;
+ struct cvmx_pow_wq_int_s cn52xxp1;
+ struct cvmx_pow_wq_int_s cn56xx;
+ struct cvmx_pow_wq_int_s cn56xxp1;
+ struct cvmx_pow_wq_int_s cn58xx;
+ struct cvmx_pow_wq_int_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_cntx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_cntx_s {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t ds_cnt:12;
+ uint64_t iq_cnt:12;
+ } s;
+ struct cvmx_pow_wq_int_cntx_cn30xx {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_19_23:5;
+ uint64_t ds_cnt:7;
+ uint64_t reserved_7_11:5;
+ uint64_t iq_cnt:7;
+ } cn30xx;
+ struct cvmx_pow_wq_int_cntx_cn31xx {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_cnt:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_cnt:9;
+ } cn31xx;
+ struct cvmx_pow_wq_int_cntx_s cn38xx;
+ struct cvmx_pow_wq_int_cntx_s cn38xxp2;
+ struct cvmx_pow_wq_int_cntx_cn31xx cn50xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx {
+ uint64_t reserved_28_63:36;
+ uint64_t tc_cnt:4;
+ uint64_t reserved_22_23:2;
+ uint64_t ds_cnt:10;
+ uint64_t reserved_10_11:2;
+ uint64_t iq_cnt:10;
+ } cn52xx;
+ struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1;
+ struct cvmx_pow_wq_int_cntx_s cn56xx;
+ struct cvmx_pow_wq_int_cntx_s cn56xxp1;
+ struct cvmx_pow_wq_int_cntx_s cn58xx;
+ struct cvmx_pow_wq_int_cntx_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_pc {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_pc_s {
+ uint64_t reserved_60_63:4;
+ uint64_t pc:28;
+ uint64_t reserved_28_31:4;
+ uint64_t pc_thr:20;
+ uint64_t reserved_0_7:8;
+ } s;
+ struct cvmx_pow_wq_int_pc_s cn30xx;
+ struct cvmx_pow_wq_int_pc_s cn31xx;
+ struct cvmx_pow_wq_int_pc_s cn38xx;
+ struct cvmx_pow_wq_int_pc_s cn38xxp2;
+ struct cvmx_pow_wq_int_pc_s cn50xx;
+ struct cvmx_pow_wq_int_pc_s cn52xx;
+ struct cvmx_pow_wq_int_pc_s cn52xxp1;
+ struct cvmx_pow_wq_int_pc_s cn56xx;
+ struct cvmx_pow_wq_int_pc_s cn56xxp1;
+ struct cvmx_pow_wq_int_pc_s cn58xx;
+ struct cvmx_pow_wq_int_pc_s cn58xxp1;
+};
+
+union cvmx_pow_wq_int_thrx {
+ uint64_t u64;
+ struct cvmx_pow_wq_int_thrx_s {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_23_23:1;
+ uint64_t ds_thr:11;
+ uint64_t reserved_11_11:1;
+ uint64_t iq_thr:11;
+ } s;
+ struct cvmx_pow_wq_int_thrx_cn30xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_18_23:6;
+ uint64_t ds_thr:6;
+ uint64_t reserved_6_11:6;
+ uint64_t iq_thr:6;
+ } cn30xx;
+ struct cvmx_pow_wq_int_thrx_cn31xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_20_23:4;
+ uint64_t ds_thr:8;
+ uint64_t reserved_8_11:4;
+ uint64_t iq_thr:8;
+ } cn31xx;
+ struct cvmx_pow_wq_int_thrx_s cn38xx;
+ struct cvmx_pow_wq_int_thrx_s cn38xxp2;
+ struct cvmx_pow_wq_int_thrx_cn31xx cn50xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx {
+ uint64_t reserved_29_63:35;
+ uint64_t tc_en:1;
+ uint64_t tc_thr:4;
+ uint64_t reserved_21_23:3;
+ uint64_t ds_thr:9;
+ uint64_t reserved_9_11:3;
+ uint64_t iq_thr:9;
+ } cn52xx;
+ struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1;
+ struct cvmx_pow_wq_int_thrx_s cn56xx;
+ struct cvmx_pow_wq_int_thrx_s cn56xxp1;
+ struct cvmx_pow_wq_int_thrx_s cn58xx;
+ struct cvmx_pow_wq_int_thrx_s cn58xxp1;
+};
+
+union cvmx_pow_ws_pcx {
+ uint64_t u64;
+ struct cvmx_pow_ws_pcx_s {
+ uint64_t reserved_32_63:32;
+ uint64_t ws_pc:32;
+ } s;
+ struct cvmx_pow_ws_pcx_s cn30xx;
+ struct cvmx_pow_ws_pcx_s cn31xx;
+ struct cvmx_pow_ws_pcx_s cn38xx;
+ struct cvmx_pow_ws_pcx_s cn38xxp2;
+ struct cvmx_pow_ws_pcx_s cn50xx;
+ struct cvmx_pow_ws_pcx_s cn52xx;
+ struct cvmx_pow_ws_pcx_s cn52xxp1;
+ struct cvmx_pow_ws_pcx_s cn56xx;
+ struct cvmx_pow_ws_pcx_s cn56xxp1;
+ struct cvmx_pow_ws_pcx_s cn58xx;
+ struct cvmx_pow_ws_pcx_s cn58xxp1;
+};
+
+#endif
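
These POW registers follow the same address-macro-plus-union pattern as the other CSR headers. A sketch of programming one per-group work-queue interrupt threshold is shown below; the cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h are assumed and the wrapper function is hypothetical.

    /* Illustrative sketch only; assumes cvmx_read_csr()/cvmx_write_csr()
     * from cvmx.h. */
    #include <asm/octeon/cvmx.h>
    #include <asm/octeon/cvmx-pow-defs.h>

    static void pow_set_group_threshold(int group, unsigned int iq_thr)
    {
            union cvmx_pow_wq_int_thrx thr;

            thr.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_THRX(group));
            /* raise WQ_INT for this group once iq_thr entries queue up */
            thr.s.iq_thr = iq_thr;
            cvmx_write_csr(CVMX_POW_WQ_INT_THRX(group), thr.u64);
    }
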
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
new file mode 100644
index 000000000000..2fbf0871df11
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -0,0 +1,232 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/**
+ * Implementation of spinlocks for Octeon CVMX. Although similar in
+ * function to Linux kernel spinlocks, they are not compatible.
+ * Octeon CVMX spinlocks are only used to synchronize with the boot
+ * monitor and other non-Linux programs running in the system.
+ */
+
+#ifndef __CVMX_SPINLOCK_H__
+#define __CVMX_SPINLOCK_H__
+
+#include "cvmx-asm.h"
+
+/* Spinlocks for Octeon */
+
+/* define these to enable recursive spinlock debugging */
+/*#define CVMX_SPINLOCK_DEBUG */
+
+/**
+ * Spinlocks for Octeon CVMX
+ */
+typedef struct {
+ volatile uint32_t value;
+} cvmx_spinlock_t;
+
+/* note - macros not expanded in inline ASM, so values hardcoded */
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
+
+#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
+
+/**
+ * Initialize a spinlock
+ *
+ * @lock: Lock to initialize
+ */
+static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
+{
+ lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Return non-zero if the spinlock is currently locked
+ *
+ * @lock: Lock to check
+ * Returns Non-zero if locked
+ */
+static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
+{
+ return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL;
+}
+
+/**
+ * Releases lock
+ *
+ * @lock: pointer to lock structure
+ */
+static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
+{
+ CVMX_SYNCWS;
+ lock->value = 0;
+ CVMX_SYNCWS;
+}
+
+/**
+ * Attempts to take the lock, but does not spin if lock is not available.
+ * May take some time to acquire the lock even if it is available
+ * due to the ll/sc not succeeding.
+ *
+ * @lock: pointer to lock structure
+ *
+ * Returns 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+
+static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ /* if lock held, fail immediately */
+ " bnez %[tmp], 2f \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n" :
+ [val] "+m"(lock->value), [tmp] "=&r"(tmp)
+ : : "memory");
+
+ return tmp != 0; /* normalize to 0 or 1 */
+}
+
+/**
+ * Gets lock, spins until lock is taken
+ *
+ * @lock: pointer to lock structure
+ */
+static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder \n"
+ "1: ll %[tmp], %[val] \n"
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n" :
+ [val] "+m"(lock->value), [tmp] "=&r"(tmp)
+ : : "memory");
+
+}
+
+/** ********************************************************************
+ * Bit spinlocks
+ * These spinlocks use a single bit (bit 31) of a 32 bit word for locking.
+ * The rest of the bits in the word are left undisturbed. This enables more
+ * compact data structures as only 1 bit is consumed for the lock.
+ *
+ */
+
+/**
+ * Gets lock, spins until lock is taken
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word: word to lock bit 31 of
+ */
+static inline void cvmx_spinlock_bit_lock(uint32_t *word)
+{
+ unsigned int tmp;
+ unsigned int sav;
+
+ __asm__ __volatile__(".set noreorder \n"
+ ".set noat \n"
+ "1: ll %[tmp], %[val] \n"
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n" :
+ [val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
+ : : "memory");
+
+}
+
+/**
+ * Attempts to get lock, returns immediately with success/failure
+ * Preserves the low 31 bits of the 32 bit
+ * word used for the lock.
+ *
+ *
+ * @word: word to lock bit 31 of
+ * Returns 0: lock successfully taken
+ * 1: lock not taken, held by someone else
+ * These return values match the Linux semantics.
+ */
+static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
+{
+ unsigned int tmp;
+
+ __asm__ __volatile__(".set noreorder\n\t"
+ ".set noat\n"
+ "1: ll %[tmp], %[val] \n"
+ /* if lock held, fail immediately */
+ " bbit1 %[tmp], 31, 2f \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n" :
+ [val] "+m"(*word), [tmp] "=&r"(tmp)
+ : : "memory");
+
+ return tmp != 0; /* normalize to 0 or 1 */
+}
+
+/**
+ * Releases bit lock
+ *
+ * Unconditionally clears bit 31 of the lock word. Note that this is
+ * done non-atomically, as this implementation assumes that the rest
+ * of the bits in the word are protected by the lock.
+ *
+ * @word: word to unlock bit 31 in
+ */
+static inline void cvmx_spinlock_bit_unlock(uint32_t *word)
+{
+ CVMX_SYNCWS;
+ *word &= ~(1UL << 31);
+ CVMX_SYNCWS;
+}
+
+#endif /* __CVMX_SPINLOCK_H__ */
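
A minimal usage sketch for the lock, trylock and bit-lock primitives above; the counter example is hypothetical and not part of this header.

    /* Illustrative sketch only. */
    #include <linux/types.h>
    #include <asm/octeon/cvmx-spinlock.h>

    static cvmx_spinlock_t counter_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
    static uint32_t counter;
    static uint32_t flags_word;     /* bit 31 used as a lock, low bits as data */

    static void counter_inc(void)
    {
            cvmx_spinlock_lock(&counter_lock);
            counter++;
            cvmx_spinlock_unlock(&counter_lock);
    }

    static int counter_try_inc(void)
    {
            /* trylock returns 0 on success, 1 if the lock is already held */
            if (cvmx_spinlock_trylock(&counter_lock))
                    return 0;
            counter++;
            cvmx_spinlock_unlock(&counter_lock);
            return 1;
    }

    static void flags_set(uint32_t mask)
    {
            cvmx_spinlock_bit_lock(&flags_word);
            flags_word |= mask & ~(1UL << 31);      /* low 31 bits only */
            cvmx_spinlock_bit_unlock(&flags_word);
    }
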
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
new file mode 100644
index 000000000000..61dd5741afe4
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -0,0 +1,152 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * This module provides system/board information obtained by the bootloader.
+ */
+
+#ifndef __CVMX_SYSINFO_H__
+#define __CVMX_SYSINFO_H__
+
+#define OCTEON_SERIAL_LEN 20
+/**
+ * Structure describing application specific information.
+ * __cvmx_app_init() populates this from the cvmx boot descriptor.
+ * This structure is private to simple executive applications, so
+ * no versioning is required.
+ *
+ * This structure must be provided with some fields set in order to
+ * use simple executive functions in other applications (Linux kernel,
+ * u-boot, etc.) The cvmx_sysinfo_minimal_initialize() function is
+ * provided to set the required values in these cases.
+ */
+struct cvmx_sysinfo {
+ /* System wide variables */
+ /* installed DRAM in system, in bytes */
+ uint64_t system_dram_size;
+
+ /* ptr to memory descriptor block */
+ void *phy_mem_desc_ptr;
+
+
+ /* Application image specific variables */
+ /* stack top address (virtual) */
+ uint64_t stack_top;
+ /* heap base address (virtual) */
+ uint64_t heap_base;
+ /* stack size in bytes */
+ uint32_t stack_size;
+ /* heap size in bytes */
+ uint32_t heap_size;
+ /* coremask defining cores running application */
+ uint32_t core_mask;
+ /* Deprecated, use cvmx_coremask_first_core() to select init core */
+ uint32_t init_core;
+
+ /* exception base address, as set by bootloader */
+ uint64_t exception_base_addr;
+
+ /* cpu clock speed in hz */
+ uint32_t cpu_clock_hz;
+
+ /* dram data rate in hz (data rate = 2 * clock rate) */
+ uint32_t dram_data_rate_hz;
+
+
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ char board_serial_number[OCTEON_SERIAL_LEN];
+ /*
+ * Several boards support compact flash on the Octeon boot
+ * bus. The CF memory spaces may be mapped to different
+ * addresses on different boards. These values will be 0 if
+ * CF is not present. Note that these addresses are physical
+ * addresses, and it is up to the application to use the
+ * proper addressing mode (XKPHYS, KSEG0, etc.)
+ */
+ uint64_t compact_flash_common_base_addr;
+ uint64_t compact_flash_attribute_base_addr;
+ /*
+ * Base address of the LED display (as on EBT3000 board) This
+ * will be 0 if LED display not present. Note that this
+ * address is a physical address, and it is up to the
+ * application to use the proper addressing mode (XKPHYS,
+ * KSEG0, etc.)
+ */
+ uint64_t led_display_base_addr;
+ /* DFA reference clock in hz (if applicable)*/
+ uint32_t dfa_ref_clock_hz;
+ /* configuration flags from bootloader */
+ uint32_t bootloader_config_flags;
+
+ /* Uart number used for console */
+ uint8_t console_uart_num;
+};
+
+/**
+ * This function returns the system/board information as obtained
+ * by the bootloader.
+ *
+ *
+ * Returns Pointer to the boot information structure
+ *
+ */
+
+extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
+
+/**
+ * This function is used in non-simple executive environments (such as
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * are required to use simple executive files directly.
+ *
+ * Locking (if required) must be handled outside of this
+ * function
+ *
+ * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
+ *                    (bootmem descriptor)
+ * @board_type:       Octeon board type enumeration
+ * @board_rev_major:  Board major revision
+ * @board_rev_minor:  Board minor revision
+ * @cpu_clock_hz:     CPU clock frequency in hertz
+ *
+ * Returns 0: Failure
+ *         1: Success
+ */
+extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
+ uint16_t board_type,
+ uint8_t board_rev_major,
+ uint8_t board_rev_minor,
+ uint32_t cpu_clock_hz);
+
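A hedged sketch of the intended call sequence in a non-simple-executive environment; the descriptor pointer, board type and clock value are placeholders rather than real board data, and the include path is an assumption.

#include <linux/kernel.h>
#include "cvmx-sysinfo.h"		/* assumed include path */

static int example_sysinfo_setup(void *bootmem_desc)
{
	/* Placeholder board type 0, rev 1.0, 500 MHz core clock. */
	if (!cvmx_sysinfo_minimal_initialize(bootmem_desc, 0, 1, 0,
					     500000000))
		return -1;

	printk(KERN_INFO "Octeon CPU clock: %u Hz\n",
	       cvmx_sysinfo_get()->cpu_clock_hz);
	return 0;
}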
+#endif /* __CVMX_SYSINFO_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
new file mode 100644
index 000000000000..03fddfa3e928
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -0,0 +1,505 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_H__
+#define __CVMX_H__
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include "cvmx-asm.h"
+#include "cvmx-packet.h"
+#include "cvmx-sysinfo.h"
+
+#include "cvmx-ciu-defs.h"
+#include "cvmx-gpio-defs.h"
+#include "cvmx-iob-defs.h"
+#include "cvmx-ipd-defs.h"
+#include "cvmx-l2c-defs.h"
+#include "cvmx-l2d-defs.h"
+#include "cvmx-l2t-defs.h"
+#include "cvmx-led-defs.h"
+#include "cvmx-mio-defs.h"
+#include "cvmx-pow-defs.h"
+
+#include "cvmx-bootinfo.h"
+#include "cvmx-bootmem.h"
+#include "cvmx-l2c.h"
+
+#ifndef CVMX_ENABLE_DEBUG_PRINTS
+#define CVMX_ENABLE_DEBUG_PRINTS 1
+#endif
+
+#if CVMX_ENABLE_DEBUG_PRINTS
+#define cvmx_dprintf printk
+#else
+#define cvmx_dprintf(...) {}
+#endif
+
+#define CVMX_MAX_CORES (16)
+#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
+#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
+#define CAST64(v) ((long long)(long)(v))
+#define CASTPTR(type, v) ((type *)(long)(v))
+
+/*
+ * Returns processor ID, different Linux and simple exec versions
+ * provided in the cvmx-app-init*.c files.
+ */
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint32_t cvmx_get_proc_id(void)
+{
+ uint32_t id;
+ asm("mfc0 %0, $15,0" : "=r"(id));
+ return id;
+}
+
+/* turn the variable name into a string */
+#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
+#define CVMX_TMP_STR2(x) #x
+
+/**
+ * Builds a bit mask given the required size in bits.
+ *
+ * @bits: Number of bits in the mask
+ * Returns The mask
+ */
+static inline uint64_t cvmx_build_mask(uint64_t bits)
+{
+ return ~((~0x0ull) << bits);
+}
+
+/**
+ * Builds a memory address for I/O based on the Major and Sub DID.
+ *
+ * @major_did: 5 bit major did
+ * @sub_did: 3 bit sub did
+ * Returns I/O base address
+ */
+static inline uint64_t cvmx_build_io_address(uint64_t major_did,
+ uint64_t sub_did)
+{
+ return (0x1ull << 48) | (major_did << 43) | (sub_did << 40);
+}
+
+/**
+ * Perform mask and shift to place the supplied value into
+ * the supplied bit range.
+ *
+ * Example: cvmx_build_bits(39,24,value)
+ * <pre>
+ * 6 5 4 3 3 2 1
+ * 3 5 7 9 1 3 5 7 0
+ * +-------+-------+-------+-------+-------+-------+-------+------+
+ * 000000000000000000000000___________value000000000000000000000000
+ * </pre>
+ *
+ * @high_bit: Highest bit value can occupy (inclusive) 0-63
+ * @low_bit: Lowest bit value can occupy inclusive 0-high_bit
+ * @value: Value to use
+ * Returns Value masked and shifted
+ */
+static inline uint64_t cvmx_build_bits(uint64_t high_bit,
+ uint64_t low_bit, uint64_t value)
+{
+ return (value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit;
+}
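Worked examples of the three helpers above, checked by hand; the self-check function name is invented for illustration and is not part of the original header.

static inline void cvmx_build_helpers_selfcheck(void)
{
	/* ~((~0ull) << 8) == 0xff */
	BUG_ON(cvmx_build_mask(8) != 0xffull);
	/* (1 << 48) | (2 << 43) | (1 << 40) */
	BUG_ON(cvmx_build_io_address(2, 1) != 0x0001110000000000ull);
	/* 0xab masked to 16 bits, shifted up to bit 24 */
	BUG_ON(cvmx_build_bits(39, 24, 0xab) != 0x00000000ab000000ull);
}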
+
+enum cvmx_mips_space {
+ CVMX_MIPS_SPACE_XKSEG = 3LL,
+ CVMX_MIPS_SPACE_XKPHYS = 2LL,
+ CVMX_MIPS_SPACE_XSSEG = 1LL,
+ CVMX_MIPS_SPACE_XUSEG = 0LL
+};
+
+/* These macros are for use when using 32 bit pointers. */
+#define CVMX_MIPS32_SPACE_KSEG0 1l
+#define CVMX_ADD_SEG32(segment, add) \
+ (((int32_t)segment << 31) | (int32_t)(add))
+
+#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
+
+/* These macros simplify the process of creating common IO addresses */
+#define CVMX_ADD_SEG(segment, add) \
+ ((((uint64_t)segment) << 62) | (add))
+#ifndef CVMX_ADD_IO_SEG
+#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
+#endif
+
+/**
+ * Convert a memory pointer (void*) into a hardware compatible
+ * memory address (uint64_t). Octeon hardware widgets don't
+ * understand logical addresses.
+ *
+ * @ptr: C style memory pointer
+ * Returns Hardware physical address
+ */
+static inline uint64_t cvmx_ptr_to_phys(void *ptr)
+{
+ if (sizeof(void *) == 8) {
+ /*
+ * We're running in 64 bit mode. Normally this means
+ * that we can use 40 bits of address space (the
+ * hardware limit). Unfortunately there is one case
+ * where we need to limit this to 30 bits, sign
+ * extended 32 bit. Although these are 64 bits wide,
+ * only 30 bits can be used.
+ */
+ if ((CAST64(ptr) >> 62) == 3)
+ return CAST64(ptr) & cvmx_build_mask(30);
+ else
+ return CAST64(ptr) & cvmx_build_mask(40);
+ } else {
+ return (long)(ptr) & 0x1fffffff;
+ }
+}
+
+/**
+ * Convert a hardware physical address (uint64_t) into a
+ * memory pointer (void *).
+ *
+ * @physical_address:
+ * Hardware physical address to memory
+ * Returns Pointer to memory
+ */
+static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
+{
+ if (sizeof(void *) == 8) {
+ /* Just set the top bit, avoiding any TLB ugliness */
+ return CASTPTR(void,
+ CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
+ physical_address));
+ } else {
+ return CASTPTR(void,
+ CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0,
+ physical_address));
+ }
+}
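A small sketch of the expected round trip for directly mapped (KSEG0/XKPHYS) kernel buffers; the wrapper name is invented for illustration.

static inline int cvmx_addr_roundtrip_ok(void *buf)
{
	uint64_t phys = cvmx_ptr_to_phys(buf);

	/* For directly mapped kernel buffers the conversion is reversible. */
	return cvmx_phys_to_ptr(phys) == buf;
}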
+
+/* The CVMX_BUILD_WRITE64 macro below is used to build a store operation
+ to a full 64bit address. With a 64bit ABI, this can be done with a
+ simple pointer access. 32bit ABIs would require more complicated
+ assembly. */
+
+/* We have a full 64bit ABI. Writing to a 64bit address can be done with
+ a simple volatile pointer */
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
+}
+
+
+/* The CVMX_BUILD_READ64 macro below is used to build a load operation
+ from a full 64bit address. With a 64bit ABI, this can be done with a
+ simple pointer access. 32bit ABIs would require more complicated
+ assembly. */
+
+/* We have a full 64bit ABI. Reading from a 64bit address can be done
+ with a simple volatile pointer */
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
+ return *CASTPTR(volatile TYPE##_t, addr); \
+}
+
+
+/* The following defines 8 functions for writing to a 64bit address. Each
+ takes two arguments, the address and the value to write.
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
+CVMX_BUILD_WRITE64(int64, "sd");
+CVMX_BUILD_WRITE64(int32, "sw");
+CVMX_BUILD_WRITE64(int16, "sh");
+CVMX_BUILD_WRITE64(int8, "sb");
+CVMX_BUILD_WRITE64(uint64, "sd");
+CVMX_BUILD_WRITE64(uint32, "sw");
+CVMX_BUILD_WRITE64(uint16, "sh");
+CVMX_BUILD_WRITE64(uint8, "sb");
+#define cvmx_write64 cvmx_write64_uint64
+
+/* The following defines 8 functions for reading from a 64bit address. Each
+ takes the address as the only argument
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
+CVMX_BUILD_READ64(int64, "ld");
+CVMX_BUILD_READ64(int32, "lw");
+CVMX_BUILD_READ64(int16, "lh");
+CVMX_BUILD_READ64(int8, "lb");
+CVMX_BUILD_READ64(uint64, "ld");
+CVMX_BUILD_READ64(uint32, "lw");
+CVMX_BUILD_READ64(uint16, "lhu");
+CVMX_BUILD_READ64(uint8, "lbu");
+#define cvmx_read64 cvmx_read64_uint64
+
+
+static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
+{
+ cvmx_write64(csr_addr, val);
+
+ /*
+ * Perform an immediate read after every write to an RSL
+ * register to force the write to complete. It doesn't matter
+ * what RSL read we do, so we choose CVMX_MIO_BOOT_BIST_STAT
+ * because it is fast and harmless.
+ */
+ if ((csr_addr >> 40) == (0x800118))
+ cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
+}
+
+static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
+{
+ cvmx_write64(io_addr, val);
+}
+
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
+{
+ uint64_t val = cvmx_read64(csr_addr);
+ return val;
+}
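These accessors are normally used together with the register unions from the cvmx-*-defs.h headers included above. The sketch below mirrors cvmx_reset_octeon() further down in this file, but reads the register first to show the usual read-modify-write pattern; the function name is invented for illustration.

static inline void cvmx_example_csr_rmw(void)
{
	union cvmx_ciu_soft_rst rst;

	rst.u64 = cvmx_read_csr(CVMX_CIU_SOFT_RST);	/* read */
	rst.s.soft_rst = 1;				/* modify */
	cvmx_write_csr(CVMX_CIU_SOFT_RST, rst.u64);	/* write */
}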
+
+
+static inline void cvmx_send_single(uint64_t data)
+{
+ const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
+ cvmx_write64(CVMX_IOBDMA_SENDSINGLE, data);
+}
+
+static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
+{
+ union {
+ uint64_t u64;
+ struct {
+ uint64_t scraddr:8;
+ uint64_t len:8;
+ uint64_t addr:48;
+ } s;
+ } addr;
+ addr.u64 = csr_addr;
+ addr.s.scraddr = scraddr >> 3;
+ addr.s.len = 1;
+ cvmx_send_single(addr.u64);
+}
+
+/* Return true if Octeon is CN38XX pass 1 */
+static inline int cvmx_octeon_is_pass1(void)
+{
+#if OCTEON_IS_COMMON_BINARY()
+ return 0; /* Pass 1 isn't supported for common binaries */
+#else
+/* Now that we know we're built for a specific model, only check CN38XX */
+#if OCTEON_IS_MODEL(OCTEON_CN38XX)
+ return cvmx_get_proc_id() == OCTEON_CN38XX_PASS1;
+#else
+ return 0; /* Built for non CN38XX chip, we're not CN38XX pass1 */
+#endif
+#endif
+}
+
+static inline unsigned int cvmx_get_core_num(void)
+{
+ unsigned int core_num;
+ CVMX_RDHWRNV(core_num, 0);
+ return core_num;
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for POP instruction.
+ *
+ * @val: 32 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline uint32_t cvmx_pop(uint32_t val)
+{
+ uint32_t pop;
+ CVMX_POP(pop, val);
+ return pop;
+}
+
+/**
+ * Returns the number of bits set in the provided value.
+ * Simple wrapper for DPOP instruction.
+ *
+ * @val: 64 bit value to count set bits in
+ *
+ * Returns Number of bits set
+ */
+static inline int cvmx_dpop(uint64_t val)
+{
+ int pop;
+ CVMX_DPOP(pop, val);
+ return pop;
+}
+
+/**
+ * Provide current cycle counter as a return value
+ *
+ * Returns current cycle counter
+ */
+
+static inline uint64_t cvmx_get_cycle(void)
+{
+ uint64_t cycle;
+ CVMX_RDHWR(cycle, 31);
+ return cycle;
+}
+
+/**
+ * Reads a chip global cycle counter. This counts CPU cycles since
+ * chip reset. The counter is 64 bit.
+ * This register does not exist on CN38XX pass 1 silicon
+ *
+ * Returns Global chip cycle count since chip reset.
+ */
+static inline uint64_t cvmx_get_cycle_global(void)
+{
+ if (cvmx_octeon_is_pass1())
+ return 0;
+ else
+ return cvmx_read64(CVMX_IPD_CLK_COUNT);
+}
+
+/**
+ * This macro spins on a field waiting for it to reach a value. It
+ * is common in code to need to wait for a specific field in a CSR
+ * to match a specific value. Conceptually this macro expands to:
+ *
+ * 1) read csr at "address" with a csr typedef of "type"
+ * 2) Check if ("type".s."field" "op" "value")
+ * 3) If #2 isn't true loop to #1 unless too much time has passed.
+ */
+#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, timeout_usec)\
+ ( \
+{ \
+ int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ cvmx_sysinfo_get()->cpu_clock_hz / 1000000; \
+ type c; \
+ while (1) { \
+ c.u64 = cvmx_read_csr(address); \
+ if ((c.s.field) op(value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result; \
+})
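An illustrative wrapper showing the calling convention of this macro; the union and field names below are invented solely for this example and do not describe a real CSR.

union cvmx_example_csr {
	uint64_t u64;
	struct {
		uint64_t reserved:63;
		uint64_t busy:1;
	} s;
};

static inline int cvmx_example_wait_not_busy(uint64_t csr_address)
{
	/* Wait up to 10000 microseconds for the busy field to clear;
	 * evaluates to 0 on success, -1 on timeout. */
	return CVMX_WAIT_FOR_FIELD64(csr_address, union cvmx_example_csr,
				     busy, ==, 0, 10000);
}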
+
+/***************************************************************************/
+
+static inline void cvmx_reset_octeon(void)
+{
+ union cvmx_ciu_soft_rst ciu_soft_rst;
+ ciu_soft_rst.u64 = 0;
+ ciu_soft_rst.s.soft_rst = 1;
+ cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
+}
+
+/* Return the number of cores available in the chip */
+static inline uint32_t cvmx_octeon_num_cores(void)
+{
+ uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
+ return cvmx_pop(ciu_fuse);
+}
+
+/**
+ * Read a byte of fuse data
+ * @byte_addr: address to read
+ *
+ * Returns fuse value: 0 or 1
+ */
+static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
+{
+ union cvmx_mio_fus_rcmd read_cmd;
+
+ read_cmd.u64 = 0;
+ read_cmd.s.addr = byte_addr;
+ read_cmd.s.pend = 1;
+ cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
+ while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
+ && read_cmd.s.pend)
+ ;
+ return read_cmd.s.dat;
+}
+
+/**
+ * Read a single fuse bit
+ *
+ * @fuse: Fuse number (0-1024)
+ *
+ * Returns fuse value: 0 or 1
+ */
+static inline int cvmx_fuse_read(int fuse)
+{
+ return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
+}
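A worked example of the byte/bit split: fuse 121 (the ZIP-disable fuse tested in octeon-feature.h) lives in byte 121 >> 3 == 15, bit 121 & 7 == 1, so the call below is equivalent to (cvmx_fuse_read_byte(15) >> 1) & 1. The wrapper name is invented for illustration.

static inline int cvmx_example_zip_fuse_blown(void)
{
	return cvmx_fuse_read(121);
}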
+
+static inline int cvmx_octeon_model_CN36XX(void)
+{
+ return OCTEON_IS_MODEL(OCTEON_CN38XX)
+ && !cvmx_octeon_is_pass1()
+ && cvmx_fuse_read(264);
+}
+
+static inline int cvmx_octeon_zip_present(void)
+{
+ return octeon_has_feature(OCTEON_FEATURE_ZIP);
+}
+
+static inline int cvmx_octeon_dfa_present(void)
+{
+ if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN31XX)
+ && !OCTEON_IS_MODEL(OCTEON_CN58XX))
+ return 0;
+ else if (OCTEON_IS_MODEL(OCTEON_CN3020))
+ return 0;
+ else if (cvmx_octeon_is_pass1())
+ return 1;
+ else
+ return !cvmx_fuse_read(120);
+}
+
+static inline int cvmx_octeon_crypto_present(void)
+{
+ return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
+}
+
+#endif /* __CVMX_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
new file mode 100644
index 000000000000..04fac684069c
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -0,0 +1,119 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ * File defining checks for different Octeon features.
+ */
+
+#ifndef __OCTEON_FEATURE_H__
+#define __OCTEON_FEATURE_H__
+
+enum octeon_feature {
+ /*
+ * Octeon models in the CN5XXX family and higher support
+ * atomic add instructions to memory (saa/saad).
+ */
+ OCTEON_FEATURE_SAAD,
+ /* Does this Octeon support the ZIP offload engine? */
+ OCTEON_FEATURE_ZIP,
+ /* Does this Octeon support crypto acceleration using COP2? */
+ OCTEON_FEATURE_CRYPTO,
+ /* Does this Octeon support PCI express? */
+ OCTEON_FEATURE_PCIE,
+ /* Some Octeon models support internal memory for storing
+ * cryptographic keys */
+ OCTEON_FEATURE_KEY_MEMORY,
+ /* Octeon has a LED controller for banks of external LEDs */
+ OCTEON_FEATURE_LED_CONTROLLER,
+ /* Octeon has a trace buffer */
+ OCTEON_FEATURE_TRA,
+ /* Octeon has a management port */
+ OCTEON_FEATURE_MGMT_PORT,
+ /* Octeon has a raid unit */
+ OCTEON_FEATURE_RAID,
+ /* Octeon has a builtin USB */
+ OCTEON_FEATURE_USB,
+};
+
+static inline int cvmx_fuse_read(int fuse);
+
+/**
+ * Determine if the current Octeon supports a specific feature. These
+ * checks have been optimized to be fairly quick, but they should still
+ * be kept out of fast path code.
+ *
+ * @feature: Feature to check for. This should always be a constant so the
+ * compiler can remove the switch statement through optimization.
+ *
+ * Returns Non zero if the feature exists. Zero if the feature does not
+ * exist.
+ */
+static inline int octeon_has_feature(enum octeon_feature feature)
+{
+ switch (feature) {
+ case OCTEON_FEATURE_SAAD:
+ return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
+
+ case OCTEON_FEATURE_ZIP:
+ if (OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX))
+ return 0;
+ else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
+ return 1;
+ else
+ return !cvmx_fuse_read(121);
+
+ case OCTEON_FEATURE_CRYPTO:
+ return !cvmx_fuse_read(90);
+
+ case OCTEON_FEATURE_PCIE:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX);
+
+ case OCTEON_FEATURE_KEY_MEMORY:
+ case OCTEON_FEATURE_LED_CONTROLLER:
+ return OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX)
+ || OCTEON_IS_MODEL(OCTEON_CN56XX);
+ case OCTEON_FEATURE_TRA:
+ return !(OCTEON_IS_MODEL(OCTEON_CN30XX)
+ || OCTEON_IS_MODEL(OCTEON_CN50XX));
+ case OCTEON_FEATURE_MGMT_PORT:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX);
+ case OCTEON_FEATURE_RAID:
+ return OCTEON_IS_MODEL(OCTEON_CN56XX)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX);
+ case OCTEON_FEATURE_USB:
+ return !(OCTEON_IS_MODEL(OCTEON_CN38XX)
+ || OCTEON_IS_MODEL(OCTEON_CN58XX));
+ }
+ return 0;
+}
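A usage sketch; passing a compile-time constant lets the compiler fold the switch above away, as the comment recommends. The wrapper name is invented for illustration.

static inline int octeon_example_need_usb_driver(void)
{
	return octeon_has_feature(OCTEON_FEATURE_USB);
}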
+
+#endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
new file mode 100644
index 000000000000..cf50336eca2e
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -0,0 +1,321 @@
+/***********************license start***************
+ * Author: Cavium Networks
+ *
+ * Contact: support@caviumnetworks.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Networks for more information
+ ***********************license end**************************************/
+
+/*
+ *
+ * File defining different Octeon model IDs and macros to
+ * compare them.
+ *
+ */
+
+#ifndef __OCTEON_MODEL_H__
+#define __OCTEON_MODEL_H__
+
+/* NOTE: These must match what is checked in common-config.mk */
+/* Defines to represent the different versions of Octeon. */
+
+/*
+ * IMPORTANT: When the default pass is updated for an Octeon Model,
+ * the corresponding change must also be made in the oct-sim script.
+ */
+
+/*
+ * The defines below should be used with the OCTEON_IS_MODEL() macro
+ * to determine what model of chip the software is running on. Models
+ * ending in 'XX' match multiple models (families), while specific
+ * models match only that model. If a pass (revision) is specified,
+ * then only that revision will be matched. Care should be taken when
+ * checking for both specific models and families that the specific
+ * models are checked for first. While these defines are similar to
+ * the processor ID, they are not intended to be used by anything
+ * other than the OCTEON_IS_MODEL framework, and the values are
+ * subject to change at any time without notice.
+ *
+ * NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
+ * macros should be used outside of this file. All other macros are
+ * for internal use only, and may change without notice.
+ */
+
+/* Flag bits in top byte */
+/* Ignores revision in model checks */
+#define OM_IGNORE_REVISION 0x01000000
+/* Check submodels */
+#define OM_CHECK_SUBMODEL 0x02000000
+/* Match all models previous than the one specified */
+#define OM_MATCH_PREVIOUS_MODELS 0x04000000
+/* Ignores the minor revision on newer parts */
+#define OM_IGNORE_MINOR_REVISION 0x08000000
+#define OM_FLAG_MASK 0xff000000
+
+/*
+ * CN5XXX models with new revision encoding
+ */
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+
+/*
+ * NOTE: Octeon CN5000F model is not identifiable using the
+ * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
+ */
+
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
+
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 \
+ | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+
+/*
+ * CN3XXX models with old revision encoding
+ */
+#define OCTEON_CN38XX_PASS1 0x000d0000
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+
+/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+
+/*
+ * This model is only used for internal checks; it is not a valid
+ * model for the OCTEON_MODEL environment variable. This matches the
+ * CN3010 and CN3005 but NOT the CN3020.
+ */
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION \
+ | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION \
+ | OM_CHECK_SUBMODEL)
+
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION \
+ | OM_CHECK_SUBMODEL)
+
+
+
+/* This matches the complete family of CN3xxx CPUs, and not subsequent models */
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 \
+ | OM_MATCH_PREVIOUS_MODELS \
+ | OM_IGNORE_REVISION)
+
+/* The revision byte (low byte) has two different encodings.
+ * CN3XXX:
+ *
+ * bits
+ * <7:5>: reserved (0)
+ * <4>: alternate package
+ * <3:0>: revision
+ *
+ * CN5XXX:
+ *
+ * bits
+ * <7>: reserved (0)
+ * <6>: alternate package
+ * <5:3>: major revision
+ * <2:0>: minor revision
+ *
+ */
+
+/* Masks used for the various types of model/family/revision matching */
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK \
+ | OCTEON_38XX_MODEL_MASK)
+
+/* CN5XXX and later use different layout of bits in the revision ID field */
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
+#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK \
+ | OCTEON_58XX_MODEL_MASK)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK \
+ & 0x00fffff8)
+
+#define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
+
+/* NOTE: This is for internal (to this file) use only. */
+static inline int __OCTEON_IS_MODEL_COMPILE__(uint32_t arg_model,
+ uint32_t chip_model)
+{
+ uint32_t rev_and_sub = OM_IGNORE_REVISION | OM_CHECK_SUBMODEL;
+
+ if ((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) {
+ if (((arg_model & OM_FLAG_MASK) == rev_and_sub) &&
+ __OCTEON_MATCH_MASK__(chip_model, arg_model,
+ OCTEON_38XX_MODEL_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == 0) &&
+ __OCTEON_MATCH_MASK__(chip_model, arg_model,
+ OCTEON_38XX_FAMILY_REV_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_REVISION) &&
+ __OCTEON_MATCH_MASK__(chip_model, arg_model,
+ OCTEON_38XX_FAMILY_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == OM_CHECK_SUBMODEL) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_38XX_MODEL_REV_MASK))
+ return 1;
+ if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
+ ((chip_model & OCTEON_38XX_MODEL_MASK) <
+ (arg_model & OCTEON_38XX_MODEL_MASK)))
+ return 1;
+ } else {
+ if (((arg_model & OM_FLAG_MASK) == rev_and_sub) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_58XX_MODEL_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == 0) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_58XX_FAMILY_REV_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_MINOR_REVISION) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_58XX_MODEL_MINOR_REV_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == OM_IGNORE_REVISION) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_58XX_FAMILY_MASK))
+ return 1;
+ if (((arg_model & OM_FLAG_MASK) == OM_CHECK_SUBMODEL) &&
+ __OCTEON_MATCH_MASK__((chip_model), (arg_model),
+ OCTEON_58XX_MODEL_REV_MASK))
+ return 1;
+ if ((arg_model & OM_MATCH_PREVIOUS_MODELS) &&
+ ((chip_model & OCTEON_58XX_MODEL_MASK) <
+ (arg_model & OCTEON_58XX_MODEL_MASK)))
+ return 1;
+ }
+ return 0;
+}
+
+/* forward declarations */
+static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
+static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
+
+/* NOTE: This is for internal use only. */
+static inline int __octeon_is_model_runtime__(uint32_t model)
+{
+ uint32_t cpuid = cvmx_get_proc_id();
+
+ /*
+ * Check for special case of mismarked 3005 samples. We only
+ * need to check if the sub model isn't being ignored.
+ */
+ if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
+ if (cpuid == OCTEON_CN3010_PASS1
+ && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
+ cpuid |= 0x10;
+ }
+ return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
+}
+
+/*
+ * The OCTEON_IS_MODEL macro should be used for all Octeon model
+ * checking done in a program. This should be kept runtime if at all
+ * possible. Any compile time (#if OCTEON_IS_MODEL) usage must be
+ * condtionalized with OCTEON_IS_COMMON_BINARY() if runtime checking
+ * support is required.
+ */
+#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
+#define OCTEON_IS_COMMON_BINARY() 1
+#undef OCTEON_MODEL
+
+const char *octeon_model_get_string(uint32_t chip_id);
+const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer);
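A sketch of the intended runtime usage, combining a family check with a narrower pass check; the wrapper name is invented for illustration.

static inline int octeon_example_is_early_cn56xx(void)
{
	/* Check the family first, then the specific pass. */
	return OCTEON_IS_MODEL(OCTEON_CN56XX)
		&& OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X);
}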
+
+#include "octeon-feature.h"
+
+#endif /* __OCTEON_MODEL_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
new file mode 100644
index 000000000000..edc676084cda
--- /dev/null
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -0,0 +1,248 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004-2008 Cavium Networks
+ */
+#ifndef __ASM_OCTEON_OCTEON_H
+#define __ASM_OCTEON_OCTEON_H
+
+#include "cvmx.h"
+
+extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
+ uint64_t alignment,
+ uint64_t min_addr,
+ uint64_t max_addr,
+ int do_locking);
+extern void *octeon_bootmem_alloc(uint64_t size, uint64_t alignment,
+ int do_locking);
+extern void *octeon_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr,
+ int do_locking);
+extern void *octeon_bootmem_alloc_named(uint64_t size, uint64_t alignment,
+ char *name);
+extern void *octeon_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name);
+extern void *octeon_bootmem_alloc_named_address(uint64_t size, uint64_t address,
+ char *name);
+extern int octeon_bootmem_free_named(char *name);
+extern void octeon_bootmem_lock(void);
+extern void octeon_bootmem_unlock(void);
+
+extern int octeon_is_simulation(void);
+extern int octeon_is_pci_host(void);
+extern int octeon_usb_is_ref_clk(void);
+extern uint64_t octeon_get_clock_rate(void);
+extern const char *octeon_board_type_string(void);
+extern const char *octeon_get_pci_interrupts(void);
+extern int octeon_get_southbridge_interrupt(void);
+extern int octeon_get_boot_coremask(void);
+extern int octeon_get_boot_num_arguments(void);
+extern const char *octeon_get_boot_argument(int arg);
+extern void octeon_hal_setup_reserved32(void);
+extern void octeon_user_io_init(void);
+struct octeon_cop2_state;
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *state);
+extern void octeon_crypto_disable(struct octeon_cop2_state *state,
+ unsigned long flags);
+
+extern void octeon_init_cvmcount(void);
+
+#define OCTEON_ARGV_MAX_ARGS 64
+#define OCTOEN_SERIAL_LEN 20
+
+struct octeon_boot_descriptor {
+ /* Start of block referenced by assembly code - do not change! */
+ uint32_t desc_version;
+ uint32_t desc_size;
+ uint64_t stack_top;
+ uint64_t heap_base;
+ uint64_t heap_end;
+ /* Only used by bootloader */
+ uint64_t entry_point;
+ uint64_t desc_vaddr;
+ /* End of the block referenced by assembly code - do not change! */
+ uint32_t exception_base_addr;
+ uint32_t stack_size;
+ uint32_t heap_size;
+ /* Argc count for application. */
+ uint32_t argc;
+ uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+ /* If set, use uart1 for console */
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+ /* If set, use PCI console */
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+ /* Call exit on break on serial port */
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
+
+ uint32_t flags;
+ uint32_t core_mask;
+ /* DRAM size in megabytes. */
+ uint32_t dram_size;
+ /* physical address of free memory descriptor block. */
+ uint32_t phy_mem_desc_addr;
+ /* used to pass flags from app to debugger. */
+ uint32_t debugger_flags_base_addr;
+ /* CPU clock speed, in hz. */
+ uint32_t eclock_hz;
+ /* DRAM clock speed, in hz. */
+ uint32_t dclock_hz;
+ /* SPI4 clock in hz. */
+ uint32_t spi_clock_hz;
+ uint16_t board_type;
+ uint8_t board_rev_major;
+ uint8_t board_rev_minor;
+ uint16_t chip_type;
+ uint8_t chip_rev_major;
+ uint8_t chip_rev_minor;
+ char board_serial_number[OCTOEN_SERIAL_LEN];
+ uint8_t mac_addr_base[6];
+ uint8_t mac_addr_count;
+ uint64_t cvmx_desc_vaddr;
+};
+
+union octeon_cvmemctl {
+ uint64_t u64;
+ struct {
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t tlbbist:1;
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t l1cbist:1;
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t l1dbist:1;
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t dcmbist:1;
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t ptgbist:1;
+ /* RO 1 = BIST fail, 0 = BIST pass */
+ uint64_t wbfbist:1;
+ /* Reserved */
+ uint64_t reserved:22;
+ /* R/W If set, marked write-buffer entries time out
+ * the same as other entries; if clear, marked
+ * write-buffer entries use the maximum timeout. */
+ uint64_t dismarkwblongto:1;
+ /* R/W If set, a merged store does not clear the
+ * write-buffer entry timeout state. */
+ uint64_t dismrgclrwbto:1;
+ /* R/W Two bits that are the MSBs of the resultant
+ * CVMSEG LM word location for an IOBDMA. The other 8
+ * bits come from the SCRADDR field of the IOBDMA. */
+ uint64_t iobdmascrmsb:2;
+ /* R/W If set, SYNCWS and SYNCS only order marked
+ * stores; if clear, SYNCWS and SYNCS only order
+ * unmarked stores. SYNCWSMARKED has no effect when
+ * DISSYNCWS is set. */
+ uint64_t syncwsmarked:1;
+ /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
+ * SYNC. */
+ uint64_t dissyncws:1;
+ /* R/W If set, no stall happens on write buffer
+ * full. */
+ uint64_t diswbfst:1;
+ /* R/W If set (and SX set), supervisor-level
+ * loads/stores can use XKPHYS addresses with
+ * VA<48>==0 */
+ uint64_t xkmemenas:1;
+ /* R/W If set (and UX set), user-level loads/stores
+ * can use XKPHYS addresses with VA<48>==0 */
+ uint64_t xkmemenau:1;
+ /* R/W If set (and SX set), supervisor-level
+ * loads/stores can use XKPHYS addresses with
+ * VA<48>==1 */
+ uint64_t xkioenas:1;
+ /* R/W If set (and UX set), user-level loads/stores
+ * can use XKPHYS addresses with VA<48>==1 */
+ uint64_t xkioenau:1;
+ /* R/W If set, all stores act as SYNCW (NOMERGE must
+ * be set when this is set) RW, reset to 0. */
+ uint64_t allsyncw:1;
+ /* R/W If set, no stores merge, and all stores reach
+ * the coherent bus in order. */
+ uint64_t nomerge:1;
+ /* R/W Selects the bit in the counter used for DID
+ * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
+ * 2^14. Actual time-out is between 1x and 2x this
+ * interval. For example, with DIDTTO=3, expiration
+ * interval is between 16K and 32K. */
+ uint64_t didtto:2;
+ /* R/W If set, the (mem) CSR clock never turns off. */
+ uint64_t csrckalwys:1;
+ /* R/W If set, mclk never turns off. */
+ uint64_t mclkalwys:1;
+ /* R/W Selects the bit in the counter used for write
+ * buffer flush time-outs. (WBFLT+11) is the bit
+ * position in an internal counter used to determine
+ * expiration. The write buffer expires between 1x and
+ * 2x this interval. For example, with WBFLT = 0, a
+ * write buffer expires between 2K and 4K cycles after
+ * the write buffer entry is allocated. */
+ uint64_t wbfltime:3;
+ /* R/W If set, do not put Istream in the L2 cache. */
+ uint64_t istrnol2:1;
+ /* R/W The write buffer threshold. */
+ uint64_t wbthresh:4;
+ /* Reserved */
+ uint64_t reserved2:2;
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * kernel/debug mode. */
+ uint64_t cvmsegenak:1;
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * supervisor mode. */
+ uint64_t cvmsegenas:1;
+ /* R/W If set, CVMSEG is available for loads/stores in
+ * user mode. */
+ uint64_t cvmsegenau:1;
+ /* R/W Size of local memory in cache blocks, 54 (6912
+ * bytes) is max legal value. */
+ uint64_t lmemsz:6;
+ } s;
+};
+
+struct octeon_cf_data {
+ unsigned long base_region_bias;
+ unsigned int base_region; /* The chip select region used by CF */
+ int is16bit; /* 0 - 8bit, !0 - 16bit */
+ int dma_engine; /* -1 for no DMA */
+};
+
+extern void octeon_write_lcd(const char *s);
+extern void octeon_check_cpu_bist(void);
+extern int octeon_get_boot_debug_flag(void);
+extern int octeon_get_boot_uart(void);
+
+struct uart_port;
+extern unsigned int octeon_serial_in(struct uart_port *, int);
+extern void octeon_serial_out(struct uart_port *, int, int);
+
+/**
+ * Write a 32bit value to the Octeon NPI register space
+ *
+ * @address: Address to write to
+ * @val: Value to write
+ */
+static inline void octeon_npi_write32(uint64_t address, uint32_t val)
+{
+ cvmx_write64_uint32(address ^ 4, val);
+ cvmx_read64_uint32(address ^ 4);
+}
+
+
+/**
+ * Read a 32bit value from the Octeon NPI register space
+ *
+ * @address: Address to read
+ * Returns The result
+ */
+static inline uint32_t octeon_npi_read32(uint64_t address)
+{
+ return cvmx_read64_uint32(address ^ 4);
+}
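An illustrative read-modify-write built on the two NPI accessors above; the register address is a caller-supplied placeholder rather than a documented offset, and the function name is invented for illustration.

static inline void octeon_example_npi_set_bit(uint64_t npi_reg, int bit)
{
	uint32_t v = octeon_npi_read32(npi_reg);

	octeon_npi_write32(npi_reg, v | (1u << bit));
}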
+
+#endif /* __ASM_OCTEON_OCTEON_H */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 18ee58e39445..0f926aa0cb47 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -118,6 +118,60 @@ union mips_watch_reg_state {
struct mips3264_watch_reg_state mips3264;
};
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+
+struct octeon_cop2_state {
+ /* DMFC2 rt, 0x0201 */
+ unsigned long cop2_crc_iv;
+ /* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
+ unsigned long cop2_crc_length;
+ /* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
+ unsigned long cop2_crc_poly;
+ /* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
+ unsigned long cop2_llm_dat[2];
+ /* DMFC2 rt, 0x0084 */
+ unsigned long cop2_3des_iv;
+ /* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
+ unsigned long cop2_3des_key[3];
+ /* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
+ unsigned long cop2_3des_result;
+ /* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
+ unsigned long cop2_aes_inp0;
+ /* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
+ unsigned long cop2_aes_iv[2];
+ /* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
+ * rt, 0x0107 */
+ unsigned long cop2_aes_key[4];
+ /* DMFC2 rt, 0x0110 */
+ unsigned long cop2_aes_keylen;
+ /* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
+ unsigned long cop2_aes_result[2];
+ /* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
+ * rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
+ * 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
+ * 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
+ * 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
+ unsigned long cop2_hsh_datw[15];
+ /* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
+ * rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
+ * 0x0256; DMFC2 rt, 0x0257 - Pass2 */
+ unsigned long cop2_hsh_ivw[8];
+ /* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
+ unsigned long cop2_gfm_mult[2];
+ /* DMFC2 rt, 0x025E - Pass2 */
+ unsigned long cop2_gfm_poly;
+ /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
+ unsigned long cop2_gfm_result[2];
+};
+#define INIT_OCTEON_COP2 {0,}
+
+struct octeon_cvmseg_state {
+ unsigned long cvmseg[CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE]
+ [cpu_dcache_line_size() / sizeof(unsigned long)];
+};
+
+#endif
+
typedef struct {
unsigned long seg;
} mm_segment_t;
@@ -160,6 +214,10 @@ struct thread_struct {
unsigned long trap_no;
unsigned long irix_trampoline; /* Wheee... */
unsigned long irix_oldctx;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
+ struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
+#endif
struct mips_abi *abi;
};
@@ -171,6 +229,13 @@ struct thread_struct {
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+#define OCTEON_INIT \
+ .cp2 = INIT_OCTEON_COP2,
+#else
+#define OCTEON_INIT
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+
#define INIT_THREAD { \
/* \
* Saved main processor registers \
@@ -221,6 +286,10 @@ struct thread_struct {
.trap_no = 0, \
.irix_trampoline = 0, \
.irix_oldctx = 0, \
+ /* \
+ * Cavium Octeon specifics (null if not Octeon) \
+ */ \
+ OCTEON_INIT \
}
struct task_struct;
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index c2c8bac43307..1f30d16d4669 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -48,6 +48,10 @@ struct pt_regs {
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ unsigned long long mpl[3]; /* MTM{0,1,2} */
+ unsigned long long mtp[3]; /* MTP{0,1,2} */
+#endif
} __attribute__ ((aligned (8)));
/* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 86557b5d1b3f..40e5ef1d4d26 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -37,6 +37,9 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
+/* Octeon - Tell another core to flush its icache */
+#define SMP_ICACHE_FLUSH 0x4
+
extern void asmlinkage smp_bootstrap(void);
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4c37c4e5f72e..db0fa7b5aeaf 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -194,6 +194,19 @@
LONG_S $31, PT_R31(sp)
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ .set mips64
+ pref 0, 0($28) /* Prefetch the current pointer */
+ pref 0, PT_R31(sp) /* Prefetch the $31(ra) */
+ /* The Octeon multiplier state is affected by general multiply
+ instructions. It must be saved here, before kernel code might
+ corrupt it */
+ jal octeon_mult_save
+ LONG_L v1, 0($28) /* Load the current pointer */
+ /* Restore $31(ra) that was changed by the jal */
+ LONG_L ra, PT_R31(sp)
+ pref 0, 0(v1) /* Prefetch the current thread */
+#endif
.set pop
.endm
@@ -324,6 +337,10 @@
DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ /* Restore the Octeon multiplier state */
+ jal octeon_mult_restore
+#endif
mfc0 a0, CP0_STATUS
ori a0, STATMASK
xori a0, STATMASK
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 9601ea950542..38a30d2ee959 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -50,27 +50,35 @@ extern int (*perf_irq)(void);
/*
* Initialize the calling CPU's compare interrupt as clockevent device
*/
-#ifdef CONFIG_CEVT_R4K
-extern int mips_clockevent_init(void);
+#ifdef CONFIG_CEVT_R4K_LIB
extern unsigned int __weak get_c0_compare_int(void);
-#else
+extern int r4k_clockevent_init(void);
+#endif
+
static inline int mips_clockevent_init(void)
{
+#ifdef CONFIG_CEVT_R4K
+ return r4k_clockevent_init();
+#else
return -ENXIO;
-}
#endif
+}
/*
* Initialize the count register as a clocksource
*/
-#ifdef CONFIG_CSRC_R4K
-extern int init_mips_clocksource(void);
-#else
+#ifdef CONFIG_CSRC_R4K_LIB
+extern int init_r4k_clocksource(void);
+#endif
+
static inline int init_mips_clocksource(void)
{
+#ifdef CONFIG_CSRC_R4K
+ return init_r4k_clocksource();
+#else
return 0;
-}
#endif
+}
extern void clocksource_set_clock(struct clocksource *cs, unsigned int clock);
extern void clockevent_set_clock(struct clock_event_device *cd,
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index b1372c27f136..e96122159928 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
time.o topology.o traps.o unaligned.o watch.o
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
-obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_R4K_LIB) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -17,7 +17,7 @@ obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
-obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
+obj-$(CONFIG_CSRC_R4K_LIB) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
@@ -43,6 +43,7 @@ obj-$(CONFIG_CPU_SB1) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_TX49XX) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_VR41XX) += r4k_fpu.o r4k_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 72942226fcdd..c901c22d7ad0 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -64,6 +64,10 @@ void output_ptreg_defines(void)
#ifdef CONFIG_MIPS_MT_SMTC
OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ OFFSET(PT_MPL, pt_regs, mpl);
+ OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
DEFINE(PT_SIZE, sizeof(struct pt_regs));
BLANK();
}
@@ -295,3 +299,30 @@ void output_irq_cpustat_t_defines(void)
DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
BLANK();
}
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+ COMMENT("Octeon specific octeon_cop2_state offsets.");
+ OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
+ OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
+ OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
+ OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
+ OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
+ OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
+ OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
+ OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
+ OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
+ OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
+ OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
+ OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
+ OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
+ OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
+ OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
+ OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
+ OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+ OFFSET(THREAD_CP2, task_struct, thread.cp2);
+ OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
+ BLANK();
+}
+#endif
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 6b5df8bfab85..0176ed015c89 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -205,6 +205,39 @@ int __compute_return_epc(struct pt_regs *regs)
break;
}
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32))) == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32)))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#endif
}
return 0;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index e1ec83b68031..0015e442572b 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -160,7 +160,7 @@ int c0_compare_int_usable(void)
#ifndef CONFIG_MIPS_MT_SMTC
-int __cpuinit mips_clockevent_init(void)
+int __cpuinit r4k_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c9207b5fd923..a7162a4484cf 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -96,6 +96,9 @@ int allow_au1k_wait;
static void au1k_wait(void)
{
+ if (!allow_au1k_wait)
+ return;
+
/* using the wait instruction makes CP0 counter unusable */
__asm__(" .set mips3 \n"
" cache 0x14, 0(%0) \n"
@@ -154,6 +157,7 @@ void __init check_wait(void)
case CPU_25KF:
case CPU_PR4450:
case CPU_BCM3302:
+ case CPU_CAVIUM_OCTEON:
cpu_wait = r4k_wait;
break;
@@ -185,8 +189,7 @@ void __init check_wait(void)
case CPU_AU1200:
case CPU_AU1210:
case CPU_AU1250:
- if (allow_au1k_wait)
- cpu_wait = au1k_wait;
+ cpu_wait = au1k_wait;
break;
case CPU_20KC:
/*
@@ -875,6 +878,27 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
}
}
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & 0xff00) {
+ case PRID_IMP_CAVIUM_CN38XX:
+ case PRID_IMP_CAVIUM_CN31XX:
+ case PRID_IMP_CAVIUM_CN30XX:
+ case PRID_IMP_CAVIUM_CN58XX:
+ case PRID_IMP_CAVIUM_CN56XX:
+ case PRID_IMP_CAVIUM_CN50XX:
+ case PRID_IMP_CAVIUM_CN52XX:
+ c->cputype = CPU_CAVIUM_OCTEON;
+ __cpu_name[cpu] = "Cavium Octeon";
+ break;
+ default:
+ printk(KERN_INFO "Unknown Octeon chip!\n");
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+}
+
const char *__cpu_name[NR_CPUS];
__cpuinit void cpu_probe(void)
@@ -909,6 +933,9 @@ __cpuinit void cpu_probe(void)
case PRID_COMP_NXP:
cpu_probe_nxp(c, cpu);
break;
+ case PRID_COMP_CAVIUM:
+ cpu_probe_cavium(c, cpu);
+ break;
}
BUG_ON(!__cpu_name[cpu]);
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 74fb74583b4e..f1a2893931ed 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -22,7 +22,7 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-int __init init_mips_clocksource(void)
+int __init init_r4k_clocksource(void)
{
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 757d48f0d80f..fb6f73148df2 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -385,10 +385,14 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro __build_clear_fpe
+ .set push
+ /* gas fails to assemble cfc1 for some archs (octeon).*/ \
+ .set mips1
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
ctc1 a2, fcr31
+ .set pop
TRACE_IRQS_ON
STI
.endm
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 494a49a317e9..87deb8f6c458 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
}
- irq_desc[irq].affinity = *cpumask;
+ cpumask_copy(irq_desc[irq].affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
}
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 4b4007b3083a..a0ff2b66e22b 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -111,6 +111,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
seq_printf(p, " %14s", irq_desc[i].chip->name);
+ seq_printf(p, "-%-8s", irq_desc[i].name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 000000000000..d52389672b06
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,506 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable-bits.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+/*
+ * Offset to the current process status flags, the first 32 bytes of the
+ * stack are not used.
+ */
+#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 7
+ LEAF(resume)
+ .set arch=octeon
+#ifndef CONFIG_CPU_HAS_LLSC
+ sw zero, ll_bit
+#endif
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+ /* check if we need to save COP2 registers */
+ PTR_L t2, TASK_THREAD_INFO(a0)
+ LONG_L t0, ST_OFF(t2)
+ bbit0 t0, 30, 1f
+
+ /* Disable COP2 in the stored process state */
+ li t1, ST0_CU2
+ xor t0, t1
+ LONG_S t0, ST_OFF(t2)
+
+ /* Enable COP2 so we can save it */
+ mfc0 t0, CP0_STATUS
+ or t0, t1
+ mtc0 t0, CP0_STATUS
+
+ /* Save COP2 */
+ daddu a0, THREAD_CP2
+ jal octeon_cop2_save
+ dsubu a0, THREAD_CP2
+
+ /* Disable COP2 now that we are done */
+ mfc0 t0, CP0_STATUS
+ li t1, ST0_CU2
+ xor t0, t1
+ mtc0 t0, CP0_STATUS
+
+1:
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ /* Check if we need to store CVMSEG state */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ bbit0 t0, 6, 3f /* Is user access enabled? */
+
+ /* Store the CVMSEG state */
+ /* Extract the size of CVMSEG */
+ andi t0, 0x3f
+ /* Multiply * (cache line size/sizeof(long)/2) */
+ sll t0, 7-LONGLOG-1
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ synciobdma
+2:
+ .set noreorder
+ LONG_L t8, 0(t1) /* Load from CVMSEG */
+ subu t0, 1 /* Decrement loop var */
+ LONG_L t9, LONGSIZE(t1)/* Load from CVMSEG */
+ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
+ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+ bnez t0, 2b /* Loop until we've copied it all */
+ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+ .set reorder
+
+ /* Disable access to CVMSEG */
+ mfc0 t0, $11,7 /* CvmMemCtl */
+ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
+ mtc0 t0, $11,7 /* CvmMemCtl */
+#endif
+3:
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+#if (_THREAD_SIZE - 32) < 0x8000
+ PTR_ADDIU t0, $28, _THREAD_SIZE - 32
+#else
+ PTR_LI t0, _THREAD_SIZE - 32
+ PTR_ADDU t0, $28
+#endif
+ set_saved_sp t0, t1, t2
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ LEAF(octeon_cop2_save)
+
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ /* Save the COP2 CRC state */
+ dmfc2 t0, 0x0201
+ dmfc2 t1, 0x0202
+ dmfc2 t2, 0x0200
+ sd t0, OCTEON_CP2_CRC_IV(a0)
+ sd t1, OCTEON_CP2_CRC_LENGTH(a0)
+ sd t2, OCTEON_CP2_CRC_POLY(a0)
+ /* Skip next instructions if CvmCtl[NODFA_CP2] set */
+ bbit1 t9, 28, 1f
+
+ /* Save the LLM state */
+ dmfc2 t0, 0x0402
+ dmfc2 t1, 0x040A
+ sd t0, OCTEON_CP2_LLM_DAT(a0)
+ sd t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+
+ /* Save the COP2 crypto state */
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
+ sd t0, OCTEON_CP2_3DES_IV(a0)
+ dmfc2 t0, 0x0088
+ sd t1, OCTEON_CP2_3DES_KEY(a0)
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ sd t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmfc2 t2, 0x0102
+ sd t3, OCTEON_CP2_3DES_KEY+16(a0)
+ dmfc2 t3, 0x0103
+ sd t0, OCTEON_CP2_3DES_RESULT(a0)
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
+ sd t2, OCTEON_CP2_AES_IV(a0)
+ dmfc2 t2, 0x0106
+ sd t3, OCTEON_CP2_AES_IV+8(a0)
+ dmfc2 t3, 0x0107
+ sd t0, OCTEON_CP2_AES_KEY(a0)
+ dmfc2 t0, 0x0110
+ sd t1, OCTEON_CP2_AES_KEY+8(a0)
+ dmfc2 t1, 0x0100
+ sd t2, OCTEON_CP2_AES_KEY+16(a0)
+ dmfc2 t2, 0x0101
+ sd t3, OCTEON_CP2_AES_KEY+24(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ sd t0, OCTEON_CP2_AES_KEYLEN(a0)
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ sd t1, OCTEON_CP2_AES_RESULT(a0)
+ sd t2, OCTEON_CP2_AES_RESULT+8(a0)
+ /* Skip to the Pass1 version of the remainder of the COP2 state */
+ beq t3, t0, 2f
+
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ dmfc2 t1, 0x0240
+ dmfc2 t2, 0x0241
+ dmfc2 t3, 0x0242
+ dmfc2 t0, 0x0243
+ sd t1, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t1, 0x0244
+ sd t2, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t2, 0x0245
+ sd t3, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t3, 0x0246
+ sd t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t0, 0x0247
+ sd t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t1, 0x0248
+ sd t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t2, 0x0249
+ sd t3, OCTEON_CP2_HSH_DATW+48(a0)
+ dmfc2 t3, 0x024A
+ sd t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmfc2 t0, 0x024B
+ sd t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmfc2 t1, 0x024C
+ sd t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmfc2 t2, 0x024D
+ sd t3, OCTEON_CP2_HSH_DATW+80(a0)
+ dmfc2 t3, 0x024E
+ sd t0, OCTEON_CP2_HSH_DATW+88(a0)
+ dmfc2 t0, 0x0250
+ sd t1, OCTEON_CP2_HSH_DATW+96(a0)
+ dmfc2 t1, 0x0251
+ sd t2, OCTEON_CP2_HSH_DATW+104(a0)
+ dmfc2 t2, 0x0252
+ sd t3, OCTEON_CP2_HSH_DATW+112(a0)
+ dmfc2 t3, 0x0253
+ sd t0, OCTEON_CP2_HSH_IVW(a0)
+ dmfc2 t0, 0x0254
+ sd t1, OCTEON_CP2_HSH_IVW+8(a0)
+ dmfc2 t1, 0x0255
+ sd t2, OCTEON_CP2_HSH_IVW+16(a0)
+ dmfc2 t2, 0x0256
+ sd t3, OCTEON_CP2_HSH_IVW+24(a0)
+ dmfc2 t3, 0x0257
+ sd t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmfc2 t0, 0x0258
+ sd t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmfc2 t1, 0x0259
+ sd t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmfc2 t2, 0x025E
+ sd t3, OCTEON_CP2_HSH_IVW+56(a0)
+ dmfc2 t3, 0x025A
+ sd t0, OCTEON_CP2_GFM_MULT(a0)
+ dmfc2 t0, 0x025B
+ sd t1, OCTEON_CP2_GFM_MULT+8(a0)
+ sd t2, OCTEON_CP2_GFM_POLY(a0)
+ sd t3, OCTEON_CP2_GFM_RESULT(a0)
+ sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
+ jr ra
+
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+ dmfc2 t3, 0x0040
+ dmfc2 t0, 0x0041
+ dmfc2 t1, 0x0042
+ dmfc2 t2, 0x0043
+ sd t3, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t3, 0x0044
+ sd t0, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t0, 0x0045
+ sd t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t1, 0x0046
+ sd t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t2, 0x0048
+ sd t3, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t3, 0x0049
+ sd t0, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t0, 0x004A
+ sd t1, OCTEON_CP2_HSH_DATW+48(a0)
+ sd t2, OCTEON_CP2_HSH_IVW(a0)
+ sd t3, OCTEON_CP2_HSH_IVW+8(a0)
+ sd t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+ jr ra
+ END(octeon_cop2_save)
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_restore)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ pref 4, 256(a0)
+ ld t0, OCTEON_CP2_CRC_IV(a0)
+ pref 4, 384(a0)
+ ld t1, OCTEON_CP2_CRC_LENGTH(a0)
+ ld t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Restore the COP2 CRC state */
+ dmtc2 t0, 0x0201
+ dmtc2 t1, 0x1202
+ bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
+ dmtc2 t2, 0x4200
+
+ /* Restore the LLM state */
+ ld t0, OCTEON_CP2_LLM_DAT(a0)
+ ld t1, OCTEON_CP2_LLM_DAT+8(a0)
+ dmtc2 t0, 0x0402
+ dmtc2 t1, 0x040A
+
+2:
+ bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
+ nop
+
+ /* Restore the COP2 crypto state common to pass 1 and pass 2 */
+ ld t0, OCTEON_CP2_3DES_IV(a0)
+ ld t1, OCTEON_CP2_3DES_KEY(a0)
+ ld t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmtc2 t0, 0x0084
+ ld t0, OCTEON_CP2_3DES_KEY+16(a0)
+ dmtc2 t1, 0x0080
+ ld t1, OCTEON_CP2_3DES_RESULT(a0)
+ dmtc2 t2, 0x0081
+ ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+ dmtc2 t0, 0x0082
+ ld t0, OCTEON_CP2_AES_IV(a0)
+ dmtc2 t1, 0x0098
+ ld t1, OCTEON_CP2_AES_IV+8(a0)
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ ld t2, OCTEON_CP2_AES_KEY(a0)
+ dmtc2 t0, 0x0102
+ ld t0, OCTEON_CP2_AES_KEY+8(a0)
+ dmtc2 t1, 0x0103
+ ld t1, OCTEON_CP2_AES_KEY+16(a0)
+ dmtc2 t2, 0x0104
+ ld t2, OCTEON_CP2_AES_KEY+24(a0)
+ dmtc2 t0, 0x0105
+ ld t0, OCTEON_CP2_AES_KEYLEN(a0)
+ dmtc2 t1, 0x0106
+ ld t1, OCTEON_CP2_AES_RESULT(a0)
+ dmtc2 t2, 0x0107
+ ld t2, OCTEON_CP2_AES_RESULT+8(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ dmtc2 t0, 0x0110
+ li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ dmtc2 t1, 0x0100
+ bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
+ dmtc2 t2, 0x0101
+
+ /* this code is specific for pass 1 */
+ ld t0, OCTEON_CP2_HSH_DATW(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t2, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t0, 0x0040
+ ld t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t1, 0x0041
+ ld t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t2, 0x0042
+ ld t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t0, 0x0043
+ ld t0, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t1, 0x0044
+ ld t1, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t2, 0x0045
+ ld t2, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t0, 0x0046
+ ld t0, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t1, 0x0048
+ dmtc2 t2, 0x0049
+ b done_restore /* unconditional branch */
+ dmtc2 t0, 0x004A
+
+3: /* this is post-pass1 code */
+ ld t2, OCTEON_CP2_HSH_DATW(a0)
+ ld t0, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t2, 0x0240
+ ld t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t0, 0x0241
+ ld t0, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t1, 0x0242
+ ld t1, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t2, 0x0243
+ ld t2, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t0, 0x0244
+ ld t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmtc2 t1, 0x0245
+ ld t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmtc2 t2, 0x0246
+ ld t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmtc2 t0, 0x0247
+ ld t0, OCTEON_CP2_HSH_DATW+80(a0)
+ dmtc2 t1, 0x0248
+ ld t1, OCTEON_CP2_HSH_DATW+88(a0)
+ dmtc2 t2, 0x0249
+ ld t2, OCTEON_CP2_HSH_DATW+96(a0)
+ dmtc2 t0, 0x024A
+ ld t0, OCTEON_CP2_HSH_DATW+104(a0)
+ dmtc2 t1, 0x024B
+ ld t1, OCTEON_CP2_HSH_DATW+112(a0)
+ dmtc2 t2, 0x024C
+ ld t2, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t0, 0x024D
+ ld t0, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t1, 0x024E
+ ld t1, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t2, 0x0250
+ ld t2, OCTEON_CP2_HSH_IVW+24(a0)
+ dmtc2 t0, 0x0251
+ ld t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmtc2 t1, 0x0252
+ ld t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmtc2 t2, 0x0253
+ ld t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmtc2 t0, 0x0254
+ ld t0, OCTEON_CP2_HSH_IVW+56(a0)
+ dmtc2 t1, 0x0255
+ ld t1, OCTEON_CP2_GFM_MULT(a0)
+ dmtc2 t2, 0x0256
+ ld t2, OCTEON_CP2_GFM_MULT+8(a0)
+ dmtc2 t0, 0x0257
+ ld t0, OCTEON_CP2_GFM_POLY(a0)
+ dmtc2 t1, 0x0258
+ ld t1, OCTEON_CP2_GFM_RESULT(a0)
+ dmtc2 t2, 0x0259
+ ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
+ dmtc2 t0, 0x025E
+ dmtc2 t1, 0x025A
+ dmtc2 t2, 0x025B
+
+done_restore:
+ jr ra
+ nop
+ END(octeon_cop2_restore)
+ .set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
+ * safely modify k0 and k1.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_save)
+ dmfc0 k0, $9,7 /* CvmCtl register. */
+ bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */
+ nop
+
+ /* Save the multiplier state */
+ v3mulu k0, $0, $0
+ v3mulu k1, $0, $0
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
+ ori k1, $0, 1
+ v3mulu k1, k1, $0
+ sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ v3mulu k1, $0, $0
+ sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
+ jr ra
+ sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+ jr ra
+ END(octeon_mult_save)
+ .set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_restore)
+ dmfc0 k1, $9,7 /* CvmCtl register. */
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
+ bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
+ /* Normally falls through, so no time wasted here */
+ nop
+
+ /* Restore the multiplier state */
+ ld k1, PT_MTP+16(sp) /* P2 */
+ MTM0 v0 /* MPL0 */
+ ld v0, PT_MTP+8(sp) /* P1 */
+ MTM1 v1 /* MPL1 */
+ ld v1, PT_MTP(sp) /* P0 */
+ MTM2 k0 /* MPL2 */
+ MTP2 k1 /* P2 */
+ MTP1 v0 /* P1 */
+ jr ra
+ MTP0 v1 /* P0 */
+
+1: /* Resume here if CvmCtl[NOMUL] */
+ jr ra
+ nop
+ END(octeon_mult_restore)
+ .set pop
+
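The CVMSEG block in resume() above is dense; as a rough C rendering (a sketch with paraphrased names, not the kernel code) the sequence is: if CvmMemCtl says user access to the scratchpad is enabled, copy CvmMemCtl[5:0] cache lines of 128 bytes each from the fixed CVMSEG window into the outgoing task's save area, then clear the user-enable bit.

#include <stdint.h>
#include <stddef.h>

#define CVMSEG_LINE_BYTES	128
#define CVMMEMCTL_USER_EN	(1u << 6)	/* bit 6: user access enabled */

static uint64_t save_cvmseg(uint64_t cvmmemctl,
			    const volatile uint64_t *cvmseg_base,
			    uint64_t *save_area)
{
	size_t i, words;

	if (!(cvmmemctl & CVMMEMCTL_USER_EN))
		return cvmmemctl;	/* nothing mapped for user space: skip */

	/* low 6 bits give the CVMSEG size in 128-byte cache lines */
	words = (cvmmemctl & 0x3f) * (CVMSEG_LINE_BYTES / sizeof(uint64_t));

	for (i = 0; i < words; i++)
		save_area[i] = cvmseg_base[i];

	/* the assembly finally clears the user-enable bit with xori */
	return cvmmemctl & ~(uint64_t)CVMMEMCTL_USER_EN;
}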
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 1ca34104e593..c4f9ac17474a 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -49,19 +49,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
int ret;
switch (request) {
- /* when I and D space are separate, these will need to be fixed. */
- case PTRACE_PEEKTEXT: /* read word at location addr. */
- case PTRACE_PEEKDATA: {
- unsigned int tmp;
- int copied;
-
- copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
- ret = -EIO;
- if (copied != sizeof(tmp))
- break;
- ret = put_user(tmp, (unsigned int __user *) (unsigned long) data);
- break;
- }
/*
* Read 4 bytes of the other process' storage
@@ -208,16 +195,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
}
- /* when I and D space are separate, this will have to be fixed. */
- case PTRACE_POKETEXT: /* write the word at location addr. */
- case PTRACE_POKEDATA:
- ret = 0;
- if (access_process_vm(child, addr, &data, sizeof(data), 1)
- == sizeof(data))
- break;
- ret = -EIO;
- break;
-
/*
* Write 4 bytes into the other process' storage
* data is the 4 bytes that the user wants written
@@ -332,50 +309,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: { /* restart after signal. */
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL) {
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- else {
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- }
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
- break;
- }
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- wake_up_process(child);
- break;
-
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
break;
- case PTRACE_DETACH: /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
- break;
-
- case PTRACE_GETEVENTMSG:
- ret = put_user(child->ptrace_message,
- (unsigned int __user *) (unsigned long) data);
- break;
-
case PTRACE_GET_THREAD_AREA_3264:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) (unsigned long) data);
@@ -392,7 +330,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
default:
- ret = ptrace_request(child, request, addr, data);
+ ret = compat_ptrace_request(child, request, addr, data);
break;
}
out:
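
The deletions in compat_arch_ptrace() above are not lost functionality: the final hunk routes the default case to compat_ptrace_request(), and the generic compat code covers the removed requests (PEEKTEXT/PEEKDATA, POKETEXT/POKEDATA, SYSCALL/CONT, KILL, DETACH, GETEVENTMSG). The resulting shape, in a simplified stand-in with hypothetical names and values:

struct ex_task;

long ex_handle_thread_area(struct ex_task *child, unsigned int data);
long ex_compat_ptrace_request(struct ex_task *child, int request,
			      unsigned int addr, unsigned int data);

#define EX_PTRACE_GET_THREAD_AREA 25	/* hypothetical request number */

long ex_compat_arch_ptrace(struct ex_task *child, int request,
			   unsigned int addr, unsigned int data)
{
	switch (request) {
	case EX_PTRACE_GET_THREAD_AREA:		/* MIPS-specific request */
		return ex_handle_thread_area(child, data);
	default:				/* everything generic */
		return ex_compat_ptrace_request(child, request, addr, data);
	}
}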
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b6cca01ff82b..5f5af7d4c890 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
* and efficiency, we just pick the easiest one to find.
*/
- target = first_cpu(irq_desc[irq].affinity);
+ target = cpumask_first(irq_desc[irq].affinity);
/*
* We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
struct clock_event_device *cd;
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
+ int irq = MIPS_CPU_IRQ_BASE + 1;
+
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
irq_enter();
- kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
irq_exit();
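The kstat change in ipi_decode() recurs in later hunks (sgi-ip22, sibyte bcm1480/sb1250, mn10300 watchdog): instead of indexing a flat per-cpu array, the per-IRQ count is bumped through the IRQ descriptor, which is what a sparse descriptor layout expects. A stand-in illustration with simplified types, not the kernel API:

struct ex_irq_desc {
	unsigned int *kstat_irqs;	/* one counter per possible CPU */
};

static inline void ex_kstat_incr_irqs_this_cpu(struct ex_irq_desc *desc, int cpu)
{
	/* the descriptor, not a global array, owns the statistics now */
	desc->kstat_irqs[cpu]++;
}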
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 353056110f2b..f6083c6bfaa4 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -47,6 +47,7 @@
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
+#include <asm/irq.h>
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
@@ -78,6 +79,10 @@ extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+extern asmlinkage void octeon_cop2_restore(struct octeon_cop2_state *task);
+#endif
+
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
@@ -860,6 +865,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
unsigned int opcode;
unsigned int cpid;
int status;
+ unsigned long __maybe_unused flags;
die_if_kernel("do_cpu invoked from kernel context!", regs);
@@ -915,6 +921,17 @@ asmlinkage void do_cpu(struct pt_regs *regs)
return;
case 2:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ prefetch(&current->thread.cp2);
+ local_irq_save(flags);
+ KSTK_STATUS(current) |= ST0_CU2;
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ octeon_cop2_restore(&(current->thread.cp2));
+ write_c0_status(status & ~ST0_CU2);
+ local_irq_restore(flags);
+ return;
+#endif
case 3:
break;
}
@@ -1488,6 +1505,10 @@ void __cpuinit per_cpu_trap_init(void)
write_c0_hwrena(enable);
}
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ write_c0_hwrena(0xc000000f); /* Octeon has register 30 and 31 */
+#endif
+
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
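The do_cpu() hunk above implements lazy COP2 handling for Octeon: the first coprocessor-2-unusable fault re-enables CU2 in the task's saved Status word, temporarily enables COP2 in hardware, reloads the task's saved crypto/CRC state, and then disables COP2 again before returning to replay the faulting instruction. A simplified stand-in (not the kernel API; interrupts assumed already disabled by the caller):

#define EX_ST0_CU2 (1u << 30)		/* Status.CU2: coprocessor 2 usable */

struct ex_cop2_state;			/* opaque saved crypto/CRC unit state */
void ex_cop2_restore(struct ex_cop2_state *state);

static void ex_lazy_cop2_fault(unsigned int *task_status,
			       unsigned int *hw_status,
			       struct ex_cop2_state *state)
{
	*task_status |= EX_ST0_CU2;	/* user space keeps COP2 from now on */
	*hw_status |= EX_ST0_CU2;	/* enable COP2 so the restore can run */
	ex_cop2_restore(state);
	*hw_status &= ~EX_ST0_CU2;	/* kernel itself runs with COP2 off */
	/* returning to user space re-executes the faulting instruction */
}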
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index dbcf6511b74e..c13c7ad2cdae 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CPU_SB1) += dump_tlb.o
obj-$(CONFIG_CPU_TX39XX) += r3k_dump_tlb.o
obj-$(CONFIG_CPU_TX49XX) += dump_tlb.o
obj-$(CONFIG_CPU_VR41XX) += dump_tlb.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += dump_tlb.o
# libgcc-style stuff needed in the kernel
obj-y += ashldi3.o ashrdi3.o cmpdi2.o lshrdi3.o ucmpdi2.o
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 95ba32b5b720..d7ec95522292 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
obj-$(CONFIG_CPU_TX49XX) += c-r4k.o cex-gen.o tlb-r4k.o
obj-$(CONFIG_CPU_VR41XX) += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
new file mode 100644
index 000000000000..44d01a0a8490
--- /dev/null
+++ b/arch/mips/mm/c-octeon.c
@@ -0,0 +1,307 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005-2007 Cavium Networks
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/bcache.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-features.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/r4kcache.h>
+#include <asm/system.h>
+#include <asm/mmu_context.h>
+#include <asm/war.h>
+
+#include <asm/octeon/octeon.h>
+
+unsigned long long cache_err_dcache[NR_CPUS];
+
+/**
+ * Octeon automatically flushes the dcache on tlb changes, so
+ * from Linux's viewpoint it acts much like a physically
+ * tagged cache. No flushing is needed
+ *
+ */
+static void octeon_flush_data_cache_page(unsigned long addr)
+{
+ /* Nothing to do */
+}
+
+static inline void octeon_local_flush_icache(void)
+{
+ asm volatile ("synci 0($0)");
+}
+
+/*
+ * Flush local I-cache for the specified range.
+ */
+static void local_octeon_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ octeon_local_flush_icache();
+}
+
+/**
+ * Flush caches as necessary for all cores affected by a
+ * vma. If no vma is supplied, all cores are flushed.
+ *
+ * @vma: VMA to flush or NULL to flush all icaches.
+ */
+static void octeon_flush_icache_all_cores(struct vm_area_struct *vma)
+{
+ extern void octeon_send_ipi_single(int cpu, unsigned int action);
+#ifdef CONFIG_SMP
+ int cpu;
+ cpumask_t mask;
+#endif
+
+ mb();
+ octeon_local_flush_icache();
+#ifdef CONFIG_SMP
+ preempt_disable();
+ cpu = smp_processor_id();
+
+ /*
+ * If we have a vma structure, we only need to worry about
+ * cores it has been used on
+ */
+ if (vma)
+ mask = vma->vm_mm->cpu_vm_mask;
+ else
+ mask = cpu_online_map;
+ cpu_clear(cpu, mask);
+ for_each_cpu_mask(cpu, mask)
+ octeon_send_ipi_single(cpu, SMP_ICACHE_FLUSH);
+
+ preempt_enable();
+#endif
+}
+
+
+/**
+ * Called to flush the icache on all cores
+ */
+static void octeon_flush_icache_all(void)
+{
+ octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Called to flush all memory associated with a memory
+ * context.
+ *
+ * @mm: Memory context to flush
+ */
+static void octeon_flush_cache_mm(struct mm_struct *mm)
+{
+ /*
+ * According to the R4K version of this file, CPUs without
+ * dcache aliases don't need to do anything here
+ */
+}
+
+
+/**
+ * Flush a range of kernel addresses out of the icache
+ *
+ */
+static void octeon_flush_icache_range(unsigned long start, unsigned long end)
+{
+ octeon_flush_icache_all_cores(NULL);
+}
+
+
+/**
+ * Flush the icache for a trampoline. These are used for interrupt
+ * and exception hooking.
+ *
+ * @addr: Address to flush
+ */
+static void octeon_flush_cache_sigtramp(unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ vma = find_vma(current->mm, addr);
+ octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Flush a range out of a vma
+ *
+ * @vma: VMA to flush
+ * @start: start of the address range to flush
+ * @end: end of the address range to flush
+ */
+static void octeon_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ if (vma->vm_flags & VM_EXEC)
+ octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Flush a specific page of a vma
+ *
+ * @vma: VMA to flush page for
+ * @page: Page to flush
+ * @pfn: Physical page frame number (unused here)
+ */
+static void octeon_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long page, unsigned long pfn)
+{
+ if (vma->vm_flags & VM_EXEC)
+ octeon_flush_icache_all_cores(vma);
+}
+
+
+/**
+ * Probe Octeon's caches
+ *
+ */
+static void __devinit probe_octeon(void)
+{
+ unsigned long icache_size;
+ unsigned long dcache_size;
+ unsigned int config1;
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ switch (c->cputype) {
+ case CPU_CAVIUM_OCTEON:
+ config1 = read_c0_config1();
+ c->icache.linesz = 2 << ((config1 >> 19) & 7);
+ c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.ways = 1 + ((config1 >> 16) & 7);
+ c->icache.flags |= MIPS_CACHE_VTAG;
+ icache_size =
+ c->icache.sets * c->icache.ways * c->icache.linesz;
+ c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;
+ c->dcache.linesz = 128;
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
+ c->dcache.sets = 1; /* CN3XXX has one Dcache set */
+ else
+ c->dcache.sets = 2; /* CN5XXX has two Dcache sets */
+ c->dcache.ways = 64;
+ dcache_size =
+ c->dcache.sets * c->dcache.ways * c->dcache.linesz;
+ c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
+ c->options |= MIPS_CPU_PREFETCH;
+ break;
+
+ default:
+ panic("Unsupported Cavium Networks CPU type\n");
+ break;
+ }
+
+ /* compute a couple of other cache variables */
+ c->icache.waysize = icache_size / c->icache.ways;
+ c->dcache.waysize = dcache_size / c->dcache.ways;
+
+ c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
+ c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+
+ if (smp_processor_id() == 0) {
+ pr_notice("Primary instruction cache %ldkB, %s, %d way, "
+ "%d sets, linesize %d bytes.\n",
+ icache_size >> 10,
+ cpu_has_vtag_icache ?
+ "virtually tagged" : "physically tagged",
+ c->icache.ways, c->icache.sets, c->icache.linesz);
+
+ pr_notice("Primary data cache %ldkB, %d-way, %d sets, "
+ "linesize %d bytes.\n",
+ dcache_size >> 10, c->dcache.ways,
+ c->dcache.sets, c->dcache.linesz);
+ }
+}
+
+
+/**
+ * Set up the Octeon cache flush routines
+ *
+ */
+void __devinit octeon_cache_init(void)
+{
+ extern unsigned long ebase;
+ extern char except_vec2_octeon;
+
+ memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
+ octeon_flush_cache_sigtramp(ebase + 0x100);
+
+ probe_octeon();
+
+ shm_align_mask = PAGE_SIZE - 1;
+
+ flush_cache_all = octeon_flush_icache_all;
+ __flush_cache_all = octeon_flush_icache_all;
+ flush_cache_mm = octeon_flush_cache_mm;
+ flush_cache_page = octeon_flush_cache_page;
+ flush_cache_range = octeon_flush_cache_range;
+ flush_cache_sigtramp = octeon_flush_cache_sigtramp;
+ flush_icache_all = octeon_flush_icache_all;
+ flush_data_cache_page = octeon_flush_data_cache_page;
+ flush_icache_range = octeon_flush_icache_range;
+ local_flush_icache_range = local_octeon_flush_icache_range;
+
+ build_clear_page();
+ build_copy_page();
+}
+
+/**
+ * Handle a cache error exception
+ */
+
+static void cache_parity_error_octeon(int non_recoverable)
+{
+ unsigned long coreid = cvmx_get_core_num();
+ uint64_t icache_err = read_octeon_c0_icacheerr();
+
+ pr_err("Cache error exception:\n");
+ pr_err("cp0_errorepc == %lx\n", read_c0_errorepc());
+ if (icache_err & 1) {
+ pr_err("CacheErr (Icache) == %llx\n",
+ (unsigned long long)icache_err);
+ write_octeon_c0_icacheerr(0);
+ }
+ if (cache_err_dcache[coreid] & 1) {
+ pr_err("CacheErr (Dcache) == %llx\n",
+ (unsigned long long)cache_err_dcache[coreid]);
+ cache_err_dcache[coreid] = 0;
+ }
+
+ if (non_recoverable)
+ panic("Can't handle cache error: nested exception");
+}
+
+/**
+ * Called when the exception is recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_recoverable(void)
+{
+ cache_parity_error_octeon(0);
+}
+
+/**
+ * Called when the exception is not recoverable
+ */
+
+asmlinkage void cache_parity_error_octeon_non_recoverable(void)
+{
+ cache_parity_error_octeon(1);
+}
+
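As a worked example of the Config1 decode in probe_octeon() above (a standalone sketch; the Config1 value below is hypothetical but chosen to encode a 32 KB, 4-way, 128-byte-line I-cache):

#include <stdio.h>

int main(void)
{
	unsigned int config1 = 0x00330000;	/* hypothetical CP0 Config1 value */

	unsigned int linesz = 2u << ((config1 >> 19) & 7);	/* IL field */
	unsigned int sets   = 64u << ((config1 >> 22) & 7);	/* IS field */
	unsigned int ways   = 1u + ((config1 >> 16) & 7);	/* IA field */

	/* prints: icache: 32768 bytes = 64 sets * 4 ways * 128-byte lines */
	printf("icache: %u bytes = %u sets * %u ways * %u-byte lines\n",
	       sets * ways * linesz, sets, ways, linesz);
	return 0;
}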
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 1eb7c71e3d6a..98ad0a82c29e 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,6 +182,12 @@ void __devinit cpu_cache_init(void)
tx39_cache_init();
}
+ if (cpu_has_octeon_cache) {
+ extern void __weak octeon_cache_init(void);
+
+ octeon_cache_init();
+ }
+
setup_protection_map();
}
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
new file mode 100644
index 000000000000..3db8553fcd34
--- /dev/null
+++ b/arch/mips/mm/cex-oct.S
@@ -0,0 +1,70 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 Cavium Networks
+ * Cache error handler
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+/*
+ * Handle cache error. Indicate to the second level handler whether
+ * the exception is recoverable.
+ */
+ LEAF(except_vec2_octeon)
+
+ .set push
+ .set mips64r2
+ .set noreorder
+ .set noat
+
+
+ /* due to an erratum we need to read the COP0 CacheErr (Dcache)
+ * before any cache/DRAM access */
+
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
+ PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
+
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
+
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
+ nop
+ j cache_parity_error_octeon_non_recoverable
+ nop
+
+ /* exception is recoverable */
+1: j handle_cache_err
+ nop
+
+ .set pop
+ END(except_vec2_octeon)
+
+ /* We need to jump to handle_cache_err so that the previous handler
+ * can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ LEAF(handle_cache_err)
+ .set push
+ .set noreorder
+ .set noat
+
+ SAVE_ALL
+ KMODE
+ jal cache_parity_error_octeon_recoverable
+ nop
+ j ret_from_exception
+ nop
+
+ .set pop
+ END(handle_cache_err)
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index e6708b3ad343..546e6977d4ff 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
- plat_unmap_dma_mem(dma_handle);
+ plat_unmap_dma_mem(dev, dma_handle);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
{
unsigned long addr = (unsigned long) vaddr;
- plat_unmap_dma_mem(dma_handle);
+ plat_unmap_dma_mem(dev, dma_handle);
if (!plat_device_is_coherent(dev))
addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
__dma_sync(dma_addr_to_virt(dma_addr), size,
direction);
- plat_unmap_dma_mem(dma_addr);
+ plat_unmap_dma_mem(dev, dma_addr);
}
EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
dma_cache_wback_inv(addr, size);
}
- plat_unmap_dma_mem(dma_address);
+ plat_unmap_dma_mem(dev, dma_address);
}
EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
if (addr)
__dma_sync(addr, sg->length, direction);
}
- plat_unmap_dma_mem(sg->dma_address);
+ plat_unmap_dma_mem(dev, sg->dma_address);
}
}
@@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev)) {
unsigned long addr;
@@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device);
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return 0;
+ return plat_dma_mapping_error(dev, dma_addr);
}
EXPORT_SYMBOL(dma_mapping_error);
int dma_supported(struct device *dev, u64 mask)
{
- /*
- * we fall back to GFP_DMA when the mask isn't all 1s,
- * so we can't guarantee allocations that must be
- * within a tighter range than GFP_DMA..
- */
- if (mask < DMA_BIT_MASK(24))
- return 0;
-
- return 1;
+ return plat_dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);
@@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
{
BUG_ON(direction == DMA_NONE);
+ plat_extra_sync_for_device(dev);
if (!plat_device_is_coherent(dev))
__dma_sync((unsigned long)vaddr, size, direction);
}
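The dma-default.c hunks replace hard-coded generic behaviour with plat_*() hooks so a platform (Octeon here) can impose its own unmap, sync and addressing rules. On a platform with no special needs the hooks can stay trivial; the bodies below only illustrate that baseline and are not the Octeon or generic MIPS implementations:

struct device;				/* opaque for this sketch */
typedef unsigned long long ex_dma_addr_t;

static inline void plat_extra_sync_for_device(struct device *dev)
{
	/* nothing extra to do on a fully coherent platform */
}

static inline int plat_dma_mapping_error(struct device *dev, ex_dma_addr_t dma_addr)
{
	return 0;			/* no sentinel value marks a failed mapping */
}

static inline int plat_dma_supported(struct device *dev, unsigned long long mask)
{
	/* same rule the removed generic code applied: reject masks narrower
	 * than the 24-bit GFP_DMA zone, accept everything else */
	return mask >= ((1ULL << 24) - 1);
}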
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5ce2fa745626..9619f66e531e 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -478,7 +478,10 @@ void __cpuinit tlb_init(void)
probe_tlb(config);
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
- write_c0_framemask(0);
+ if (current_cpu_type() == CPU_R10000 ||
+ current_cpu_type() == CPU_R12000 ||
+ current_cpu_type() == CPU_R14000)
+ write_c0_framemask(0);
temp_tlb_entry = current_cpu_data.tlbsize - 1;
/* From this point on the ARC firmware is dead. */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 979cf9197282..42942038d0fd 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -317,6 +317,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_BCM3302:
case CPU_BCM4710:
case CPU_LOONGSON2:
+ case CPU_CAVIUM_OCTEON:
if (m4kc_tlbp_war())
uasm_i_nop(p);
tlbw(p);
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index aabd7274507b..5ba31888fefb 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
- cpumask_t tmask = *affinity;
+ cpumask_t tmask;
int cpu = 0;
void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
* be made to forward to an offline "CPU".
*/
+ cpumask_copy(&tmask, affinity);
for_each_cpu(cpu, affinity) {
if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
cpu_clear(cpu, tmask);
}
- irq_desc[irq].affinity = tmask;
+ cpumask_copy(irq_desc[irq].affinity, &tmask);
if (cpus_empty(tmask))
/*
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index f8b18af141a1..0ecd5fe9486e 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
int irq = SGI_BUSERR_IRQ;
irq_enter();
- kstat_this_cpu.irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
ip22_be_interrupt(irq);
irq_exit();
}
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index 3dcb27ec0c53..c8f7d2328b24 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
char c;
irq_enter();
- kstat_this_cpu.irqs[irq]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
ArcRead(0, &c, 1, &cnt);
ArcEnterInteractiveMode();
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index dddfda8e8294..314691648c97 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
void bcm1480_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
+ int irq = K_BCM1480_INT_MBOX_0_0;
unsigned int action;
- kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
/* Load the mailbox register to figure out what we're supposed to do */
action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 5950a288a7da..cad14003b84f 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
void sb1250_mailbox_interrupt(void)
{
int cpu = smp_processor_id();
+ int irq = K_INT_MBOX_0;
unsigned int action;
- kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
/* Load the mailbox register to figure out what we're supposed to do */
action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c
index 10811e981d20..2e370d88a87a 100644
--- a/arch/mn10300/kernel/mn10300-watchdog.c
+++ b/arch/mn10300/kernel/mn10300-watchdog.c
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
* the stack NMI-atomically, it's safe to use smp_processor_id().
*/
int sum, cpu = smp_processor_id();
+ int irq = NMIIRQ;
u8 wdt, tmp;
wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
NMICR = NMICR_WDIF;
nmi_count(cpu)++;
- kstat_this_cpu.irqs[NMIIRQ]++;
+ kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
sum = irq_stat[cpu].__irq_count;
if (last_irq_sums[cpu] == sum) {
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index ac2c822928c7..49482806863f 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
if (CHECK_IRQ_PER_CPU(irq)) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
- irq_desc[irq].affinity = CPU_MASK_ALL;
+ cpumask_setall(irq_desc[irq].affinity);
return -EINVAL;
}
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
if (cpu_check_affinity(irq, dest))
return;
- irq_desc[irq].affinity = *dest;
+ cpumask_copy(irq_desc[irq].affinity, dest);
}
#endif
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
- irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+ cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
#endif
return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq = eirr_to_irq(eirr_val);
#ifdef CONFIG_SMP
- dest = irq_desc[irq].affinity;
+ cpumask_copy(&dest, irq_desc[irq].affinity);
if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);
diff --git a/arch/powerpc/boot/dts/gef_sbc610.dts b/arch/powerpc/boot/dts/gef_sbc610.dts
index 9708b3423bbd..e78c355c7bac 100644
--- a/arch/powerpc/boot/dts/gef_sbc610.dts
+++ b/arch/powerpc/boot/dts/gef_sbc610.dts
@@ -88,6 +88,21 @@
compatible = "gef,fpga-regs";
reg = <0x4 0x0 0x40>;
};
+
+ wdt@4,2000 {
+ compatible = "gef,fpga-wdt";
+ reg = <0x4 0x2000 0x8>;
+ interrupts = <0x1a 0x4>;
+ interrupt-parent = <&gef_pic>;
+ };
+ /* Second watchdog available, driver currently supports one.
+ wdt@4,2010 {
+ compatible = "gef,fpga-wdt";
+ reg = <0x4 0x2010 0x8>;
+ interrupts = <0x1b 0x4>;
+ interrupt-parent = <&gef_pic>;
+ };
+ */
gef_pic: pic@4,4000 {
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index cd1ffa449327..391874c7b436 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -1164,6 +1164,7 @@ CONFIG_WATCHDOG=y
# CONFIG_SOFT_WATCHDOG is not set
# CONFIG_ALIM7101_WDT is not set
# CONFIG_8xxx_WDT is not set
+CONFIG_GEF_WDT=y
#
# PCI-based Watchdog Cards
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index f5ae4878ccef..7f8e6a92c5a1 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -241,7 +241,7 @@ void fixup_irqs(cpumask_t map)
if (irq_desc[irq].status & IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_desc[irq].affinity, map);
+ cpumask_and(&mask, irq_desc[irq].affinity, &map);
if (any_online_cpu(mask) == NR_CPUS) {
printk("Breaking affinity for irq %i\n", irq);
mask = map;
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 47bf15cd2c9e..04e8ecea9b40 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -182,6 +182,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
__per_cpu_start = .;
+ *(.data.percpu.page_aligned)
*(.data.percpu)
*(.data.percpu.shared_aligned)
__per_cpu_end = .;
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index ec7c8f45a215..e6506cd0ff94 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP
- policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+ cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
#endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index a3c6c01bd6db..968c1c0b4d5b 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -110,7 +110,7 @@ static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
}
/* initialize spu_gov_info for all affected cpus */
- for_each_cpu_mask(i, policy->cpus) {
+ for_each_cpu(i, policy->cpus) {
affected_info = &per_cpu(spu_gov_info, i);
affected_info->policy = policy;
}
@@ -127,7 +127,7 @@ static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
spu_gov_cancel_work(info);
/* clean spu_gov_info for all affected cpus */
- for_each_cpu_mask (i, policy->cpus) {
+ for_each_cpu (i, policy->cpus) {
info = &per_cpu(spu_gov_info, i);
info->policy = NULL;
}
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index 86db47c1b665..be2527a516ea 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency;
- policy->cpus = cpu_online_map;
+ cpumask_copy(policy->cpus, &cpu_online_map);
ppc_proc_freq = policy->cur * 1000ul;
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 4dfb4bc242b5..beb38333b6d2 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
- policy->cpus = cpu_online_map;
+ cpumask_copy(policy->cpus, &cpu_online_map);
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 84e058f1e1cc..80b513449f4c 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
{
int server;
/* For the moment only implement delivery to all cpus or one cpu */
- cpumask_t cpumask = irq_desc[virq].affinity;
+ cpumask_t cpumask;
cpumask_t tmp = CPU_MASK_NONE;
+ cpumask_copy(&cpumask, irq_desc[virq].affinity);
if (!distribute_irqs)
return default_server;
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
virq, cpu);
/* Reset affinity to all cpus */
- irq_desc[virq].affinity = CPU_MASK_ALL;
+ cpumask_setall(irq_desc[virq].affinity);
desc->chip->set_affinity(virq, cpu_all_mask);
unlock:
spin_unlock_irqrestore(&desc->lock, flags);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 3e0d89dcdba2..0afd21f9a222 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
- cpumask_t mask = irq_desc[virt_irq].affinity;
+ cpumask_t mask;
int cpuid;
+ cpumask_copy(&mask, irq_desc[virt_irq].affinity);
if (cpus_equal(mask, CPU_MASK_ALL)) {
static int irq_rover;
static DEFINE_SPINLOCK(irq_rover_lock);
diff --git a/arch/sparc/configs/sparc32_defconfig b/arch/sparc/configs/sparc32_defconfig
index 2e3a149ea0e7..09ab46e4c59d 100644
--- a/arch/sparc/configs/sparc32_defconfig
+++ b/arch/sparc/configs/sparc32_defconfig
@@ -1,15 +1,21 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25
-# Tue Apr 29 01:28:58 2008
+# Linux kernel version: 2.6.28
+# Thu Jan 8 16:45:44 2009
#
+# CONFIG_64BIT is not set
+CONFIG_SPARC=y
+CONFIG_SPARC32=y
+# CONFIG_SPARC64 is not set
+CONFIG_ARCH_DEFCONFIG="arch/sparc/configs/sparc32_defconfig"
+CONFIG_BITS=32
+CONFIG_AUDIT_ARCH=y
CONFIG_MMU=y
CONFIG_HIGHMEM=y
CONFIG_ZONE_DMA=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
-CONFIG_HZ=100
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -66,31 +72,30 @@ CONFIG_SIGNALFD=y
CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
+CONFIG_AIO=y
CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
# CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
# CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
CONFIG_BLOCK=y
# CONFIG_LBD is not set
# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
@@ -105,59 +110,73 @@ CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
+# CONFIG_FREEZER is not set
#
-# General machine setup
+# Processor type and features
#
# CONFIG_SMP is not set
-CONFIG_SPARC=y
-CONFIG_SPARC32=y
-CONFIG_SBUS=y
-CONFIG_SBUSCHAR=y
-CONFIG_SERIAL_CONSOLE=y
-CONFIG_SUN_AUXIO=y
-CONFIG_SUN_IO=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_EMULATED_CMPXCHG=y
-CONFIG_SUN_PM=y
-# CONFIG_SUN4 is not set
-CONFIG_PCI=y
-CONFIG_PCI_SYSCALL=y
-# CONFIG_ARCH_SUPPORTS_MSI is not set
-CONFIG_PCI_LEGACY=y
-# CONFIG_PCI_DEBUG is not set
-# CONFIG_NO_DMA is not set
-CONFIG_SUN_OPENPROMFS=m
-# CONFIG_SPARC_LED is not set
-CONFIG_BINFMT_ELF=y
-CONFIG_BINFMT_MISC=m
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_SUN_PM=y
+# CONFIG_SPARC_LED is not set
+CONFIG_SERIAL_CONSOLE=y
#
-# Networking
+# Bus options (PCI etc.)
#
+CONFIG_SBUS=y
+CONFIG_SBUSCHAR=y
+CONFIG_PCI=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCCARD is not set
+CONFIG_SUN_OPENPROMFS=m
+CONFIG_SPARC32_PCI=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=m
CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NET_NS is not set
+CONFIG_COMPAT_NET_DEV_OPS=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@@ -166,6 +185,7 @@ CONFIG_XFRM_USER=m
# CONFIG_XFRM_SUB_POLICY is not set
# CONFIG_XFRM_MIGRATE is not set
# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
CONFIG_NET_KEY=m
# CONFIG_NET_KEY_MIGRATE is not set
CONFIG_INET=y
@@ -221,6 +241,7 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -231,6 +252,7 @@ CONFIG_IPV6_TUNNEL=m
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
#
# Network testing
@@ -241,14 +263,14 @@ CONFIG_NET_PKTGEN=m
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-
-#
-# Wireless
-#
+# CONFIG_PHONET is not set
+CONFIG_WIRELESS=y
# CONFIG_CFG80211 is not set
+CONFIG_WIRELESS_OLD_REGULATORY=y
# CONFIG_WIRELESS_EXT is not set
+# CONFIG_LIB80211 is not set
# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -262,7 +284,9 @@ CONFIG_NET_PKTGEN=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
-# CONFIG_FW_LOADER is not set
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_DEBUG_DRIVER is not set
# CONFIG_DEBUG_DEVRES is not set
# CONFIG_SYS_HYPERVISOR is not set
@@ -286,12 +310,15 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
# CONFIG_PHANTOM is not set
# CONFIG_EEPROM_93CX6 is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -335,6 +362,7 @@ CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_SRP_ATTRS is not set
CONFIG_SCSI_LOWLEVEL=y
# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@@ -348,6 +376,8 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_FCOE is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_IPS is not set
@@ -367,6 +397,7 @@ CONFIG_SCSI_QLOGICPTI=m
# CONFIG_SCSI_DEBUG is not set
CONFIG_SCSI_SUNESP=y
# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_DH is not set
# CONFIG_ATA is not set
# CONFIG_MD is not set
# CONFIG_FUSION is not set
@@ -374,11 +405,14 @@ CONFIG_SCSI_SUNESP=y
#
# IEEE 1394 (FireWire) support
#
+
+#
+# Enable only one of the two stacks, unless you know what you are doing
+#
# CONFIG_FIREWIRE is not set
# CONFIG_IEEE1394 is not set
# CONFIG_I2O is not set
CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
CONFIG_DUMMY=m
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -402,14 +436,16 @@ CONFIG_SUNQE=m
# CONFIG_IBM_NEW_EMAC_RGMII is not set
# CONFIG_IBM_NEW_EMAC_TAH is not set
# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_NET_PCI is not set
# CONFIG_B44 is not set
+# CONFIG_ATL2 is not set
CONFIG_NETDEV_1000=y
# CONFIG_ACENIC is not set
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
-# CONFIG_E1000E is not set
-# CONFIG_E1000E_ENABLED is not set
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_MYRI_SBUS is not set
@@ -425,18 +461,25 @@ CONFIG_NETDEV_1000=y
# CONFIG_BNX2 is not set
# CONFIG_QLA3XXX is not set
# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_JME is not set
CONFIG_NETDEV_10000=y
# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
# CONFIG_IXGBE is not set
# CONFIG_IXGB is not set
# CONFIG_S2IO is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NETXEN_NIC is not set
# CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
# CONFIG_MLX4_CORE is not set
# CONFIG_TEHUTI is not set
# CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
+# CONFIG_SFC is not set
# CONFIG_TR is not set
#
@@ -445,6 +488,10 @@ CONFIG_NETDEV_10000=y
# CONFIG_WLAN_PRE80211 is not set
# CONFIG_WLAN_80211 is not set
# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
# CONFIG_WAN is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
@@ -492,9 +539,11 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
CONFIG_MOUSE_PS2_SYNAPTICS=y
CONFIG_MOUSE_PS2_LIFEBOOK=y
CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
# CONFIG_MOUSE_PS2_TOUCHKIT is not set
CONFIG_MOUSE_SERIAL=m
# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_VSXXXAA is not set
# CONFIG_INPUT_JOYSTICK is not set
# CONFIG_INPUT_TABLET is not set
@@ -516,15 +565,18 @@ CONFIG_SERIO_LIBPS2=m
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
# CONFIG_NOZOMI is not set
#
# Serial drivers
#
+# CONFIG_SERIAL_8250 is not set
#
# Non-8250 serial port support
@@ -540,23 +592,20 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_CONSOLE_POLL=y
# CONFIG_SERIAL_JSM is not set
CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
CONFIG_HW_RANDOM=m
-CONFIG_JS_RTC=m
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_RAW_DRIVER is not set
# CONFIG_TCG_TPM is not set
CONFIG_DEVPORT=y
# CONFIG_I2C is not set
-
-#
-# SPI support
-#
# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_GPIOLIB is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
CONFIG_HWMON=y
@@ -577,25 +626,38 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
# CONFIG_THERMAL is not set
+# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
#
# Sonics Silicon Backplane
#
-CONFIG_SSB_POSSIBLE=y
# CONFIG_SSB is not set
#
# Multifunction device drivers
#
+# CONFIG_MFD_CORE is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
#
# Multimedia devices
#
+
+#
+# Multimedia core support
+#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
+# CONFIG_VIDEO_MEDIA is not set
+
+#
+# Multimedia drivers
+#
# CONFIG_DAB is not set
#
@@ -616,15 +678,17 @@ CONFIG_SSB_POSSIBLE=y
#
# CONFIG_PROM_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-
-#
-# Sound
-#
# CONFIG_SOUND is not set
CONFIG_HID_SUPPORT=y
CONFIG_HID=y
# CONFIG_HID_DEBUG is not set
# CONFIG_HIDRAW is not set
+# CONFIG_HID_PID is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
CONFIG_USB_SUPPORT=y
CONFIG_USB_ARCH_HAS_HCD=y
CONFIG_USB_ARCH_HAS_OHCI=y
@@ -632,32 +696,71 @@ CONFIG_USB_ARCH_HAS_EHCI=y
# CONFIG_USB is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# Enable Host or Gadget support to see Inventra options
+#
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
#
# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
# CONFIG_MMC is not set
# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
+# CONFIG_ACCESSIBILITY is not set
# CONFIG_INFINIBAND is not set
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+CONFIG_RTC_DRV_M48T59=y
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
# CONFIG_UIO is not set
+# CONFIG_STAGING is not set
#
# Misc Linux/SPARC drivers
#
CONFIG_SUN_OPENPROMIO=m
-CONFIG_SUN_MOSTEK_RTC=m
-# CONFIG_SUN_BPP is not set
-# CONFIG_SUN_VIDEOPIX is not set
# CONFIG_TADPOLE_TS102_UCTRL is not set
# CONFIG_SUN_JSFLASH is not set
#
-# Unix98 PTY support
-#
-CONFIG_UNIX98_PTY_COUNT=256
-
-#
# File systems
#
CONFIG_EXT2_FS=y
@@ -666,11 +769,12 @@ CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
CONFIG_DNOTIFY=y
@@ -702,14 +806,12 @@ CONFIG_ISO9660_FS=m
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
# CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
# CONFIG_HFS_FS is not set
@@ -720,6 +822,7 @@ CONFIG_SYSFS=y
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
CONFIG_ROMFS_FS=m
@@ -729,13 +832,13 @@ CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
# CONFIG_NFS_V3 is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFSD is not set
CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=m
-# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_SUNRPC_REGISTER_V4 is not set
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_RPCSEC_GSS_SPKM3 is not set
# CONFIG_SMB_FS is not set
@@ -806,9 +909,12 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_HEADERS_CHECK is not set
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
# CONFIG_SCHED_DEBUG is not set
# CONFIG_SCHEDSTATS is not set
# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
# CONFIG_DEBUG_SLAB is not set
# CONFIG_DEBUG_RT_MUTEXES is not set
# CONFIG_RT_MUTEX_TESTER is not set
@@ -822,37 +928,59 @@ CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_VM is not set
# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_DEBUG_LIST is not set
# CONFIG_DEBUG_SG is not set
-CONFIG_FRAME_POINTER=y
+# CONFIG_DEBUG_NOTIFIERS is not set
# CONFIG_BOOT_PRINTK_DELAY is not set
# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+
+#
+# Tracers
+#
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
# CONFIG_SAMPLES is not set
-CONFIG_KGDB=y
CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB=y
CONFIG_KGDB_SERIAL_CONSOLE=y
CONFIG_KGDB_TESTS=y
# CONFIG_KGDB_TESTS_ON_BOOT is not set
# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_STACK_DEBUG is not set
#
# Security options
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
# CONFIG_SECURITY_FILE_CAPABILITIES is not set
CONFIG_CRYPTO=y
#
# Crypto core or helper
#
+# CONFIG_CRYPTO_FIPS is not set
CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
# CONFIG_CRYPTO_GF128MUL is not set
CONFIG_CRYPTO_NULL=m
# CONFIG_CRYPTO_CRYPTD is not set
@@ -890,6 +1018,10 @@ CONFIG_CRYPTO_CRC32C=m
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
@@ -921,15 +1053,21 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m
#
CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
# CONFIG_CRC7 is not set
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 73d45521db04..33e31ce6b31f 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -177,17 +177,6 @@ extern void prom_putsegment(int context, unsigned long virt_addr,
/* PROM device tree traversal functions... */
-#ifdef PROMLIB_INTERNAL
-
-/* Internal version of prom_getchild. */
-extern int __prom_getchild(int parent_node);
-
-/* Internal version of prom_getsibling. */
-extern int __prom_getsibling(int node);
-
-#endif
-
-
/* Get the child node of the given node, or zero if no child exists. */
extern int prom_getchild(int parent_node);
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index 6d2c2ca98039..a5db0317b5fb 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -218,16 +218,6 @@ extern void prom_unmap(unsigned long size, unsigned long vaddr);
/* PROM device tree traversal functions... */
-#ifdef PROMLIB_INTERNAL
-
-/* Internal version of prom_getchild. */
-extern int __prom_getchild(int parent_node);
-
-/* Internal version of prom_getsibling. */
-extern int __prom_getsibling(int node);
-
-#endif
-
/* Get the child node of the given node, or zero if no child exists. */
extern int prom_getchild(int parent_node);
diff --git a/arch/sparc/include/asm/signal.h b/arch/sparc/include/asm/signal.h
index 41535e77b255..cba45206b7f2 100644
--- a/arch/sparc/include/asm/signal.h
+++ b/arch/sparc/include/asm/signal.h
@@ -84,7 +84,11 @@
#define __OLD_NSIG 32
#define __NEW_NSIG 64
+#ifdef __arch64__
#define _NSIG_BPW 64
+#else
+#define _NSIG_BPW 32
+#endif
#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)
#define SIGRTMIN 32
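[Editor's note] The hunk above keys the per-word bit count to the build's word size, so a 64-bit sigset still spans the right number of array slots on 32-bit sparc. A minimal userspace sketch of the same arithmetic, using __LP64__ as a stand-in for the kernel's __arch64__ test (an assumption for illustration only):

#include <stdio.h>

#define __NEW_NSIG 64
/* Stand-in for the kernel's __arch64__ check; __LP64__ is an assumption here. */
#ifdef __LP64__
#define _NSIG_BPW 64
#else
#define _NSIG_BPW 32
#endif
#define _NSIG_WORDS (__NEW_NSIG / _NSIG_BPW)

int main(void)
{
	/* 64-bit build: 1 word of 64 bits; 32-bit build: 2 words of 32 bits. */
	printf("sigset layout: %d word(s) of %d bits\n", _NSIG_WORDS, _NSIG_BPW);
	return 0;
}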
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index b8a65b64e1df..5bc0b8fd6374 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -47,6 +47,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
(pcibus_to_node(bus) == -1 ? \
CPU_MASK_ALL : \
node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus) \
+ (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL_PTR : \
+ cpumask_of_node(pcibus_to_node(bus)))
#define SD_NODE_INIT (struct sched_domain) { \
.min_interval = 8, \
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 09c857215a52..45c41232fc4c 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -76,6 +76,7 @@ unsigned char get_auxio(void)
return sbus_readb(auxio_register);
return 0;
}
+EXPORT_SYMBOL(get_auxio);
void set_auxio(unsigned char bits_on, unsigned char bits_off)
{
@@ -102,7 +103,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
};
spin_unlock_irqrestore(&auxio_lock, flags);
}
-
+EXPORT_SYMBOL(set_auxio);
/* sun4m power control register (AUXIO2) */
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 8b67347d4221..9f52db2d441c 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -72,6 +72,7 @@ void auxio_set_led(int on)
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
+EXPORT_SYMBOL(auxio_set_led);
static void __auxio_sbus_set_lte(int on)
{
@@ -90,6 +91,7 @@ void auxio_set_lte(int on)
break;
}
}
+EXPORT_SYMBOL(auxio_set_lte);
static struct of_device_id __initdata auxio_match[] = {
{
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 6c2da2420f76..f0b825505da5 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
@@ -20,6 +21,7 @@
#include "kernel.h"
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
+EXPORT_PER_CPU_SYMBOL(__cpu_data);
struct cpu_info {
int psr_vers;
diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c
index c16135e0c151..57922f69c3f7 100644
--- a/arch/sparc/kernel/idprom.c
+++ b/arch/sparc/kernel/idprom.c
@@ -8,11 +8,14 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/oplib.h>
#include <asm/idprom.h>
struct idprom *idprom;
+EXPORT_SYMBOL(idprom);
+
static struct idprom idprom_buffer;
#ifdef CONFIG_SPARC32
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 7ce14f05eb48..87ea0d03d975 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -120,6 +120,7 @@ void __iomem *ioremap(unsigned long offset, unsigned long size)
sprintf(name, "phys_%08x", (u32)offset);
return _sparc_alloc_io(0, offset, size, name);
}
+EXPORT_SYMBOL(ioremap);
/*
* Complementary to ioremap().
@@ -141,6 +142,7 @@ void iounmap(volatile void __iomem *virtual)
kfree(res);
}
}
+EXPORT_SYMBOL(iounmap);
void __iomem *of_ioremap(struct resource *res, unsigned long offset,
unsigned long size, char *name)
@@ -237,6 +239,7 @@ void sbus_set_sbus64(struct device *dev, int x)
{
printk("sbus_set_sbus64: unsupported\n");
}
+EXPORT_SYMBOL(sbus_set_sbus64);
/*
* Allocate a chunk of memory suitable for DMA.
@@ -436,6 +439,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba)
*pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
return (void *) res->start;
}
+EXPORT_SYMBOL(pci_alloc_consistent);
/* Free and unmap a consistent DMA buffer.
* cpu_addr is what was returned from pci_alloc_consistent,
@@ -477,6 +481,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba)
free_pages(pgp, get_order(n));
}
+EXPORT_SYMBOL(pci_free_consistent);
/* Map a single buffer of the indicated size for DMA in streaming mode.
* The 32-bit bus address to use is returned.
@@ -491,6 +496,7 @@ dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
/* IIep is write-through, not flushing. */
return virt_to_phys(ptr);
}
+EXPORT_SYMBOL(pci_map_single);
/* Unmap a single streaming mode DMA translation. The dma_addr and size
* must match what was provided for in a previous pci_map_single call. All
@@ -508,6 +514,7 @@ void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size,
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
+EXPORT_SYMBOL(pci_unmap_single);
/*
* Same as pci_map_single, but with pages.
@@ -519,6 +526,7 @@ dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
+EXPORT_SYMBOL(pci_map_page);
void pci_unmap_page(struct pci_dev *hwdev,
dma_addr_t dma_address, size_t size, int direction)
@@ -526,6 +534,7 @@ void pci_unmap_page(struct pci_dev *hwdev,
BUG_ON(direction == PCI_DMA_NONE);
/* mmu_inval_dma_area XXX */
}
+EXPORT_SYMBOL(pci_unmap_page);
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
@@ -557,6 +566,7 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
return nents;
}
+EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
@@ -578,6 +588,7 @@ void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
}
}
}
+EXPORT_SYMBOL(pci_unmap_sg);
/* Make physical memory consistent for a single
* streaming mode DMA translation before or after a transfer.
@@ -597,6 +608,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t si
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
+EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction)
{
@@ -606,6 +618,7 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
(size + PAGE_SIZE-1) & PAGE_MASK);
}
}
+EXPORT_SYMBOL(pci_dma_sync_single_for_device);
/* Make physical memory consistent for a set of streaming
* mode DMA translations after a transfer.
@@ -628,6 +641,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int
}
}
}
+EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
@@ -644,6 +658,7 @@ void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl,
}
}
}
+EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
#endif /* CONFIG_PCI */
#ifdef CONFIG_PROC_FS
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index 1eff942fe22f..44dd5ee64339 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -294,6 +294,7 @@ void synchronize_irq(unsigned int irq)
while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
cpu_relax();
}
+EXPORT_SYMBOL(synchronize_irq);
#endif /* SMP */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index cab8e0286871..4ac5c651e00d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -247,9 +247,10 @@ struct irq_handler_data {
#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
- cpumask_t mask = irq_desc[virt_irq].affinity;
+ cpumask_t mask;
int cpuid;
+ cpumask_copy(&mask, irq_desc[virt_irq].affinity);
if (cpus_equal(mask, CPU_MASK_ALL)) {
static int irq_rover;
static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
!(irq_desc[irq].status & IRQ_PER_CPU)) {
if (irq_desc[irq].chip->set_affinity)
irq_desc[irq].chip->set_affinity(irq,
- &irq_desc[irq].affinity);
+ irq_desc[irq].affinity);
}
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
}
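[Editor's note] The irq_64.c hunks above stop copying irq_desc[].affinity by struct assignment and go through cpumask_copy() instead, since the affinity field is now reached through a pointer. A rough userspace sketch of the pattern with toy types (NR_CPUS and the struct layout are made up for illustration, not the kernel definitions):

#include <stdio.h>
#include <string.h>

#define NR_CPUS 64
typedef struct { unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))]; } cpumask_t;

/* Toy stand-in for the kernel helper: copy the bitmap behind the pointers. */
static void cpumask_copy(cpumask_t *dst, const cpumask_t *src)
{
	memcpy(dst->bits, src->bits, sizeof(dst->bits));
}

int main(void)
{
	cpumask_t affinity = { { 0x5 } };	/* CPUs 0 and 2 */
	cpumask_t mask;

	cpumask_copy(&mask, &affinity);		/* instead of: mask = affinity; */
	printf("mask word 0 = %#lx\n", mask.bits[0]);
	return 0;
}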
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 923e9bbb9fe2..4638fba799e4 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1077,6 +1077,7 @@ int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
return (device_mask & dma_addr_mask) == dma_addr_mask;
}
+EXPORT_SYMBOL(pci_dma_supported);
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
const struct resource *rp, resource_size_t *start,
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 75ed98be3edf..85e7037429b9 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -956,6 +956,7 @@ void outsb(unsigned long addr, const void *src, unsigned long count)
/* addr += 1; */
}
}
+EXPORT_SYMBOL(outsb);
void outsw(unsigned long addr, const void *src, unsigned long count)
{
@@ -966,6 +967,7 @@ void outsw(unsigned long addr, const void *src, unsigned long count)
/* addr += 2; */
}
}
+EXPORT_SYMBOL(outsw);
void outsl(unsigned long addr, const void *src, unsigned long count)
{
@@ -976,6 +978,7 @@ void outsl(unsigned long addr, const void *src, unsigned long count)
/* addr += 4; */
}
}
+EXPORT_SYMBOL(outsl);
void insb(unsigned long addr, void *dst, unsigned long count)
{
@@ -986,6 +989,7 @@ void insb(unsigned long addr, void *dst, unsigned long count)
/* addr += 1; */
}
}
+EXPORT_SYMBOL(insb);
void insw(unsigned long addr, void *dst, unsigned long count)
{
@@ -996,6 +1000,7 @@ void insw(unsigned long addr, void *dst, unsigned long count)
/* addr += 2; */
}
}
+EXPORT_SYMBOL(insw);
void insl(unsigned long addr, void *dst, unsigned long count)
{
@@ -1009,5 +1014,6 @@ void insl(unsigned long addr, void *dst, unsigned long count)
/* addr += 4; */
}
}
+EXPORT_SYMBOL(insl);
subsys_initcall(pcic_init);
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index 5a8d8ced33da..f4bee35a1b46 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -44,6 +44,7 @@
* Set in pm platform drivers (apc.c and pmc.c)
*/
void (*pm_idle)(void);
+EXPORT_SYMBOL(pm_idle);
/*
* Power-off handler instantiation for pm.h compliance
@@ -673,6 +674,7 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
"g1", "g2", "g3", "o0", "o1", "memory", "cc");
return retval;
}
+EXPORT_SYMBOL(kernel_thread);
unsigned long get_wchan(struct task_struct *task)
{
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index d5e2acef9877..cc8b5604442c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -678,6 +678,7 @@ pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
"g1", "g2", "g3", "o0", "o1", "memory", "cc");
return retval;
}
+EXPORT_SYMBOL(kernel_thread);
typedef struct {
union {
@@ -743,6 +744,7 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
}
return 1;
}
+EXPORT_SYMBOL(dump_fpu);
/*
* sparc_execve() executes a new program after the asm stub has set
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index 40689ae3c9b0..8f1478475421 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -11,19 +11,19 @@
#include "iommu_common.h"
#include "psycho_common.h"
-#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL
-#define PSYCHO_STCERR_WRITE 0x0000000000000002UL
-#define PSYCHO_STCERR_READ 0x0000000000000001UL
-#define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL
-#define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL
-#define PSYCHO_STCTAG_VALID 0x0000000000000002UL
-#define PSYCHO_STCTAG_WRITE 0x0000000000000001UL
-#define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL
-#define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL
-#define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL
-#define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL
-#define PSYCHO_STCLINE_VALID 0x0000000000000002UL
-#define PSYCHO_STCLINE_FOFN 0x0000000000000001UL
+#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002ULL
+#define PSYCHO_STCERR_WRITE 0x0000000000000002ULL
+#define PSYCHO_STCERR_READ 0x0000000000000001ULL
+#define PSYCHO_STCTAG_PPN 0x0fffffff00000000ULL
+#define PSYCHO_STCTAG_VPN 0x00000000ffffe000ULL
+#define PSYCHO_STCTAG_VALID 0x0000000000000002ULL
+#define PSYCHO_STCTAG_WRITE 0x0000000000000001ULL
+#define PSYCHO_STCLINE_LINDX 0x0000000001e00000ULL
+#define PSYCHO_STCLINE_SPTR 0x00000000001f8000ULL
+#define PSYCHO_STCLINE_LADDR 0x0000000000007f00ULL
+#define PSYCHO_STCLINE_EPTR 0x00000000000000fcULL
+#define PSYCHO_STCLINE_VALID 0x0000000000000002ULL
+#define PSYCHO_STCLINE_FOFN 0x0000000000000001ULL
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
@@ -144,10 +144,10 @@ static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm,
#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
-#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
+#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffULL
#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
-#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
+#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffULL
static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
u64 *tag, u64 *data)
@@ -190,7 +190,7 @@ static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
pbm->name, i,
((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
- (data_val & PSYCHO_IOMMU_DATA_PPAGE)<<IOMMU_PAGE_SHIFT);
+ (data_val & PSYCHO_IOMMU_DATA_PPAGE) << IOMMU_PAGE_SHIFT);
}
}
@@ -285,20 +285,20 @@ static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm)
return ret;
}
-#define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL
-#define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL
-#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL
-#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL
-#define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL
-#define PSYCHO_PCIAFSR_STA 0x0400000000000000UL
-#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL
-#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL
-#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL
-#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL
-#define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL
-#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL
-#define PSYCHO_PCIAFSR_MID 0x000000003e000000UL
-#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL
+#define PSYCHO_PCIAFSR_PMA 0x8000000000000000ULL
+#define PSYCHO_PCIAFSR_PTA 0x4000000000000000ULL
+#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000ULL
+#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000ULL
+#define PSYCHO_PCIAFSR_SMA 0x0800000000000000ULL
+#define PSYCHO_PCIAFSR_STA 0x0400000000000000ULL
+#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000ULL
+#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000ULL
+#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000ULL
+#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000ULL
+#define PSYCHO_PCIAFSR_BLK 0x0000000080000000ULL
+#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000ULL
+#define PSYCHO_PCIAFSR_MID 0x000000003e000000ULL
+#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffULL
irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
{
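[Editor's note] The block above only changes constant suffixes: with this file also built for 32-bit sparc, unsigned long may be 32 bits wide, so 64-bit register masks need ULL. A small sketch of the difference (the mask value is arbitrary; compare an ILP32 build, e.g. with gcc -m32 where supported, against a 64-bit one):

#include <stdio.h>

int main(void)
{
	unsigned long long mask = 0x8000000000000000ULL;	/* e.g. an AFSR bit */

	/* On an ILP32 build unsigned long is 32 bits, so the same literal with
	 * a plain UL suffix would not fit; ULL is 64 bits on both builds. */
	printf("unsigned long      : %zu bits\n", sizeof(unsigned long) * 8);
	printf("unsigned long long : %zu bits\n", sizeof(unsigned long long) * 8);
	printf("mask               : %#llx\n", mask);
	return 0;
}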
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ead310066d1..406e0872504e 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -117,6 +117,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
val |= (1UL << 4UL);
upa_writeq(val, cfg_reg);
}
+EXPORT_SYMBOL(sbus_set_sbus64);
/* INO number to IMAP register offset for SYSIO external IRQ's.
* This should conform to both Sunfire/Wildfire server and Fusion
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c96c65d1b58b..998cadb4e7f2 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -199,7 +199,9 @@ extern unsigned short ram_flags;
extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
+
enum sparc_cpu sparc_cpu_model;
+EXPORT_SYMBOL(sparc_cpu_model);
struct tt_entry *sparc_ttable;
@@ -391,6 +393,7 @@ void sun_do_break(void)
prom_cmdline();
}
+EXPORT_SYMBOL(sun_do_break);
int stop_a_enabled = 1;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 555db7452ebe..49d061f4ae9d 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -58,6 +58,7 @@
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
+EXPORT_SYMBOL(ns87303_lock);
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
@@ -425,5 +426,7 @@ void sun_do_break(void)
prom_cmdline();
}
+EXPORT_SYMBOL(sun_do_break);
int stop_a_enabled = 1;
+EXPORT_SYMBOL(stop_a_enabled);
diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c
index e1e97639231b..baeab8720237 100644
--- a/arch/sparc/kernel/sparc_ksyms_32.c
+++ b/arch/sparc/kernel/sparc_ksyms_32.c
@@ -5,49 +5,14 @@
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-#define PROMLIB_INTERNAL
-
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/in6.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/syscalls.h>
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#endif
-#include <linux/pm.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/highmem.h>
-#endif
-#include <asm/oplib.h>
-#include <asm/delay.h>
-#include <asm/system.h>
-#include <asm/auxio.h>
#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/idprom.h>
-#include <asm/head.h>
-#include <asm/smp.h>
-#include <asm/ptrace.h>
#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#ifdef CONFIG_SBUS
+#include <asm/delay.h>
+#include <asm/head.h>
#include <asm/dma.h>
-#endif
-#include <asm/io-unit.h>
-#include <asm/bug.h>
-
-extern spinlock_t rtc_lock;
struct poll {
int fd;
@@ -55,72 +20,15 @@ struct poll {
short revents;
};
-extern void (*__copy_1page)(void *, const void *);
-extern void __memmove(void *, const void *, __kernel_size_t);
-extern void (*bzero_1page)(void *);
-extern void *__bzero(void *, size_t);
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern int __strncmp(const char *, const char *, __kernel_size_t);
-
-extern int __ashrdi3(int, int);
-extern int __ashldi3(int, int);
-extern int __lshrdi3(int, int);
-extern int __muldi3(int, int);
-extern int __divdi3(int, int);
-
-/* Private functions with odd calling conventions. */
-extern void ___atomic24_add(void);
-extern void ___atomic24_sub(void);
-extern void ___rw_read_enter(void);
-extern void ___rw_read_try(void);
-extern void ___rw_read_exit(void);
-extern void ___rw_write_enter(void);
-
-/* Alias functions whose names begin with "." and export the aliases.
- * The module references will be fixed up by module_frob_arch_sections.
- */
-extern int _Div(int, int);
-extern int _Mul(int, int);
-extern int _Rem(int, int);
-extern unsigned _Udiv(unsigned, unsigned);
-extern unsigned _Umul(unsigned, unsigned);
-extern unsigned _Urem(unsigned, unsigned);
-
-/* used by various drivers */
-EXPORT_SYMBOL(sparc_cpu_model);
-EXPORT_SYMBOL(kernel_thread);
-#ifdef CONFIG_SMP
-// XXX find what uses (or used) these. AV: see asm/spinlock.h
-EXPORT_SYMBOL(___rw_read_enter);
-EXPORT_SYMBOL(___rw_read_try);
-EXPORT_SYMBOL(___rw_read_exit);
-EXPORT_SYMBOL(___rw_write_enter);
-#endif
-
-EXPORT_SYMBOL(sparc_valid_addr_bitmap);
-EXPORT_SYMBOL(phys_base);
-EXPORT_SYMBOL(pfn_base);
-
-/* Atomic operations. */
-EXPORT_SYMBOL(___atomic24_add);
-EXPORT_SYMBOL(___atomic24_sub);
-
-/* Per-CPU information table */
-EXPORT_PER_CPU_SYMBOL(__cpu_data);
-
-#ifdef CONFIG_SMP
-/* IRQ implementation. */
-EXPORT_SYMBOL(synchronize_irq);
-#endif
-
+/* from entry.S */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(set_auxio);
-EXPORT_SYMBOL(get_auxio);
-EXPORT_SYMBOL(io_remap_pfn_range);
+/* from head_32.S */
+EXPORT_SYMBOL(__ret_efault);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Defined using magic */
#ifndef CONFIG_SMP
EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
#else
@@ -132,122 +40,7 @@ EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
-
EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));
-#ifdef CONFIG_SBUS
-EXPORT_SYMBOL(sbus_set_sbus64);
-#endif
-#ifdef CONFIG_PCI
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(insl);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(pci_alloc_consistent);
-EXPORT_SYMBOL(pci_free_consistent);
-EXPORT_SYMBOL(pci_map_single);
-EXPORT_SYMBOL(pci_unmap_single);
-EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
-EXPORT_SYMBOL(pci_dma_sync_single_for_device);
-EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(pci_dma_sync_sg_for_device);
-EXPORT_SYMBOL(pci_map_sg);
-EXPORT_SYMBOL(pci_unmap_sg);
-EXPORT_SYMBOL(pci_map_page);
-EXPORT_SYMBOL(pci_unmap_page);
-/* Actually, ioremap/iounmap are not PCI specific. But it is ok for drivers. */
-EXPORT_SYMBOL(ioremap);
-EXPORT_SYMBOL(iounmap);
-#endif
-
-/* in arch/sparc/mm/highmem.c */
-#ifdef CONFIG_HIGHMEM
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic);
-#endif
-
-/* prom symbols */
-EXPORT_SYMBOL(idprom);
-EXPORT_SYMBOL(prom_root_node);
-EXPORT_SYMBOL(prom_getchild);
-EXPORT_SYMBOL(prom_getsibling);
-EXPORT_SYMBOL(prom_searchsiblings);
-EXPORT_SYMBOL(prom_firstprop);
-EXPORT_SYMBOL(prom_nextprop);
-EXPORT_SYMBOL(prom_getproplen);
-EXPORT_SYMBOL(prom_getproperty);
-EXPORT_SYMBOL(prom_node_has_property);
-EXPORT_SYMBOL(prom_setprop);
+/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
-EXPORT_SYMBOL(prom_apply_obio_ranges);
-EXPORT_SYMBOL(prom_feval);
-EXPORT_SYMBOL(prom_getbool);
-EXPORT_SYMBOL(prom_getstring);
-EXPORT_SYMBOL(prom_getint);
-EXPORT_SYMBOL(prom_getintdefault);
-EXPORT_SYMBOL(prom_finddevice);
-EXPORT_SYMBOL(romvec);
-EXPORT_SYMBOL(__prom_getchild);
-EXPORT_SYMBOL(__prom_getsibling);
-
-/* sparc library symbols */
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(page_kernel);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(__copy_1page);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(bzero_1page);
-EXPORT_SYMBOL(__bzero);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__strncmp);
-EXPORT_SYMBOL(__memmove);
-
-/* Moving data to/from userspace. */
-EXPORT_SYMBOL(__copy_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
-EXPORT_SYMBOL(csum_partial);
-
-/* Cache flushing. */
-EXPORT_SYMBOL(sparc_flush_page_to_ram);
-
-/* For when serial stuff is built as modules. */
-EXPORT_SYMBOL(sun_do_break);
-
-EXPORT_SYMBOL(__ret_efault);
-
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__divdi3);
-
-EXPORT_SYMBOL(_Rem);
-EXPORT_SYMBOL(_Urem);
-EXPORT_SYMBOL(_Mul);
-EXPORT_SYMBOL(_Umul);
-EXPORT_SYMBOL(_Div);
-EXPORT_SYMBOL(_Udiv);
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-EXPORT_SYMBOL(do_BUG);
-#endif
-
-/* Sun Power Management Idle Handler */
-EXPORT_SYMBOL(pm_idle);
-
-EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c
index 0133211ab634..da8f804feb49 100644
--- a/arch/sparc/kernel/sparc_ksyms_64.c
+++ b/arch/sparc/kernel/sparc_ksyms_64.c
@@ -5,50 +5,16 @@
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
-/* Tell string.h we don't want memcpy etc. as cpp defines */
-#define EXPORT_SYMTAB_STROPS
-#define PROMLIB_INTERNAL
-
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/in6.h>
#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/fs_struct.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/syscalls.h>
-#include <linux/percpu.h>
#include <linux/init.h>
-#include <linux/rwsem.h>
-#include <net/compat.h>
-#include <asm/oplib.h>
+#include <asm/spinlock.h>
#include <asm/system.h>
-#include <asm/auxio.h>
-#include <asm/pgtable.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/idprom.h>
-#include <asm/elf.h>
-#include <asm/head.h>
-#include <asm/smp.h>
-#include <asm/ptrace.h>
-#include <asm/uaccess.h>
-#include <asm/checksum.h>
-#include <asm/fpumacro.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#ifdef CONFIG_SBUS
-#include <asm/dma.h>
-#endif
-#include <asm/ns87303.h>
-#include <asm/timer.h>
#include <asm/cpudata.h>
-#include <asm/ftrace.h>
+#include <asm/uaccess.h>
+#include <asm/spitfire.h>
+#include <asm/oplib.h>
#include <asm/hypervisor.h>
struct poll {
@@ -57,42 +23,6 @@ struct poll {
short revents;
};
-extern void die_if_kernel(char *str, struct pt_regs *regs);
-extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-extern void *__bzero(void *, size_t);
-extern void *__memscan_zero(void *, size_t);
-extern void *__memscan_generic(void *, int, size_t);
-extern __kernel_size_t strlen(const char *);
-extern void sys_sigsuspend(void);
-extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
-extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
-extern long sparc32_open(const char __user * filename, int flags, int mode);
-extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long pfn, unsigned long size, pgprot_t prot);
-
-extern int __ashrdi3(int, int);
-
-extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
-
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
-
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
- unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
- unsigned long *, unsigned long *, unsigned long *);
-
-/* Per-CPU information table */
-EXPORT_PER_CPU_SYMBOL(__cpu_data);
-
/* used by various drivers */
#ifdef CONFIG_SMP
/* Out of line rw-locking implementation. */
@@ -103,68 +33,24 @@ EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
#endif /* CONFIG_SMP */
-#ifdef CONFIG_MCOUNT
-EXPORT_SYMBOL(_mcount);
-#endif
-
-EXPORT_SYMBOL(sparc64_get_clock_tick);
-
-/* RW semaphores */
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
-
-/* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
-
-/* Atomic bit operations. */
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(change_bit);
-
+/* from helpers.S */
EXPORT_SYMBOL(__flushw_user);
+EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
+/* from head_64.S */
+EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
-EXPORT_SYMBOL(get_fb_unmapped_area);
-EXPORT_SYMBOL(flush_icache_range);
-
-EXPORT_SYMBOL(flush_dcache_page);
-#ifdef DCACHE_ALIASING_POSSIBLE
-EXPORT_SYMBOL(__flush_dcache_range);
-#endif
+EXPORT_SYMBOL(prom_root_node);
+/* from hvcalls.S */
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
-EXPORT_SYMBOL(auxio_set_led);
-EXPORT_SYMBOL(auxio_set_lte);
-#ifdef CONFIG_SBUS
-EXPORT_SYMBOL(sbus_set_sbus64);
-#endif
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
#ifdef CONFIG_PCI
+/* inline functions in asm/pci_64.h */
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_map_single);
@@ -173,112 +59,7 @@ EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(pci_dma_supported);
#endif
-/* I/O device mmaping on Sparc64. */
-EXPORT_SYMBOL(io_remap_pfn_range);
-
-EXPORT_SYMBOL(dump_fpu);
-
-/* math-emu wants this */
-EXPORT_SYMBOL(die_if_kernel);
-
-/* Kernel thread creation. */
-EXPORT_SYMBOL(kernel_thread);
-
-/* prom symbols */
-EXPORT_SYMBOL(idprom);
-EXPORT_SYMBOL(prom_root_node);
-EXPORT_SYMBOL(prom_getchild);
-EXPORT_SYMBOL(prom_getsibling);
-EXPORT_SYMBOL(prom_searchsiblings);
-EXPORT_SYMBOL(prom_firstprop);
-EXPORT_SYMBOL(prom_nextprop);
-EXPORT_SYMBOL(prom_getproplen);
-EXPORT_SYMBOL(prom_getproperty);
-EXPORT_SYMBOL(prom_node_has_property);
-EXPORT_SYMBOL(prom_setprop);
+/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
-EXPORT_SYMBOL(prom_finddevice);
-EXPORT_SYMBOL(prom_feval);
-EXPORT_SYMBOL(prom_getbool);
-EXPORT_SYMBOL(prom_getstring);
-EXPORT_SYMBOL(prom_getint);
-EXPORT_SYMBOL(prom_getintdefault);
-EXPORT_SYMBOL(__prom_getchild);
-EXPORT_SYMBOL(__prom_getsibling);
-
-/* sparc library symbols */
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(__strlen_user);
-EXPORT_SYMBOL(__strnlen_user);
-
-/* Special internal versions of library functions. */
-EXPORT_SYMBOL(_clear_page);
-EXPORT_SYMBOL(clear_user_page);
-EXPORT_SYMBOL(copy_user_page);
-EXPORT_SYMBOL(__bzero);
-EXPORT_SYMBOL(__memscan_zero);
-EXPORT_SYMBOL(__memscan_generic);
-EXPORT_SYMBOL(__memset);
-
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_from_user);
-EXPORT_SYMBOL(__csum_partial_copy_to_user);
-EXPORT_SYMBOL(ip_fast_csum);
-
-/* Moving data to/from/in userspace. */
-EXPORT_SYMBOL(___copy_to_user);
-EXPORT_SYMBOL(___copy_from_user);
-EXPORT_SYMBOL(___copy_in_user);
-EXPORT_SYMBOL(copy_to_user_fixup);
-EXPORT_SYMBOL(copy_from_user_fixup);
-EXPORT_SYMBOL(copy_in_user_fixup);
-EXPORT_SYMBOL(__strncpy_from_user);
-EXPORT_SYMBOL(__clear_user);
-
-/* Various address conversion macros use this. */
-EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
-
-/* No version information on this, heavily used in inline asm,
- * and will always be 'void __ret_efault(void)'.
- */
-EXPORT_SYMBOL(__ret_efault);
-
-/* No version information on these, as gcc produces such symbols. */
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(strncmp);
-
-void VISenter(void);
-/* RAID code needs this */
-EXPORT_SYMBOL(VISenter);
-
-/* for input/keybdev */
-EXPORT_SYMBOL(sun_do_break);
-EXPORT_SYMBOL(stop_a_enabled);
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-EXPORT_SYMBOL(do_BUG);
-#endif
-
-/* for ns8703 */
-EXPORT_SYMBOL(ns87303_lock);
-
-EXPORT_SYMBOL(tick_ops);
-
-EXPORT_SYMBOL(xor_vis_2);
-EXPORT_SYMBOL(xor_vis_3);
-EXPORT_SYMBOL(xor_vis_4);
-EXPORT_SYMBOL(xor_vis_5);
-
-EXPORT_SYMBOL(xor_niagara_2);
-EXPORT_SYMBOL(xor_niagara_3);
-EXPORT_SYMBOL(xor_niagara_4);
-EXPORT_SYMBOL(xor_niagara_5);
-
-EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 16ab0cb731c5..50afaed99c8a 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -60,7 +60,7 @@ extern int __smp4d_processor_id(void);
#define SMP_PRINTK(x)
#endif
-static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
+static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val)
{
__asm__ __volatile__("swap [%1], %0\n\t" :
"=&r" (val), "=&r" (ptr) :
@@ -115,7 +115,7 @@ void __cpuinit smp4d_callin(void)
local_flush_tlb_all();
/* Allow master to continue. */
- swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+ sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1);
local_flush_cache_all();
local_flush_tlb_all();
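[Editor's note] The rename above just keeps the local helper from clashing with a generic swap(); the function itself is an atomic exchange built on the sparc "swap" instruction. A portable userspace analogy using a GCC/Clang builtin (not the kernel code):

#include <stdio.h>

/* Atomically store 'val' into *ptr and return the previous contents,
 * which is what "swap [ptr], reg" does on sparc. */
static unsigned long xchg_ulong(volatile unsigned long *ptr, unsigned long val)
{
	return __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST);
}

int main(void)
{
	volatile unsigned long callin = 0;
	unsigned long old = xchg_ulong(&callin, 1);

	printf("old=%lu new=%lu\n", old, (unsigned long)callin);
	return 0;
}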
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 39749e32dc7e..09058fc39e73 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -23,6 +23,7 @@
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/utrap.h>
@@ -354,6 +355,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
return addr;
}
+EXPORT_SYMBOL(get_fb_unmapped_area);
/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 00f7383c7657..614ac7b4a9dd 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -48,6 +48,8 @@
#include "irq.h"
DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+
static int set_rtc_mmss(unsigned long);
static int sbus_do_settimeofday(struct timespec *tv);
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 54405d362148..db310aa00183 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -176,6 +176,7 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
};
struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
+EXPORT_SYMBOL(tick_ops);
static void stick_disable_irq(void)
{
@@ -639,6 +640,7 @@ unsigned long sparc64_get_clock_tick(unsigned int cpu)
return ft->clock_tick_ref;
return cpu_data(cpu).clock_tick;
}
+EXPORT_SYMBOL(sparc64_get_clock_tick);
#ifdef CONFIG_CPU_FREQ
@@ -727,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
irq_enter();
- kstat_this_cpu.irqs[0]++;
+ kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
if (unlikely(!evt->event_handler)) {
printk(KERN_WARNING
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 213645be6e92..358283341b47 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -424,6 +424,7 @@ void do_BUG(const char *file, int line)
// bust_spinlocks(1); XXX Not in our original BUG()
printk("kernel BUG at %s:%d!\n", file, line);
}
+EXPORT_SYMBOL(do_BUG);
#endif
/* Since we have our mappings set up, on multiprocessors we can spin them
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index bca3b4e09c41..c2d153d46586 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -128,6 +128,7 @@ void do_BUG(const char *file, int line)
bust_spinlocks(1);
printk("kernel BUG at %s:%d!\n", file, line);
}
+EXPORT_SYMBOL(do_BUG);
#endif
static DEFINE_SPINLOCK(dimm_handler_lock);
@@ -2261,6 +2262,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
do_exit(SIGKILL);
do_exit(SIGSEGV);
}
+EXPORT_SYMBOL(die_if_kernel);
#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index f164d5a850f9..379209982a07 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -589,7 +589,6 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
unsigned long pc = regs->tpc;
unsigned long tstate = regs->tstate;
u32 insn;
- u32 first, second;
u64 value;
u8 freg;
int flag;
@@ -601,19 +600,20 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
pc = (u32)pc;
if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
int asi = decode_asi(insn, regs);
+ u32 first, second;
int err;
if ((asi > ASI_SNFL) ||
(asi < ASI_P))
goto daex;
+ first = second = 0;
err = get_user(first, (u32 __user *)sfar);
if (!err)
err = get_user(second, (u32 __user *)(sfar + 4));
if (err) {
- if (asi & 0x2) /* NF */ {
- first = 0; second = 0;
- } else
+ if (!(asi & 0x2))
goto daex;
+ first = second = 0;
}
save_and_clear_fpu();
freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
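[Editor's note] The rework above pre-zeroes first/second and only tolerates a faulting user read when the ASI is a no-fault variant; anything else branches to the data-access-exception path. A toy model of that control flow, with fault and nf standing in for the get_user() result and the ASI check:

#include <stdio.h>

/* Returns 0 on success, -1 when the access should raise an exception. */
static int load_pair(int fault, int nf, unsigned int *first, unsigned int *second)
{
	*first = *second = 0;		/* pre-zero, as the patch now does */
	if (fault) {
		if (!nf)
			return -1;	/* non-NF ASI: propagate the fault */
		*first = *second = 0;	/* NF ASI: silently read zeros */
	}
	return 0;
}

int main(void)
{
	unsigned int a, b;

	printf("fault, normal ASI -> %d\n", load_pair(1, 0, &a, &b));
	printf("fault, NF ASI     -> %d (a=%u, b=%u)\n", load_pair(1, 1, &a, &b), a, b);
	printf("no fault          -> %d\n", load_pair(0, 0, &a, &b));
	return 0;
}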
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 375016e19144..273fc85269fc 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -42,3 +42,4 @@ lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o
obj-y += iomap.o
obj-$(CONFIG_SPARC32) += atomic32.o
+obj-y += ksyms.o
diff --git a/arch/sparc/lib/PeeCeeI.c b/arch/sparc/lib/PeeCeeI.c
index 46053e6ddd7b..6529f8657597 100644
--- a/arch/sparc/lib/PeeCeeI.c
+++ b/arch/sparc/lib/PeeCeeI.c
@@ -4,6 +4,8 @@
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/module.h>
+
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -15,6 +17,7 @@ void outsb(unsigned long __addr, const void *src, unsigned long count)
while (count--)
outb(*p++, addr);
}
+EXPORT_SYMBOL(outsb);
void outsw(unsigned long __addr, const void *src, unsigned long count)
{
@@ -25,6 +28,7 @@ void outsw(unsigned long __addr, const void *src, unsigned long count)
src += sizeof(u16);
}
}
+EXPORT_SYMBOL(outsw);
void outsl(unsigned long __addr, const void *src, unsigned long count)
{
@@ -78,6 +82,7 @@ void outsl(unsigned long __addr, const void *src, unsigned long count)
break;
}
}
+EXPORT_SYMBOL(outsl);
void insb(unsigned long __addr, void *dst, unsigned long count)
{
@@ -105,6 +110,7 @@ void insb(unsigned long __addr, void *dst, unsigned long count)
*pb++ = inb(addr);
}
}
+EXPORT_SYMBOL(insb);
void insw(unsigned long __addr, void *dst, unsigned long count)
{
@@ -132,6 +138,7 @@ void insw(unsigned long __addr, void *dst, unsigned long count)
*ps = le16_to_cpu(inw(addr));
}
}
+EXPORT_SYMBOL(insw);
void insl(unsigned long __addr, void *dst, unsigned long count)
{
@@ -200,4 +207,5 @@ void insl(unsigned long __addr, void *dst, unsigned long count)
}
}
}
+EXPORT_SYMBOL(insl);
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
new file mode 100644
index 000000000000..704b12668388
--- /dev/null
+++ b/arch/sparc/lib/ksyms.c
@@ -0,0 +1,196 @@
+/*
+ * Export of symbols defined in assembler
+ */
+
+/* Tell string.h we don't want memcpy etc. as cpp defines */
+#define EXPORT_SYMTAB_STROPS
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/checksum.h>
+#include <asm/uaccess.h>
+#include <asm/ftrace.h>
+
+/* string functions */
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(__strlen_user);
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(strncmp);
+
+/* mem* functions */
+extern void *__memscan_zero(void *, size_t);
+extern void *__memscan_generic(void *, int, size_t);
+extern void *__bzero(void *, size_t);
+
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__memscan_zero);
+EXPORT_SYMBOL(__memscan_generic);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(__bzero);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(__strncpy_from_user);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial);
+
+#ifdef CONFIG_MCOUNT
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/*
+ * sparc
+ */
+#ifdef CONFIG_SPARC32
+extern int __ashrdi3(int, int);
+extern int __ashldi3(int, int);
+extern int __lshrdi3(int, int);
+extern int __muldi3(int, int);
+extern int __divdi3(int, int);
+
+extern void (*__copy_1page)(void *, const void *);
+extern void (*bzero_1page)(void *);
+
+extern int __strncmp(const char *, const char *, __kernel_size_t);
+
+extern void ___rw_read_enter(void);
+extern void ___rw_read_try(void);
+extern void ___rw_read_exit(void);
+extern void ___rw_write_enter(void);
+extern void ___atomic24_add(void);
+extern void ___atomic24_sub(void);
+
+/* Alias functions whose names begin with "." and export the aliases.
+ * The module references will be fixed up by module_frob_arch_sections.
+ */
+extern int _Div(int, int);
+extern int _Mul(int, int);
+extern int _Rem(int, int);
+extern unsigned _Udiv(unsigned, unsigned);
+extern unsigned _Umul(unsigned, unsigned);
+extern unsigned _Urem(unsigned, unsigned);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(__csum_partial_copy_sparc_generic);
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(__copy_1page);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
+EXPORT_SYMBOL(bzero_1page);
+
+/* string functions */
+EXPORT_SYMBOL(__strncmp);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(__copy_user);
+
+/* Used by asm/spinlock.h */
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(___rw_read_enter);
+EXPORT_SYMBOL(___rw_read_try);
+EXPORT_SYMBOL(___rw_read_exit);
+EXPORT_SYMBOL(___rw_write_enter);
+#endif
+
+/* Atomic operations. */
+EXPORT_SYMBOL(___atomic24_add);
+EXPORT_SYMBOL(___atomic24_sub);
+
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__divdi3);
+
+EXPORT_SYMBOL(_Rem);
+EXPORT_SYMBOL(_Urem);
+EXPORT_SYMBOL(_Mul);
+EXPORT_SYMBOL(_Umul);
+EXPORT_SYMBOL(_Div);
+EXPORT_SYMBOL(_Udiv);
+#endif
+
+/*
+ * sparc64
+ */
+#ifdef CONFIG_SPARC64
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(ip_fast_csum);
+
+/* Moving data to/from/in userspace. */
+EXPORT_SYMBOL(___copy_to_user);
+EXPORT_SYMBOL(___copy_from_user);
+EXPORT_SYMBOL(___copy_in_user);
+EXPORT_SYMBOL(__clear_user);
+
+/* RW semaphores */
+EXPORT_SYMBOL(__down_read);
+EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write);
+EXPORT_SYMBOL(__down_write_trylock);
+EXPORT_SYMBOL(__up_read);
+EXPORT_SYMBOL(__up_write);
+EXPORT_SYMBOL(__downgrade_write);
+
+/* Atomic counter implementation. */
+EXPORT_SYMBOL(atomic_add);
+EXPORT_SYMBOL(atomic_add_ret);
+EXPORT_SYMBOL(atomic_sub);
+EXPORT_SYMBOL(atomic_sub_ret);
+EXPORT_SYMBOL(atomic64_add);
+EXPORT_SYMBOL(atomic64_add_ret);
+EXPORT_SYMBOL(atomic64_sub);
+EXPORT_SYMBOL(atomic64_sub_ret);
+
+/* Atomic bit operations. */
+EXPORT_SYMBOL(test_and_set_bit);
+EXPORT_SYMBOL(test_and_clear_bit);
+EXPORT_SYMBOL(test_and_change_bit);
+EXPORT_SYMBOL(set_bit);
+EXPORT_SYMBOL(clear_bit);
+EXPORT_SYMBOL(change_bit);
+
+/* Special internal versions of library functions. */
+EXPORT_SYMBOL(_clear_page);
+EXPORT_SYMBOL(clear_user_page);
+EXPORT_SYMBOL(copy_user_page);
+
+/* RAID code needs this */
+void VISenter(void);
+EXPORT_SYMBOL(VISenter);
+
+extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+EXPORT_SYMBOL(xor_vis_2);
+EXPORT_SYMBOL(xor_vis_3);
+EXPORT_SYMBOL(xor_vis_4);
+EXPORT_SYMBOL(xor_vis_5);
+
+extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *);
+extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *);
+extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+ unsigned long *, unsigned long *, unsigned long *);
+
+EXPORT_SYMBOL(xor_niagara_2);
+EXPORT_SYMBOL(xor_niagara_3);
+EXPORT_SYMBOL(xor_niagara_4);
+EXPORT_SYMBOL(xor_niagara_5);
+#endif
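[Editor's note] Taken together with the trimmed sparc_ksyms_*.c files above, the pattern throughout this patch is to keep C-defined exports next to their definitions and to reserve lib/ksyms.c for symbols defined in assembler. A hedged sketch of the C-side pattern (foo() is a made-up example, not a symbol from this patch):

#include <linux/module.h>

/* Hypothetical helper some driver module needs; the export now follows
 * the definition instead of living in a central sparc_ksyms file. */
int foo(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(foo);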
diff --git a/arch/sparc/lib/user_fixup.c b/arch/sparc/lib/user_fixup.c
index 05a361b0a1a4..ac96ae236709 100644
--- a/arch/sparc/lib/user_fixup.c
+++ b/arch/sparc/lib/user_fixup.c
@@ -7,6 +7,8 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/module.h>
+
#include <asm/uaccess.h>
/* Calculating the exact fault address when using
@@ -40,6 +42,7 @@ unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned l
return size;
}
+EXPORT_SYMBOL(copy_from_user_fixup);
unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
{
@@ -47,6 +50,7 @@ unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned lon
return compute_size((unsigned long) to, size, &offset);
}
+EXPORT_SYMBOL(copy_to_user_fixup);
unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
{
@@ -64,3 +68,4 @@ unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned lo
return size;
}
+EXPORT_SYMBOL(copy_in_user_fixup);
diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c
index a289261da9fd..5edcac184eaf 100644
--- a/arch/sparc/mm/generic_32.c
+++ b/arch/sparc/mm/generic_32.c
@@ -95,3 +95,4 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
flush_tlb_range(vma, beg, end);
return error;
}
+EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c
index f362c2037013..04f2bf4cd571 100644
--- a/arch/sparc/mm/generic_64.c
+++ b/arch/sparc/mm/generic_64.c
@@ -161,3 +161,4 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
flush_tlb_range(vma, beg, end);
return error;
}
+EXPORT_SYMBOL(io_remap_pfn_range);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 01fc6c254292..752d0c9fb544 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -62,6 +62,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
return (void*) vaddr;
}
+EXPORT_SYMBOL(kmap_atomic);
void kunmap_atomic(void *kvaddr, enum km_type type)
{
@@ -98,6 +99,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
pagefault_enable();
}
+EXPORT_SYMBOL(kunmap_atomic);
/* We may be fed a pagetable here by ptep_to_xxx and others. */
struct page *kmap_atomic_to_page(void *ptr)
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index fec926021f49..cbb282dab5a7 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -38,11 +38,16 @@
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long *sparc_valid_addr_bitmap;
+EXPORT_SYMBOL(sparc_valid_addr_bitmap);
unsigned long phys_base;
+EXPORT_SYMBOL(phys_base);
+
unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
unsigned long page_kernel;
+EXPORT_SYMBOL(page_kernel);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
unsigned long sparc_unmapped_base;
@@ -522,3 +527,4 @@ void sparc_flush_page_to_ram(struct page *page)
if (vaddr)
__flush_page_to_ram(vaddr);
}
+EXPORT_SYMBOL(sparc_flush_page_to_ram);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c77c7ef5d5d4..00373ce2d8fb 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -146,6 +146,7 @@ static void __init read_obp_memory(const char *property,
}
unsigned long *sparc64_valid_addr_bitmap __read_mostly;
+EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
/* Kernel physical address base and size in bytes. */
unsigned long kern_base __read_mostly;
@@ -369,6 +370,7 @@ void flush_dcache_page(struct page *page)
out:
put_cpu();
}
+EXPORT_SYMBOL(flush_dcache_page);
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
@@ -396,6 +398,7 @@ void __kprobes flush_icache_range(unsigned long start, unsigned long end)
}
}
}
+EXPORT_SYMBOL(flush_icache_range);
void mmu_info(struct seq_file *m)
{
@@ -599,6 +602,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
"i" (ASI_DCACHE_INVALIDATE));
}
}
+EXPORT_SYMBOL(__flush_dcache_range);
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 873217c6d823..6193c33ed4d4 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -8,16 +8,20 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
struct linux_romvec *romvec;
+EXPORT_SYMBOL(romvec);
+
enum prom_major_version prom_vers;
unsigned int prom_rev, prom_prev;
/* The root node of the prom device tree. */
int prom_root_node;
+EXPORT_SYMBOL(prom_root_node);
/* Pointer to the device tree operations structure. */
struct linux_nodeops *prom_nodeops;
diff --git a/arch/sparc/prom/misc_32.c b/arch/sparc/prom/misc_32.c
index cf6c3f6d36c3..4d61c540bb3d 100644
--- a/arch/sparc/prom/misc_32.c
+++ b/arch/sparc/prom/misc_32.c
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/module.h>
+
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
@@ -44,6 +46,7 @@ prom_feval(char *fstring)
restore_current();
spin_unlock_irqrestore(&prom_lock, flags);
}
+EXPORT_SYMBOL(prom_feval);
/* Drop into the prom, with the chance to continue with the 'go'
* prom command.
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index 9b0c0760901e..eedffb4fec2d 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -11,6 +11,8 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/module.h>
+
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/system.h>
@@ -54,6 +56,7 @@ void prom_feval(const char *fstring)
p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_INOUT(1, 1), fstring);
}
+EXPORT_SYMBOL(prom_feval);
#ifdef CONFIG_SMP
extern void smp_capture(void);
diff --git a/arch/sparc/prom/ranges.c b/arch/sparc/prom/ranges.c
index 64579a376419..cd5790853ff6 100644
--- a/arch/sparc/prom/ranges.c
+++ b/arch/sparc/prom/ranges.c
@@ -6,6 +6,8 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
+
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/types.h>
@@ -62,6 +64,7 @@ prom_apply_obio_ranges(struct linux_prom_registers *regs, int nregs)
if(num_obio_ranges)
prom_adjust_regs(regs, nregs, promlib_obio_ranges, num_obio_ranges);
}
+EXPORT_SYMBOL(prom_apply_obio_ranges);
void __init prom_ranges_init(void)
{
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c
index 6d8187357331..646d244b1fdb 100644
--- a/arch/sparc/prom/tree_32.c
+++ b/arch/sparc/prom/tree_32.c
@@ -5,13 +5,12 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
-#define PROMLIB_INTERNAL
-
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/ctype.h>
+#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
@@ -50,6 +49,7 @@ int prom_getchild(int node)
return cnode;
}
+EXPORT_SYMBOL(prom_getchild);
/* Internal version of prom_getsibling that does not alter return values. */
int __prom_getsibling(int node)
@@ -81,6 +81,7 @@ int prom_getsibling(int node)
return sibnode;
}
+EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
* Return -1 on error.
@@ -99,6 +100,7 @@ int prom_getproplen(int node, const char *prop)
spin_unlock_irqrestore(&prom_lock, flags);
return ret;
}
+EXPORT_SYMBOL(prom_getproplen);
/* Acquire a property 'prop' at node 'node' and place it in
* 'buffer' which has a size of 'bufsize'. If the acquisition
@@ -119,6 +121,7 @@ int prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
spin_unlock_irqrestore(&prom_lock, flags);
return ret;
}
+EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
* on failure.
@@ -132,6 +135,7 @@ int prom_getint(int node, char *prop)
return -1;
}
+EXPORT_SYMBOL(prom_getint);
/* Acquire an integer property, upon error return the passed default
* integer.
@@ -145,6 +149,7 @@ int prom_getintdefault(int node, char *property, int deflt)
return retval;
}
+EXPORT_SYMBOL(prom_getintdefault);
/* Acquire a boolean property, 1=TRUE 0=FALSE. */
int prom_getbool(int node, char *prop)
@@ -155,6 +160,7 @@ int prom_getbool(int node, char *prop)
if(retval == -1) return 0;
return 1;
}
+EXPORT_SYMBOL(prom_getbool);
/* Acquire a property whose value is a string, returns a null
* string on error. The char pointer is the user supplied string
@@ -169,6 +175,7 @@ void prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
user_buf[0] = 0;
return;
}
+EXPORT_SYMBOL(prom_getstring);
/* Does the device at node 'node' have name 'name'?
@@ -204,6 +211,7 @@ int prom_searchsiblings(int node_start, char *nodename)
return 0;
}
+EXPORT_SYMBOL(prom_searchsiblings);
/* Interal version of nextprop that does not alter return values. */
char * __prom_nextprop(int node, char * oprop)
@@ -228,6 +236,7 @@ char * prom_firstprop(int node, char *bufer)
return __prom_nextprop(node, "");
}
+EXPORT_SYMBOL(prom_firstprop);
/* Return the property type string after property type 'oprop'
* at node 'node' . Returns empty string if no more
@@ -240,6 +249,7 @@ char * prom_nextprop(int node, char *oprop, char *buffer)
return __prom_nextprop(node, oprop);
}
+EXPORT_SYMBOL(prom_nextprop);
int prom_finddevice(char *name)
{
@@ -287,6 +297,7 @@ int prom_finddevice(char *name)
}
return node;
}
+EXPORT_SYMBOL(prom_finddevice);
int prom_node_has_property(int node, char *prop)
{
@@ -299,6 +310,7 @@ int prom_node_has_property(int node, char *prop)
} while (*current_property);
return 0;
}
+EXPORT_SYMBOL(prom_node_has_property);
/* Set property 'pname' at node 'node' to value 'value' which has a length
* of 'size' bytes. Return the number of bytes the prom accepted.
@@ -316,6 +328,7 @@ int prom_setprop(int node, const char *pname, char *value, int size)
spin_unlock_irqrestore(&prom_lock, flags);
return ret;
}
+EXPORT_SYMBOL(prom_setprop);
int prom_inst2pkg(int inst)
{
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 281aea44790b..8ea73ddc61dc 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/module.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
@@ -32,6 +33,7 @@ inline int prom_getchild(int node)
if(cnode == -1) return 0;
return (int)cnode;
}
+EXPORT_SYMBOL(prom_getchild);
inline int prom_getparent(int node)
{
@@ -63,6 +65,7 @@ inline int prom_getsibling(int node)
return sibnode;
}
+EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
* Return -1 on error.
@@ -75,6 +78,7 @@ inline int prom_getproplen(int node, const char *prop)
P1275_INOUT(2, 1),
node, prop);
}
+EXPORT_SYMBOL(prom_getproplen);
/* Acquire a property 'prop' at node 'node' and place it in
* 'buffer' which has a size of 'bufsize'. If the acquisition
@@ -97,6 +101,7 @@ inline int prom_getproperty(int node, const char *prop,
node, prop, buffer, P1275_SIZE(plen));
}
}
+EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
* on failure.
@@ -110,6 +115,7 @@ inline int prom_getint(int node, const char *prop)
return -1;
}
+EXPORT_SYMBOL(prom_getint);
/* Acquire an integer property, upon error return the passed default
* integer.
@@ -124,6 +130,7 @@ int prom_getintdefault(int node, const char *property, int deflt)
return retval;
}
+EXPORT_SYMBOL(prom_getintdefault);
/* Acquire a boolean property, 1=TRUE 0=FALSE. */
int prom_getbool(int node, const char *prop)
@@ -134,6 +141,7 @@ int prom_getbool(int node, const char *prop)
if(retval == -1) return 0;
return 1;
}
+EXPORT_SYMBOL(prom_getbool);
/* Acquire a property whose value is a string, returns a null
* string on error. The char pointer is the user supplied string
@@ -148,7 +156,7 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
user_buf[0] = 0;
return;
}
-
+EXPORT_SYMBOL(prom_getstring);
/* Does the device at node 'node' have name 'name'?
* YES = 1 NO = 0
@@ -181,6 +189,7 @@ int prom_searchsiblings(int node_start, const char *nodename)
return 0;
}
+EXPORT_SYMBOL(prom_searchsiblings);
/* Return the first property type for node 'node'.
* buffer should be at least 32B in length
@@ -194,6 +203,7 @@ inline char *prom_firstprop(int node, char *buffer)
node, (char *) 0x0, buffer);
return buffer;
}
+EXPORT_SYMBOL(prom_firstprop);
/* Return the property type string after property type 'oprop'
* at node 'node' . Returns NULL string if no more
@@ -217,6 +227,7 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer)
node, oprop, buffer);
return buffer;
}
+EXPORT_SYMBOL(prom_nextprop);
int
prom_finddevice(const char *name)
@@ -228,6 +239,7 @@ prom_finddevice(const char *name)
P1275_INOUT(1, 1),
name);
}
+EXPORT_SYMBOL(prom_finddevice);
int prom_node_has_property(int node, const char *prop)
{
@@ -241,7 +253,8 @@ int prom_node_has_property(int node, const char *prop)
} while (*buf);
return 0;
}
-
+EXPORT_SYMBOL(prom_node_has_property);
+
/* Set property 'pname' at node 'node' to value 'value' which has a length
* of 'size' bytes. Return the number of bytes the prom accepted.
*/
@@ -264,6 +277,7 @@ prom_setprop(int node, const char *pname, char *value, int size)
P1275_INOUT(4, 1),
node, pname, value, P1275_SIZE(size));
}
+EXPORT_SYMBOL(prom_setprop);
inline int prom_inst2pkg(int inst)
{
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 3c14ed07dc4e..01e7c4c5c7fe 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
CFI_DEF_CFA rsp,0
CFI_REGISTER rsp,rbp
SWAPGS_UNSAFE_STACK
- movq %gs:pda_kernelstack, %rsp
- addq $(PDA_STACKOFFSET),%rsp
+ movq PER_CPU_VAR(kernel_stack), %rsp
+ addq $(KERNEL_STACK_OFFSET),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
ENTRY(ia32_cstar_target)
CFI_STARTPROC32 simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,PDA_STACKOFFSET
+ CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
movl %esp,%r8d
CFI_REGISTER rsp,r8
- movq %gs:pda_kernelstack,%rsp
+ movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs on/off section: the syscall
* disabled irqs and here we enable it straight after entry:
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644
index 000000000000..82f613c607ce
--- /dev/null
+++ b/arch/x86/include/asm/apicnum.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index e02a359d2aa5..02b47a603fc8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@
/*
* Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
*/
#ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
*
* This is the same as test_and_set_bit on x86.
*/
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
return oldbit;
}
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
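The note added at the top of this hunk explains the signature changes that follow: helpers such as set_bit() and clear_bit() contain more than one statement, and older gcc may decide not to inline them, which defeats the IS_IMMEDIATE(nr) compile-time check. A minimal sketch of the pattern, using a hypothetical two-statement helper that is not part of this patch:

    #include <linux/bitops.h>      /* set_bit() */
    #include <linux/compiler.h>    /* __always_inline, barrier() */

    /* Forced inline so a constant 'nr' still reaches set_bit() as a constant. */
    static __always_inline void set_bit_and_order(unsigned int nr,
                                                  volatile unsigned long *addr)
    {
            set_bit(nr, addr);     /* locked bit operation */
            barrier();             /* second statement: keep compiler ordering */
    }

With a plain 'inline', gcc is free to emit the helper out of line; 'nr' then arrives as a variable and the cheaper immediate-operand path inside set_bit() can no longer be chosen.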
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bae482df6039..f03b23e32864 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -7,6 +7,20 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
+#define safe_smp_processor_id() 0
+#define stack_smp_processor_id() 0
+
+#endif /* CONFIG_SMP */
+
struct x86_cpu {
struct cpu cpu;
};
@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
#endif
DECLARE_PER_CPU(int, cpu_state);
+
+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id 0
+#endif
+
#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644
index 000000000000..26c6dad90479
--- /dev/null
+++ b/arch/x86/include/asm/cpumask.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 0930b4f8d672..c68c361697e1 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -1,39 +1,21 @@
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
-#ifdef CONFIG_X86_32
#include <linux/compiler.h>
#include <asm/percpu.h>
+#ifndef __ASSEMBLY__
struct task_struct;
DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
- return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
static __always_inline struct task_struct *get_current(void)
{
- return read_pda(pcurrent);
+ return percpu_read(current_task);
}
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
#endif /* __ASSEMBLY__ */
-#endif /* X86_32 */
-
-#define current get_current()
-
#endif /* _ASM_X86_CURRENT_H */
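Under the unified per-cpu macros introduced later in this diff (asm/percpu.h), the new get_current() becomes a single %gs-relative load on x86-64. A rough expansion sketch, for illustration only (SMP build assumed):

    struct task_struct *cur = percpu_read(current_task);
    /* percpu_read(current_task)
     *   -> percpu_from_op("mov", per_cpu__current_task)
     *   -> asm("movq %%gs:%P1,%0" : "=r" (ret__) : "m" (per_cpu__current_task));
     * with ret__ then handed back as the value of cur.
     */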
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
index 7a07897a7888..7838276bfe51 100644
--- a/arch/x86/include/asm/hardirq_32.h
+++ b/arch/x86/include/asm/hardirq_32.h
@@ -20,6 +20,9 @@ typedef struct {
DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
#define __ARCH_IRQ_STAT
#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
index b5a6b5d56704..42930b279215 100644
--- a/arch/x86/include/asm/hardirq_64.h
+++ b/arch/x86/include/asm/hardirq_64.h
@@ -3,22 +3,37 @@
#include <linux/threads.h>
#include <linux/irq.h>
-#include <asm/pda.h>
#include <asm/apic.h>
+typedef struct {
+ unsigned int __softirq_pending;
+ unsigned int __nmi_count; /* arch dependent */
+ unsigned int apic_timer_irqs; /* arch dependent */
+ unsigned int apic_perf_irqs; /* arch dependent */
+ unsigned int irq0_irqs;
+ unsigned int irq_resched_count;
+ unsigned int irq_call_count;
+ unsigned int irq_tlb_count;
+ unsigned int irq_thermal_count;
+ unsigned int irq_spurious_count;
+ unsigned int irq_threshold_count;
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS
#define __ARCH_IRQ_STAT 1
-#define inc_irq_stat(member) add_pda(member, 1)
+#define inc_irq_stat(member) percpu_add(irq_stat.member, 1)
-#define local_softirq_pending() read_pda(__softirq_pending)
+#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
#define __ARCH_SET_SOFTIRQ_PENDING 1
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x) or_pda(__softirq_pending, (x))
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x) percpu_or(irq_stat.__softirq_pending, (x))
extern void ack_bad_irq(unsigned int irq);
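With irq_cpustat_t now an ordinary per-cpu structure, the statistics helpers reduce to plain per-cpu arithmetic. A usage sketch (the call site is hypothetical; the macro is the one defined just above):

    /* e.g. from an interrupt path running on this CPU: */
    inc_irq_stat(irq_resched_count);
    /* expands to percpu_add(irq_stat.irq_resched_count, 1): a single
     * addl on %gs:per_cpu__irq_stat.irq_resched_count, with no
     * read-modify-write sequence in C and no PDA field lookup. */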
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7a1f44ac1f17..08ec793aa043 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
extern int nr_ioapics;
extern int nr_ioapic_registers[MAX_IO_APICS];
-/*
- * MP-BIOS irq configuration table structures:
- */
-
#define MP_MAX_IOAPIC_PIN 127
-struct mp_config_ioapic {
- unsigned long mp_apicaddr;
- unsigned int mp_apicid;
- unsigned char mp_type;
- unsigned char mp_apicver;
- unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
- unsigned int mp_dstapic;
- unsigned char mp_type;
- unsigned char mp_irqtype;
- unsigned short mp_irqflag;
- unsigned char mp_srcbus;
- unsigned char mp_srcbusirq;
- unsigned char mp_dstirq;
-};
-
/* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
/* # of MP IRQ source entries */
extern int mp_irq_entries;
/* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* non-0 if default (table-less) MP configuration */
extern int mpc_default_type;
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
index 86afd7473457..d7ed33ee94e9 100644
--- a/arch/x86/include/asm/irq_regs_32.h
+++ b/arch/x86/include/asm/irq_regs_32.h
@@ -15,7 +15,7 @@ DECLARE_PER_CPU(struct pt_regs *, irq_regs);
static inline struct pt_regs *get_irq_regs(void)
{
- return x86_read_percpu(irq_regs);
+ return percpu_read(irq_regs);
}
static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
@@ -23,7 +23,7 @@ static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
struct pt_regs *old_regs;
old_regs = get_irq_regs();
- x86_write_percpu(irq_regs, new_regs);
+ percpu_write(irq_regs, new_regs);
return old_regs;
}
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 21a0b92027f5..1554d0236e03 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -110,6 +110,8 @@
#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
+#include <asm/apicnum.h> /* need MAX_IO_APICS */
+
#ifndef CONFIG_SPARSE_IRQ
# if NR_CPUS < MAX_IO_APICS
# define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
@@ -117,11 +119,12 @@
# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
# endif
#else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-# define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-# define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+
+# define NR_IRQS \
+ ((8 * NR_CPUS) > (32 * MAX_IO_APICS) ? \
+ (NR_VECTORS + (8 * NR_CPUS)) : \
+ (NR_VECTORS + (32 * MAX_IO_APICS))) \
+
#endif
#elif defined(CONFIG_X86_VOYAGER)
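Plugging numbers into the new one-line NR_IRQS makes it easier to read. On a 64-bit build, MAX_IO_APICS is 128 (from the new asm/apicnum.h above); with, say, NR_CPUS = 64 (a configuration choice, not something fixed by this patch), 8 * NR_CPUS = 512 and 32 * MAX_IO_APICS = 4096, so the second arm is larger and NR_IRQS = NR_VECTORS + 4096, the same value the old #if/#else pair would have produced. Only beyond NR_CPUS = 512 does the CPU-scaled arm take over.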
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
index ceb013660146..89897a6a65b9 100644
--- a/arch/x86/include/asm/mach-default/mach_wakecpu.h
+++ b/arch/x86/include/asm/mach-default/mach_wakecpu.h
@@ -24,7 +24,13 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
+#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
static inline void inquire_remote_apic(int apicid)
{
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
index 7e98ce1d2c0e..08b53454f831 100644
--- a/arch/x86/include/asm/mmu_context_32.h
+++ b/arch/x86/include/asm/mmu_context_32.h
@@ -4,8 +4,8 @@
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
- if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
@@ -19,8 +19,8 @@ static inline void switch_mm(struct mm_struct *prev,
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
- x86_write_percpu(cpu_tlbstate.active_mm, next);
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev,
}
#ifdef CONFIG_SMP
else {
- x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
- BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
index 677d36e9540a..c4572505ab3e 100644
--- a/arch/x86/include/asm/mmu_context_64.h
+++ b/arch/x86/include/asm/mmu_context_64.h
@@ -1,13 +1,11 @@
#ifndef _ASM_X86_MMU_CONTEXT_64_H
#define _ASM_X86_MMU_CONTEXT_64_H
-#include <asm/pda.h>
-
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
- if (read_pda(mmu_state) == TLBSTATE_OK)
- write_pda(mmu_state, TLBSTATE_LAZY);
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+ percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
@@ -19,8 +17,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* stop flush ipis for the previous mm */
cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
- write_pda(mmu_state, TLBSTATE_OK);
- write_pda(active_mm, next);
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ percpu_write(cpu_tlbstate.active_mm, next);
#endif
cpu_set(cpu, next->cpu_vm_mask);
load_cr3(next->pgd);
@@ -30,9 +28,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#ifdef CONFIG_SMP
else {
- write_pda(mmu_state, TLBSTATE_OK);
- if (read_pda(active_mm) != next)
- BUG();
+ percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f..4a7f96d7c188 100644
--- a/arch/x86/include/asm/mpspec_def.h
+++ b/arch/x86/include/asm/mpspec_def.h
@@ -24,17 +24,18 @@
# endif
#endif
-struct intel_mp_floating {
- char mpf_signature[4]; /* "_MP_" */
- unsigned int mpf_physptr; /* Configuration table address */
- unsigned char mpf_length; /* Our length (paragraphs) */
- unsigned char mpf_specification;/* Specification version */
- unsigned char mpf_checksum; /* Checksum (makes sum 0) */
- unsigned char mpf_feature1; /* Standard or configuration ? */
- unsigned char mpf_feature2; /* Bit7 set for IMCR|PIC */
- unsigned char mpf_feature3; /* Unused (0) */
- unsigned char mpf_feature4; /* Unused (0) */
- unsigned char mpf_feature5; /* Unused (0) */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+ char signature[4]; /* "_MP_" */
+ unsigned int physptr; /* Configuration table address */
+ unsigned char length; /* Our length (paragraphs) */
+ unsigned char specification; /* Specification version */
+ unsigned char checksum; /* Checksum (makes sum 0) */
+ unsigned char feature1; /* Standard or configuration ? */
+ unsigned char feature2; /* Bit7 set for IMCR|PIC */
+ unsigned char feature3; /* Unused (0) */
+ unsigned char feature4; /* Unused (0) */
+ unsigned char feature5; /* Unused (0) */
};
#define MPC_SIGNATURE "PCMP"
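A short consumer sketch for the renamed structure (illustrative only; the in-tree scanner is arch/x86/kernel/mpparse.c, which is not shown here): the BIOS places the floating pointer in low memory, tagged with the "_MP_" signature, and physptr then points at the "PCMP" configuration table.

    #include <linux/string.h>      /* memcmp() */
    /* struct mpf_intel comes from <asm/mpspec_def.h> as defined above. */

    static int looks_like_mpf(const struct mpf_intel *mpf)
    {
            /* The floating pointer is one 16-byte paragraph ('length' == 1)
             * and starts with the "_MP_" signature, per the MP spec. */
            return memcmp(mpf->signature, "_MP_", 4) == 0 && mpf->length == 1;
    }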
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index cb988aab716d..14080d22edb3 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -58,15 +58,15 @@ struct mtrr_gentry {
#endif /* !__i386__ */
struct mtrr_var_range {
- u32 base_lo;
- u32 base_hi;
- u32 mask_lo;
- u32 mask_hi;
+ __u32 base_lo;
+ __u32 base_hi;
+ __u32 mask_lo;
+ __u32 mask_hi;
};
/* In the Intel processor's MTRR interface, the MTRR type is always held in
an 8 bit field: */
-typedef u8 mtrr_type;
+typedef __u8 mtrr_type;
#define MTRR_NUM_FIXED_RANGES 88
#define MTRR_MAX_VAR_RANGES 256
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0..e27fdbe5f9e4 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -13,8 +13,8 @@
#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ba3e2ff6aedc..c26c6bf4da00 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -244,7 +244,8 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_single)(unsigned long addr);
- void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+ void (*flush_tlb_others)(const struct cpumask *cpus,
+ struct mm_struct *mm,
unsigned long va);
/* Hooks for allocating and freeing a pagetable top-level */
@@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr)
PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm,
unsigned long va)
{
- PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+ PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}
static inline int paravirt_pgd_alloc(struct mm_struct *mm)
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index 90a8d9d4206b..c31ca048a901 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -5,134 +5,41 @@
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
+#include <linux/threads.h>
#include <asm/page.h>
+#include <asm/percpu.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
- struct task_struct *pcurrent; /* 0 Current process */
- unsigned long data_offset; /* 8 Per cpu data offset from linker
- address */
- unsigned long kernelstack; /* 16 top of kernel stack for current */
- unsigned long oldrsp; /* 24 user rsp for system call */
- int irqcount; /* 32 Irq nesting counter. Starts -1 */
- unsigned int cpunumber; /* 36 Logical CPU number */
+ unsigned long unused1;
+ unsigned long unused2;
+ unsigned long unused3;
+ unsigned long unused4;
+ int unused5;
+ unsigned int unused6; /* 36 was cpunumber */
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long stack_canary; /* 40 stack canary value */
/* gcc-ABI: this canary MUST be at
offset 40!!! */
#endif
- char *irqstackptr;
- short nodenumber; /* number of current node (32k max) */
short in_bootmem; /* pda lives in bootmem */
- unsigned int __softirq_pending;
- unsigned int __nmi_count; /* number of NMI on this CPUs */
- short mmu_state;
- short isidle;
- struct mm_struct *active_mm;
- unsigned apic_timer_irqs;
- unsigned apic_perf_irqs;
- unsigned irq0_irqs;
- unsigned irq_resched_count;
- unsigned irq_call_count;
- unsigned irq_tlb_count;
- unsigned irq_thermal_count;
- unsigned irq_threshold_count;
- unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
-extern struct x8664_pda **_cpu_pda;
+DECLARE_PER_CPU(struct x8664_pda, __pda);
extern void pda_init(int);
-#define cpu_pda(i) (_cpu_pda[i])
+#define cpu_pda(cpu) (&per_cpu(__pda, cpu))
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs. So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val) \
-do { \
- typedef typeof(_proxy_pda.field) T__; \
- if (0) { T__ tmp__; tmp__ = (val); } /* type checking */ \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- case 4: \
- asm(op "l %1,%%gs:%c2" : \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i" (pda_offset(field))); \
- break; \
- case 8: \
- asm(op "q %1,%%gs:%c2": \
- "+m" (_proxy_pda.field) : \
- "ri" ((T__)val), \
- "i"(pda_offset(field))); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
-} while (0)
-
-#define pda_from_op(op, field) \
-({ \
- typeof(_proxy_pda.field) ret__; \
- switch (sizeof(_proxy_pda.field)) { \
- case 2: \
- asm(op "w %%gs:%c1,%0" : \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 4: \
- asm(op "l %%gs:%c1,%0": \
- "=r" (ret__): \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- case 8: \
- asm(op "q %%gs:%c1,%0": \
- "=r" (ret__) : \
- "i" (pda_offset(field)), \
- "m" (_proxy_pda.field)); \
- break; \
- default: \
- __bad_pda_field(); \
- } \
- ret__; \
-})
-
-#define read_pda(field) pda_from_op("mov", field)
-#define write_pda(field, val) pda_to_op("mov", field, val)
-#define add_pda(field, val) pda_to_op("add", field, val)
-#define sub_pda(field, val) pda_to_op("sub", field, val)
-#define or_pda(field, val) pda_to_op("or", field, val)
+#define read_pda(field) percpu_read(__pda.field)
+#define write_pda(field, val) percpu_write(__pda.field, val)
+#define add_pda(field, val) percpu_add(__pda.field, val)
+#define sub_pda(field, val) percpu_sub(__pda.field, val)
+#define or_pda(field, val) percpu_or(__pda.field, val)
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit, field) \
-({ \
- int old__; \
- asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (_proxy_pda.field) \
- : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
- old__; \
-})
+ x86_test_and_clear_bit_percpu(bit, __pda.field)
#endif
-#define PDA_STACKOFFSET (5*8)
-
#endif /* _ASM_X86_PDA_H */
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index ece72053ba63..165d5272ece1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -2,53 +2,12 @@
#define _ASM_X86_PERCPU_H
#ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
- in the PDA. Longer term the PDA and every per cpu variable
- should be just put into a single section and referenced directly
- from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg gs
+#define __percpu_mov_op movq
+#else
+#define __percpu_seg fs
+#define __percpu_mov_op movl
#endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment. x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime. The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect. However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var) \
- ({ \
- typeof(per_cpu_var(var)) __tmp; \
- preempt_disable(); \
- __tmp = __get_cpu_var(var); \
- preempt_enable(); \
- __tmp; \
- })
-
-#define x86_write_percpu(var, val) \
- do { \
- preempt_disable(); \
- __get_cpu_var(var) = (val); \
- preempt_enable(); \
- } while(0)
-
-#else /* CONFIG_X86_64 */
#ifdef __ASSEMBLY__
@@ -65,47 +24,26 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
* PER_CPU(cpu_gdt_descr, %ebx)
*/
#ifdef CONFIG_SMP
-#define PER_CPU(var, reg) \
- movl %fs:per_cpu__##this_cpu_off, reg; \
+#define PER_CPU(var, reg) \
+ __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %fs:per_cpu__##var
+#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
#else /* ! SMP */
-#define PER_CPU(var, reg) \
- movl $per_cpu__##var, reg
+#define PER_CPU(var, reg) \
+ __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
#else /* ...!ASSEMBLY */
-/*
- * PER_CPU finds an address of a per-cpu variable.
- *
- * Args:
- * var - variable name
- * cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- * PER_CPU(cpu_gdt_descr, %ebx)
- */
-#ifdef CONFIG_SMP
-
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
-
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
-
-#else /* !SMP */
+#include <linux/stringify.h>
-#define __percpu_seg ""
-
-#endif /* SMP */
-
-#include <asm-generic/percpu.h>
-
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_SMP
+#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x) "%" #x
+#endif
/* For arch-specific code, we can use direct single-insn ops (they
* don't give an lvalue though). */
@@ -120,20 +58,25 @@ do { \
} \
switch (sizeof(var)) { \
case 1: \
- asm(op "b %1,"__percpu_seg"%0" \
+ asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 2: \
- asm(op "w %1,"__percpu_seg"%0" \
+ asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
case 4: \
- asm(op "l %1,"__percpu_seg"%0" \
+ asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
: "ri" ((T__)val)); \
break; \
+ case 8: \
+ asm(op "q %1,"__percpu_arg(0) \
+ : "+m" (var) \
+ : "r" ((T__)val)); \
+ break; \
default: __bad_percpu_size(); \
} \
} while (0)
@@ -143,17 +86,22 @@ do { \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_seg"%1,%0" \
+ asm(op "b "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_seg"%1,%0" \
+ asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_seg"%1,%0" \
+ asm(op "l "__percpu_arg(1)",%0" \
+ : "=r" (ret__) \
+ : "m" (var)); \
+ break; \
+ case 8: \
+ asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
@@ -162,13 +110,36 @@ do { \
ret__; \
})
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var) \
+({ \
+ int old__; \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (per_cpu__##var) \
+ : "dIr" (bit)); \
+ old__; \
+})
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
+#ifdef CONFIG_X86_64
+extern void load_pda_offset(int cpu);
+#else
+static inline void load_pda_offset(int cpu) { }
+#endif
+
#endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */
#ifdef CONFIG_SMP
@@ -195,9 +166,9 @@ do { \
#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu) \
- (early_per_cpu_ptr(_name) ? \
- early_per_cpu_ptr(_name)[_cpu] : \
- per_cpu(_name, _cpu))
+ *(early_per_cpu_ptr(_name) ? \
+ &early_per_cpu_ptr(_name)[_cpu] : \
+ &per_cpu(_name, _cpu))
#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 83e69f4a37f0..06bbcbd66e9c 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -341,6 +341,25 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
+static inline int is_new_memtype_allowed(unsigned long flags,
+ unsigned long new_flags)
+{
+ /*
+ * Certain new memtypes are not allowed with certain
+ * requested memtype:
+ * - request is uncached, return cannot be write-back
+ * - request is write-combine, return cannot be write-back
+ */
+ if ((flags == _PAGE_CACHE_UC_MINUS &&
+ new_flags == _PAGE_CACHE_WB) ||
+ (flags == _PAGE_CACHE_WC &&
+ new_flags == _PAGE_CACHE_WB)) {
+ return 0;
+ }
+
+ return 1;
+}
+
#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING
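A small caller sketch for the helper added above (the call site is hypothetical; the _PAGE_CACHE_* flags are the ones the function already tests):

    /* A write-combine request must not be satisfied with write-back: */
    if (!is_new_memtype_allowed(_PAGE_CACHE_WC, _PAGE_CACHE_WB))
            pr_debug("memtype: WC request cannot fall back to WB\n");

    /* The same holds for an uncached-minus request: */
    if (!is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WB))
            pr_debug("memtype: UC- request cannot fall back to WB\n");

Any other combination, including an exact match of request and return type, returns 1 and is accepted.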
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 091cd8855f2e..f511246fa6cd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -378,6 +378,9 @@ union thread_xstate {
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+DECLARE_PER_CPU(char[IRQ_STACK_SIZE], irq_stack);
+DECLARE_PER_CPU(char *, irq_stack_ptr);
#endif
extern void print_cpu_info(struct cpuinfo_x86 *);
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ebe858cdc8a3..536949749bc2 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -100,7 +100,6 @@ extern unsigned long init_pg_tables_start;
extern unsigned long init_pg_tables_end;
#else
-void __init x86_64_init_pda(void);
void __init x86_64_start_kernel(char *real_mode);
void __init x86_64_start_reservations(char *real_mode_data);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 19953df61c52..68636e767a91 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -17,32 +17,7 @@
#endif
#include <asm/pda.h>
#include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);
+#include <asm/cpumask.h>
extern int smp_num_siblings;
extern unsigned int num_processors;
@@ -50,9 +25,7 @@ extern unsigned int num_processors;
DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_t, cpu_core_map);
DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
DECLARE_PER_CPU(int, cpu_number);
-#endif
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -167,8 +140,6 @@ void play_dead_common(void);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);
-extern void prefill_possible_map(void);
-
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
@@ -177,10 +148,6 @@ static inline int num_booting_cpus(void)
{
return cpumask_weight(cpu_callout_mask);
}
-#else
-static inline void prefill_possible_map(void)
-{
-}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +158,11 @@ extern unsigned disabled_cpus __cpuinitdata;
* from the initial startup. We map APIC_BASE very early in page_setup(),
* so this is correct in the x86 case.
*/
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))
#define stack_smp_processor_id() \
({ \
@@ -205,10 +172,6 @@ extern int safe_smp_processor_id(void);
})
#define safe_smp_processor_id() smp_processor_id()
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu) boot_cpu_physical_apicid
-#define safe_smp_processor_id() 0
-#define stack_smp_processor_id() 0
#endif
#ifdef CONFIG_X86_LOCAL_APIC
@@ -251,11 +214,5 @@ static inline int hard_smp_processor_id(void)
#endif /* CONFIG_X86_LOCAL_APIC */
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id 0
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 8e626ea33a1a..d1dc27dba36d 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -94,7 +94,7 @@ do { \
"call __switch_to\n\t" \
".globl thread_return\n" \
"thread_return:\n\t" \
- "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
+ "movq "__percpu_arg([current_task])",%%rsi\n\t" \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
"movq %%rax,%%rdi\n\t" \
@@ -106,7 +106,7 @@ do { \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[tif_fork] "i" (TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
- [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
+ [current_task] "m" (per_cpu_var(current_task)) \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index efdf93820aed..f38488989db7 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -196,25 +196,21 @@ static inline struct thread_info *current_thread_info(void)
#else /* X86_32 */
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
/*
* macros/functions for gaining access to the thread information structure
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
- struct thread_info *ti;
- ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
- return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
- asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+ ti = (void *)(percpu_read(kernel_stack) +
+ KERNEL_STACK_OFFSET - THREAD_SIZE);
return ti;
}
@@ -222,8 +218,8 @@ static inline struct thread_info *stack_thread_info(void)
/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg) \
- movq %gs:pda_kernelstack,reg ; \
- subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+ movq PER_CPU_VAR(kernel_stack),reg ; \
+ subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
#endif
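The arithmetic in the new current_thread_info() matches the way kernel_stack is initialised in cpu/common.c later in this diff: the per-cpu variable points KERNEL_STACK_OFFSET (5*8 = 40) bytes below the top of the current task's stack, so adding KERNEL_STACK_OFFSET back and subtracting THREAD_SIZE lands on the bottom of the thread_union, where struct thread_info lives. A worked example with made-up numbers (THREAD_SIZE = 8192 assumed for illustration):

    stack bottom (thread_info)   base                     = 0xffff880012340000
    stack top                    base + 8192              = 0xffff880012342000
    kernel_stack (per-cpu)       top - 40                 = 0xffff880012341fd8
    current_thread_info()        kernel_stack + 40 - 8192 = base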
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 0e7bbb549116..d3539f998f88 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
__flush_tlb();
}
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
unsigned long va)
{
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
flush_tlb_mm(vma->vm_mm);
}
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
- unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm, unsigned long va);
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
-#ifdef CONFIG_X86_32
struct tlb_state {
struct mm_struct *active_mm;
int state;
- char __cacheline_padding[L1_CACHE_BYTES-8];
};
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
-void reset_lazy_tlbstate(void);
-#else
static inline void reset_lazy_tlbstate(void)
{
+ percpu_write(cpu_tlbstate.state, 0);
+ percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
-#endif
#endif /* SMP */
#ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
#endif
static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_all();
}
+extern void zap_low_mappings(void);
+
#endif /* _ASM_X86_TLBFLUSH_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 4e2f2e0aab27..ffea1fe03a99 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -83,7 +83,8 @@ extern cpumask_t *node_to_cpumask_map;
DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
/* Returns the number of the current Node. */
-#define numa_node_id() read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id() percpu_read(node_number)
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
@@ -102,10 +103,7 @@ static inline int cpu_to_node(int cpu)
/* Same function but used if called before per_cpu areas are setup */
static inline int early_cpu_to_node(int cpu)
{
- if (early_per_cpu_ptr(x86_cpu_to_node_map))
- return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
- return per_cpu(x86_cpu_to_node_map, cpu);
+ return early_per_cpu(x86_cpu_to_node_map, cpu);
}
/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
index 780ba0ab94f9..90f06c25221d 100644
--- a/arch/x86/include/asm/trampoline.h
+++ b/arch/x86/include/asm/trampoline.h
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
extern unsigned long init_rsp;
extern unsigned long initial_code;
+extern unsigned long initial_gs;
#define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
#define TRAMPOLINE_BASE 0x6000
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 50423c7b56b2..74e6393bfddb 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -325,7 +325,8 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
#define cpubit_isset(cpu, bau_local_cpumask) \
test_bit((cpu), (bau_local_cpumask).bits)
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
+extern int uv_flush_tlb_others(struct cpumask *,
+ struct mm_struct *, unsigned long);
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d37593c2f438..4cb5964f1499 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
DECLARE_BITMAP(used, 256);
bitmap_zero(used, 256);
for (i = 0; i < nr_ioapics; i++) {
- struct mp_config_ioapic *ia = &mp_ioapics[i];
- __set_bit(ia->mp_apicid, used);
+ struct mpc_ioapic *ia = &mp_ioapics[i];
+ __set_bit(ia->apicid, used);
}
if (!test_bit(id, used))
return id;
@@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
idx = nr_ioapics;
- mp_ioapics[idx].mp_type = MP_IOAPIC;
- mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
- mp_ioapics[idx].mp_apicaddr = address;
+ mp_ioapics[idx].type = MP_IOAPIC;
+ mp_ioapics[idx].flags = MPC_APIC_USABLE;
+ mp_ioapics[idx].apicaddr = address;
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
- mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+ mp_ioapics[idx].apicid = uniq_ioapic_id(id);
#ifdef CONFIG_X86_32
- mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+ mp_ioapics[idx].apicver = io_apic_get_version(idx);
#else
- mp_ioapics[idx].mp_apicver = 0;
+ mp_ioapics[idx].apicver = 0;
#endif
/*
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
*/
- mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
mp_ioapic_routing[idx].gsi_base = gsi_base;
mp_ioapic_routing[idx].gsi_end = gsi_base +
io_apic_get_redir_entries(idx);
- printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
- "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
- mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+ "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+ mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
nr_ioapics++;
}
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
- struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+ struct mpc_intsrc *mp_irq)
{
- memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+ memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
}
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
- struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+ struct mpc_intsrc *m)
{
- return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+ return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
}
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
{
int i;
@@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
int ioapic;
int pin;
- struct mp_config_intsrc mp_irq;
+ struct mpc_intsrc mp_irq;
/*
* Convert 'gsi' to 'ioapic.pin'.
@@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
if ((bus_irq == 0) && (trigger == 3))
trigger = 1;
- mp_irq.mp_type = MP_INTSRC;
- mp_irq.mp_irqtype = mp_INT;
- mp_irq.mp_irqflag = (trigger << 2) | polarity;
- mp_irq.mp_srcbus = MP_ISA_BUS;
- mp_irq.mp_srcbusirq = bus_irq; /* IRQ */
- mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
- mp_irq.mp_dstirq = pin; /* INTIN# */
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = (trigger << 2) | polarity;
+ mp_irq.srcbus = MP_ISA_BUS;
+ mp_irq.srcbusirq = bus_irq; /* IRQ */
+ mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+ mp_irq.dstirq = pin; /* INTIN# */
save_mp_irq(&mp_irq);
}
@@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
int i;
int ioapic;
unsigned int dstapic;
- struct mp_config_intsrc mp_irq;
+ struct mpc_intsrc mp_irq;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
/*
@@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
ioapic = mp_find_ioapic(0);
if (ioapic < 0)
return;
- dstapic = mp_ioapics[ioapic].mp_apicid;
+ dstapic = mp_ioapics[ioapic].apicid;
/*
* Use the default configuration for the IRQs 0-15. Unless
@@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
int idx;
for (idx = 0; idx < mp_irq_entries; idx++) {
- struct mp_config_intsrc *irq = mp_irqs + idx;
+ struct mpc_intsrc *irq = mp_irqs + idx;
/* Do we already have a mapping for this ISA IRQ? */
- if (irq->mp_srcbus == MP_ISA_BUS
- && irq->mp_srcbusirq == i)
+ if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
break;
/* Do we already have a mapping for this IOAPIC pin */
- if (irq->mp_dstapic == dstapic &&
- irq->mp_dstirq == i)
+ if (irq->dstapic == dstapic && irq->dstirq == i)
break;
}
@@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
continue; /* IRQ already used */
}
- mp_irq.mp_type = MP_INTSRC;
- mp_irq.mp_irqflag = 0; /* Conforming */
- mp_irq.mp_srcbus = MP_ISA_BUS;
- mp_irq.mp_dstapic = dstapic;
- mp_irq.mp_irqtype = mp_INT;
- mp_irq.mp_srcbusirq = i; /* Identity mapped */
- mp_irq.mp_dstirq = i;
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqflag = 0; /* Conforming */
+ mp_irq.srcbus = MP_ISA_BUS;
+ mp_irq.dstapic = dstapic;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.srcbusirq = i; /* Identity mapped */
+ mp_irq.dstirq = i;
save_mp_irq(&mp_irq);
}
@@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
u32 gsi, int triggering, int polarity)
{
#ifdef CONFIG_X86_MPPARSE
- struct mp_config_intsrc mp_irq;
+ struct mpc_intsrc mp_irq;
int ioapic;
if (!acpi_ioapic)
return 0;
/* print the entry should happen on mptable identically */
- mp_irq.mp_type = MP_INTSRC;
- mp_irq.mp_irqtype = mp_INT;
- mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
- mp_irq.mp_srcbus = number;
- mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+ mp_irq.srcbus = number;
+ mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
ioapic = mp_find_ioapic(gsi);
- mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
- mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+ mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+ mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
save_mp_irq(&mp_irq);
#endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 707c1f6f95fa..4abff454c55b 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
stack_start.sp = temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
+ initial_gs = per_cpu_offset(smp_processor_id());
#endif
initial_code = (unsigned long)wakeup_long64;
saved_magic = 0x123456789abcdef0;
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index d2d17b8d10f8..e9af14f748ea 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -48,6 +48,7 @@
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/i8259.h>
+#include <asm/smp.h>
#include <mach_apic.h>
#include <mach_apicdef.h>
@@ -895,6 +896,10 @@ void disable_local_APIC(void)
{
unsigned int value;
+ /* APIC hasn't been mapped yet */
+ if (!apic_phys)
+ return;
+
clear_local_APIC();
/*
@@ -1126,6 +1131,11 @@ void __cpuinit setup_local_APIC(void)
unsigned int value;
int i, j;
+ if (disable_apic) {
+ disable_ioapic_setup();
+ return;
+ }
+
#ifdef CONFIG_X86_32
/* Pound the ESR really hard over the head with a big hammer - mbligh */
if (lapic_is_integrated() && esr_disable) {
@@ -1567,11 +1577,11 @@ int apic_version[MAX_APICS];
int __init APIC_init_uniprocessor(void)
{
-#ifdef CONFIG_X86_64
if (disable_apic) {
pr_info("Apic disabled\n");
return -1;
}
+#ifdef CONFIG_X86_64
if (!cpu_has_apic) {
disable_apic = 1;
pr_info("Apic disabled by BIOS\n");
@@ -1869,17 +1879,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
#endif
#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
- /* are we being called early in kernel startup? */
- if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
- u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
- u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
- cpu_to_apicid[cpu] = apicid;
- bios_cpu_apicid[cpu] = apicid;
- } else {
- per_cpu(x86_cpu_to_apicid, cpu) = apicid;
- per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
- }
+ early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+ early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
set_cpu_possible(cpu, true);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 1d41d3f1edbc..64c834a39aa8 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -49,13 +49,7 @@ int main(void)
BLANK();
#undef ENTRY
#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
- ENTRY(kernelstack);
- ENTRY(oldrsp);
- ENTRY(pcurrent);
- ENTRY(irqcount);
- ENTRY(cpunumber);
- ENTRY(irqstackptr);
- ENTRY(data_offset);
+ DEFINE(pda_size, sizeof(struct x8664_pda));
BLANK();
#undef ENTRY
#ifdef CONFIG_PARAVIRT
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 667e5d561ed7..95eb30e1e677 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -22,6 +22,8 @@
#include <asm/asm.h>
#include <asm/numa.h>
#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
@@ -879,54 +881,34 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_PAGE_ALIGNED(char[IRQ_STACK_SIZE], irq_stack);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(char *, irq_stack_ptr); /* will be set during per cpu init */
+#else
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+ per_cpu_var(irq_stack) + IRQ_STACK_SIZE - 64;
+#endif
+
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
void __cpuinit pda_init(int cpu)
{
- struct x8664_pda *pda = cpu_pda(cpu);
-
	/* Set up data that may be needed in __get_free_pages early */
loadsegment(fs, 0);
loadsegment(gs, 0);
- /* Memory clobbers used to order PDA accessed */
- mb();
- wrmsrl(MSR_GS_BASE, pda);
- mb();
-
- pda->cpunumber = cpu;
- pda->irqcount = -1;
- pda->kernelstack = (unsigned long)stack_thread_info() -
- PDA_STACKOFFSET + THREAD_SIZE;
- pda->active_mm = &init_mm;
- pda->mmu_state = 0;
-
- if (cpu == 0) {
- /* others are initialized in smpboot.c */
- pda->pcurrent = &init_task;
- pda->irqstackptr = boot_cpu_stack;
- pda->irqstackptr += IRQSTACKSIZE - 64;
- } else {
- if (!pda->irqstackptr) {
- pda->irqstackptr = (char *)
- __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
- if (!pda->irqstackptr)
- panic("cannot allocate irqstack for cpu %d",
- cpu);
- pda->irqstackptr += IRQSTACKSIZE - 64;
- }
- if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
- pda->nodenumber = cpu_to_node(cpu);
- }
+ load_pda_offset(cpu);
}
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
- DEBUG_STKSZ] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+ [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+ __aligned(PAGE_SIZE);
extern asmlinkage void ignore_sysret(void);
@@ -984,15 +966,18 @@ void __cpuinit cpu_init(void)
struct tss_struct *t = &per_cpu(init_tss, cpu);
struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
unsigned long v;
- char *estacks = NULL;
struct task_struct *me;
int i;
/* CPU 0 is initialised in head64.c */
if (cpu != 0)
pda_init(cpu);
- else
- estacks = boot_exception_stacks;
+
+#ifdef CONFIG_NUMA
+ if (cpu != 0 && percpu_read(node_number) == 0 &&
+ cpu_to_node(cpu) != NUMA_NO_NODE)
+ percpu_write(node_number, cpu_to_node(cpu));
+#endif
me = current;
@@ -1026,18 +1011,13 @@ void __cpuinit cpu_init(void)
* set up and load the per-CPU TSS
*/
if (!orig_ist->ist[0]) {
- static const unsigned int order[N_EXCEPTION_STACKS] = {
- [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
- [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+ static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+ [DEBUG_STACK - 1] = DEBUG_STKSZ
};
+ char *estacks = per_cpu(exception_stacks, cpu);
for (v = 0; v < N_EXCEPTION_STACKS; v++) {
- if (cpu) {
- estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
- if (!estacks)
- panic("Cannot allocate exception "
- "stack %ld %d\n", v, cpu);
- }
- estacks += PAGE_SIZE << order[v];
+ estacks += sizes[v];
orig_ist->ist[v] = t->x86_tss.ist[v] =
(unsigned long)estacks;
}
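The cpu_init() hunk above stops allocating secondary-CPU exception stacks with __get_free_pages() and instead carves every IST stack out of one statically sized per-CPU block, advancing a cursor by each stack's size. A small userspace sketch of that carving loop, not the kernel code itself; the sizes and slot numbers below are illustrative:

#include <stdio.h>

#define N_EXCEPTION_STACKS 4			/* illustrative */
#define EXCEPTION_STKSZ    4096
#define DEBUG_STKSZ        8192
#define DEBUG_STACK        2			/* which slot gets the big stack */

static char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
			     DEBUG_STKSZ];

int main(void)
{
	static const unsigned int sizes[N_EXCEPTION_STACKS] = {
		[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
		[DEBUG_STACK - 1] = DEBUG_STKSZ
	};
	char *estacks = exception_stacks;
	unsigned long ist[N_EXCEPTION_STACKS];
	int v;

	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		estacks += sizes[v];			/* advance past this stack */
		ist[v] = (unsigned long)estacks;	/* stacks grow down from here */
		printf("IST%d top at offset %lu\n", v,
		       ist[v] - (unsigned long)exception_stacks);
	}
	return 0;
}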
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 06fcd8f9323c..8f3c95c7e61f 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -150,9 +150,8 @@ struct drv_cmd {
u32 val;
};
-static long do_drv_read(void *_cmd)
+static void do_drv_read(struct drv_cmd *cmd)
{
- struct drv_cmd *cmd = _cmd;
u32 h;
switch (cmd->type) {
@@ -167,12 +166,10 @@ static long do_drv_read(void *_cmd)
default:
break;
}
- return 0;
}
-static long do_drv_write(void *_cmd)
+static void do_drv_write(struct drv_cmd *cmd)
{
- struct drv_cmd *cmd = _cmd;
u32 lo, hi;
switch (cmd->type) {
@@ -189,23 +186,30 @@ static long do_drv_write(void *_cmd)
default:
break;
}
- return 0;
}
static void drv_read(struct drv_cmd *cmd)
{
+ cpumask_t saved_mask = current->cpus_allowed;
cmd->val = 0;
- work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
+ set_cpus_allowed_ptr(current, cmd->mask);
+ do_drv_read(cmd);
+ set_cpus_allowed_ptr(current, &saved_mask);
}
static void drv_write(struct drv_cmd *cmd)
{
+ cpumask_t saved_mask = current->cpus_allowed;
unsigned int i;
for_each_cpu(i, cmd->mask) {
- work_on_cpu(i, do_drv_write, cmd);
+ set_cpus_allowed_ptr(current, cpumask_of(i));
+ do_drv_write(cmd);
}
+
+ set_cpus_allowed_ptr(current, &saved_mask);
+ return;
}
static u32 get_cur_val(const struct cpumask *mask)
@@ -231,15 +235,8 @@ static u32 get_cur_val(const struct cpumask *mask)
return 0;
}
- if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
- return 0;
-
- cpumask_copy(cmd.mask, mask);
-
drv_read(&cmd);
- free_cpumask_var(cmd.mask);
-
dprintk("get_cur_val = %u\n", cmd.val);
return cmd.val;
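The drv_read()/drv_write() hunks replace work_on_cpu() with the older pattern of saving the caller's allowed-CPU mask, pinning the task to the target CPU(s), doing the access, and restoring the mask. A minimal userspace sketch of the same save/pin/restore shape using sched_setaffinity(2), with the MSR access replaced by a placeholder callback; this is an analogy, not the driver code:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static void do_on_cpu(int cpu, void (*fn)(void *), void *arg)
{
	cpu_set_t saved, target;

	/* Save the current affinity so it can be restored afterwards. */
	if (sched_getaffinity(0, sizeof(saved), &saved))
		return;

	CPU_ZERO(&target);
	CPU_SET(cpu, &target);
	if (sched_setaffinity(0, sizeof(target), &target) == 0)
		fn(arg);				/* runs on 'cpu' */

	sched_setaffinity(0, sizeof(saved), &saved);	/* restore */
}

static void read_something(void *arg)
{
	(void)arg;					/* placeholder for the real read */
	printf("running on CPU %d\n", sched_getcpu());
}

int main(void)
{
	do_on_cpu(0, read_something, NULL);
	return 0;
}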
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be78..58527a9fc404 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -132,7 +132,16 @@ struct _cpuid4_info {
union _cpuid4_leaf_ecx ecx;
unsigned long size;
unsigned long can_disable;
- cpumask_t shared_cpu_map; /* future?: only cpus/node is needed */
+ DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+ union _cpuid4_leaf_eax eax;
+ union _cpuid4_leaf_ebx ebx;
+ union _cpuid4_leaf_ecx ecx;
+ unsigned long size;
+ unsigned long can_disable;
};
#ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
}
static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
if (index < 3)
return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
}
static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+ struct _cpuid4_info_regs *this_leaf)
{
union _cpuid4_leaf_eax eax;
union _cpuid4_leaf_ebx ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
return 0;
}
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+ struct _cpuid4_info_regs *leaf_regs =
+ (struct _cpuid4_info_regs *)this_leaf;
+
+ return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
static int __cpuinit find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
* parameters cpuid leaf to find the cache details
*/
for (i = 0; i < num_cache_leaves; i++) {
- struct _cpuid4_info this_leaf;
-
+ struct _cpuid4_info_regs this_leaf;
int retval;
- retval = cpuid4_cache_lookup(i, &this_leaf);
+ retval = cpuid4_cache_lookup_regs(i, &this_leaf);
if (retval >= 0) {
switch(this_leaf.eax.split.level) {
case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
if (num_threads_sharing == 1)
- cpu_set(cpu, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
else {
index_msb = get_count_order(num_threads_sharing);
for_each_online_cpu(i) {
if (cpu_data(i).apicid >> index_msb ==
c->apicid >> index_msb) {
- cpu_set(i, this_leaf->shared_cpu_map);
+ cpumask_set_cpu(i,
+ to_cpumask(this_leaf->shared_cpu_map));
if (i != cpu && per_cpu(cpuid4_info, i)) {
- sibling_leaf = CPUID4_INFO_IDX(i, index);
- cpu_set(cpu, sibling_leaf->shared_cpu_map);
+ sibling_leaf =
+ CPUID4_INFO_IDX(i, index);
+ cpumask_set_cpu(cpu, to_cpumask(
+ sibling_leaf->shared_cpu_map));
}
}
}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
int sibling;
this_leaf = CPUID4_INFO_IDX(cpu, index);
- for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+ for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
- cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+ cpumask_clear_cpu(cpu,
+ to_cpumask(sibling_leaf->shared_cpu_map));
}
}
#else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
int n = 0;
if (len > 1) {
- cpumask_t *mask = &this_leaf->shared_cpu_map;
+ const struct cpumask *mask;
+ mask = to_cpumask(this_leaf->shared_cpu_map);
n = type?
cpulist_scnprintf(buf, len-2, mask) :
cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
- int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+ const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+ int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
ssize_t ret = 0;
int i;
@@ -718,7 +742,8 @@ static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
size_t count)
{
- int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+ const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+ int node = cpu_to_node(cpumask_first(mask));
struct pci_dev *dev = NULL;
unsigned int ret, index, val;
@@ -863,7 +888,7 @@ err_out:
return -ENOMEM;
}
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
- cpu_set(cpu, cache_dev_map);
+ cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
if (per_cpu(cpuid4_info, cpu) == NULL)
return;
- if (!cpu_isset(cpu, cache_dev_map))
+ if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
return;
- cpu_clear(cpu, cache_dev_map);
+ cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
for (i = 0; i < num_cache_leaves; i++)
kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
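These cacheinfo hunks turn shared_cpu_map and cache_dev_map from full cpumask_t values into raw bitmaps that are only viewed as cpumasks via to_cpumask(). A self-contained sketch of the word/bit indexing that cpumask_set_cpu(), cpumask_test_cpu() and cpumask_clear_cpu() boil down to; NR_CPUS here is an arbitrary illustrative constant:

#include <stdio.h>
#include <limits.h>

#define NR_CPUS          64			/* illustrative only */
#define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static void bitmap_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static void bitmap_clear_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
}

static int bitmap_test_bit(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS) = { 0 };

	bitmap_set_bit(shared_cpu_map, 3);
	printf("cpu3 set: %d\n", bitmap_test_bit(shared_cpu_map, 3));
	bitmap_clear_bit(shared_cpu_map, 3);
	printf("cpu3 set: %d\n", bitmap_test_bit(shared_cpu_map, 3));
	return 0;
}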
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..4772e91e8246 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
struct threshold_bank {
struct kobject *kobj;
struct threshold_block *blocks;
- cpumask_t cpus;
+ cpumask_var_t cpus;
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifdef CONFIG_SMP
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
- i = first_cpu(per_cpu(cpu_core_map, cpu));
+ i = cpumask_first(&per_cpu(cpu_core_map, cpu));
/* first core not up yet */
if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out;
- b->cpus = per_cpu(cpu_core_map, cpu);
+ cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
+ if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+ kfree(b);
+ err = -ENOMEM;
+ goto out;
+ }
b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
if (!b->kobj)
goto out_free;
#ifndef CONFIG_SMP
- b->cpus = CPU_MASK_ALL;
+ cpumask_setall(b->cpus);
#else
- b->cpus = per_cpu(cpu_core_map, cpu);
+ cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
if (err)
goto out_free;
- for_each_cpu_mask_nr(i, b->cpus) {
+ for_each_cpu(i, b->cpus) {
if (i == cpu)
continue;
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
out_free:
per_cpu(threshold_banks, cpu)[bank] = NULL;
+ free_cpumask_var(b->cpus);
kfree(b);
out:
return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
#endif
/* remove all sibling symlinks before unregistering */
- for_each_cpu_mask_nr(i, b->cpus) {
+ for_each_cpu(i, b->cpus) {
if (i == cpu)
continue;
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
free_out:
kobject_del(b->kobj);
kobject_put(b->kobj);
+ free_cpumask_var(b->cpus);
kfree(b);
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c689d19e35ab..11b93cabdf78 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -24,7 +24,7 @@
#include <asm/apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/virtext.h>
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index c302d0707048..d35db5993fd6 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
- unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+ unsigned long *irq_stack_end =
+ (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned used = 0;
struct thread_info *tinfo;
int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
stack = (unsigned long *) estack_end[-2];
continue;
}
- if (irqstack_end) {
- unsigned long *irqstack;
- irqstack = irqstack_end -
- (IRQSTACKSIZE - 64) / sizeof(*irqstack);
+ if (irq_stack_end) {
+ unsigned long *irq_stack;
+ irq_stack = irq_stack_end -
+ (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
- if (stack >= irqstack && stack < irqstack_end) {
+ if (stack >= irq_stack && stack < irq_stack_end) {
if (ops->stack(data, "IRQ") < 0)
break;
bp = print_context_stack(tinfo, stack, bp,
- ops, data, irqstack_end, &graph);
+ ops, data, irq_stack_end, &graph);
/*
* We link to the next stack (which would be
* the process stack normally) the last
* pointer (index -1 to end) in the IRQ stack:
*/
- stack = (unsigned long *) (irqstack_end[-1]);
- irqstack_end = NULL;
+ stack = (unsigned long *) (irq_stack_end[-1]);
+ irq_stack_end = NULL;
ops->stack(data, "EOI");
continue;
}
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack;
int i;
const int cpu = smp_processor_id();
- unsigned long *irqstack_end =
- (unsigned long *) (cpu_pda(cpu)->irqstackptr);
- unsigned long *irqstack =
- (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+ unsigned long *irq_stack_end =
+ (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+ unsigned long *irq_stack =
+ (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
/*
* debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
stack = sp;
for (i = 0; i < kstack_depth_to_print; i++) {
- if (stack >= irqstack && stack <= irqstack_end) {
- if (stack == irqstack_end) {
- stack = (unsigned long *) (irqstack_end[-1]);
+ if (stack >= irq_stack && stack <= irq_stack_end) {
+ if (stack == irq_stack_end) {
+ stack = (unsigned long *) (irq_stack_end[-1]);
printk(" <EOI> ");
}
} else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
int i;
unsigned long sp;
const int cpu = smp_processor_id();
- struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+ struct task_struct *cur = current;
sp = regs->sp;
printk("CPU %d ", cpu);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index d6f0490a7391..46469029e9d3 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1203,7 +1203,6 @@ nmi_stack_correct:
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
- TRACE_IRQS_OFF
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
@@ -1244,7 +1243,6 @@ nmi_espfix_stack:
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
- TRACE_IRQS_OFF
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1954a9662203..c092e7d2686d 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -52,6 +52,7 @@
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
+#include <asm/percpu.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
@@ -209,7 +210,7 @@ ENTRY(native_usergs_sysret64)
/* %rsp:at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp offset=0
- movq %gs:pda_oldrsp,\tmp
+ movq PER_CPU_VAR(old_rsp),\tmp
movq \tmp,RSP+\offset(%rsp)
movq $__USER_DS,SS+\offset(%rsp)
movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +221,7 @@ ENTRY(native_usergs_sysret64)
.macro RESTORE_TOP_OF_STACK tmp offset=0
movq RSP+\offset(%rsp),\tmp
- movq \tmp,%gs:pda_oldrsp
+ movq \tmp,PER_CPU_VAR(old_rsp)
movq EFLAGS+\offset(%rsp),\tmp
movq \tmp,R11+\offset(%rsp)
.endm
@@ -336,15 +337,15 @@ ENTRY(save_args)
je 1f
SWAPGS
/*
- * irqcount is used to check if a CPU is already on an interrupt stack
+ * irq_count is used to check if a CPU is already on an interrupt stack
* or not. While this is essentially redundant with preempt_count it is
* a little cheaper to use a separate counter in the PDA (short of
* moving irq_enter into assembly, which would be too much work)
*/
-1: incl %gs:pda_irqcount
+1: incl PER_CPU_VAR(irq_count)
jne 2f
popq_cfi %rax /* move return address... */
- mov %gs:pda_irqstackptr,%rsp
+ mov PER_CPU_VAR(irq_stack_ptr),%rsp
EMPTY_FRAME 0
pushq_cfi %rax /* ... to the new stack */
/*
@@ -467,7 +468,7 @@ END(ret_from_fork)
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
- CFI_DEF_CFA rsp,PDA_STACKOFFSET
+ CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
@@ -478,8 +479,8 @@ ENTRY(system_call)
*/
ENTRY(system_call_after_swapgs)
- movq %rsp,%gs:pda_oldrsp
- movq %gs:pda_kernelstack,%rsp
+ movq %rsp,PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
/*
* No need to follow this irqs off/on section - it's straight
* and short:
@@ -522,7 +523,7 @@ sysret_check:
CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
/*CFI_REGISTER rflags,r11*/
- movq %gs:pda_oldrsp, %rsp
+ movq PER_CPU_VAR(old_rsp), %rsp
USERGS_SYSRET64
CFI_RESTORE_STATE
@@ -832,11 +833,11 @@ common_interrupt:
XCPT_FRAME
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
- /* 0(%rsp): oldrsp-ARGOFFSET */
+ /* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- decl %gs:pda_irqcount
+ decl PER_CPU_VAR(irq_count)
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
@@ -1077,10 +1078,10 @@ ENTRY(\sym)
TRACE_IRQS_OFF
movq %rsp,%rdi /* pt_regs pointer */
xorl %esi,%esi /* no error code */
- movq %gs:pda_data_offset, %rbp
- subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ PER_CPU(init_tss, %rbp)
+ subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
call \do_sym
- addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+ addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
jmp paranoid_exit /* %ebx: no swapgs flag */
CFI_ENDPROC
END(\sym)
@@ -1264,14 +1265,14 @@ ENTRY(call_softirq)
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
- incl %gs:pda_irqcount
- cmove %gs:pda_irqstackptr,%rsp
+ incl PER_CPU_VAR(irq_count)
+ cmove PER_CPU_VAR(irq_stack_ptr),%rsp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
- decl %gs:pda_irqcount
+ decl PER_CPU_VAR(irq_count)
ret
CFI_ENDPROC
END(call_softirq)
@@ -1301,15 +1302,15 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
movq %rdi, %rsp # we don't return, adjust the stack frame
CFI_ENDPROC
DEFAULT_FRAME
-11: incl %gs:pda_irqcount
+11: incl PER_CPU_VAR(irq_count)
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
- cmovzq %gs:pda_irqstackptr,%rsp
+ cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
pushq %rbp # backlink for old unwinder
call xen_evtchn_do_upcall
popq %rsp
CFI_DEF_CFA_REGISTER rsp
- decl %gs:pda_irqcount
+ decl PER_CPU_VAR(irq_count)
jmp error_exit
CFI_ENDPROC
END(do_hypervisor_callback)
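The per-CPU irq_count used throughout these entry_64.S hunks starts at -1; the incl switches %rsp to the IRQ stack only when the counter becomes zero, so nested interrupts keep running on the stack they are already on. A C sketch of that enter/exit counting, with the actual stack switch reduced to a flag; this is an illustration, not the real entry code:

#include <stdio.h>

static int irq_count = -1;		/* -1: not on the IRQ stack */
static int on_irq_stack;

static void irq_enter_stack(void)
{
	if (++irq_count == 0)		/* first (outermost) entry only */
		on_irq_stack = 1;	/* here: mov irq_stack_ptr, %rsp */
}

static void irq_exit_stack(void)
{
	if (irq_count-- == 0)		/* leaving the outermost entry */
		on_irq_stack = 0;	/* here: restore the old %rsp */
}

int main(void)
{
	irq_enter_stack();		/* outer interrupt: switches stacks */
	irq_enter_stack();		/* nested interrupt: no switch */
	printf("nested, on_irq_stack=%d irq_count=%d\n", on_irq_stack, irq_count);
	irq_exit_stack();
	irq_exit_stack();
	printf("done,   on_irq_stack=%d irq_count=%d\n", on_irq_stack, irq_count);
	return 0;
}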
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b9a4d8c4b935..af67d3227ea6 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -26,27 +26,6 @@
#include <asm/bios_ebda.h>
#include <asm/trampoline.h>
-/* boot cpu pda */
-static struct x8664_pda _boot_cpu_pda;
-
-#ifdef CONFIG_SMP
-/*
- * We install an empty cpu_pda pointer table to indicate to early users
- * (numa_set_node) that the cpu_pda pointer table for cpus other than
- * the boot cpu is not yet setup.
- */
-static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
-#else
-static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
-#endif
-
-void __init x86_64_init_pda(void)
-{
- _cpu_pda = __cpu_pda;
- cpu_pda(0) = &_boot_cpu_pda;
- pda_init(0);
-}
-
static void __init zap_identity_mappings(void)
{
pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,7 +91,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
if (console_loglevel == 10)
early_printk("Kernel alive\n");
- x86_64_init_pda();
+ pda_init(0);
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 0e275d495563..c8ace880661b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
+#include <asm/percpu.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -204,6 +205,23 @@ ENTRY(secondary_startup_64)
pushq $0
popfq
+#ifdef CONFIG_SMP
+ /*
+ * early_gdt_base should point to the gdt_page in static percpu init
+ * data area. Computing this requires two symbols - __per_cpu_load
+ * and per_cpu__gdt_page. As the linker can't do such a relocation, do
+ * it by hand. As early_gdt_descr is manipulated by C code for
+ * secondary CPUs, this should be done only once for the boot CPU
+ * when early_gdt_descr_base contains zero.
+ */
+ movq early_gdt_descr_base(%rip), %rax
+ testq %rax, %rax
+ jnz 1f
+ movq $__per_cpu_load, %rax
+ addq $per_cpu__gdt_page, %rax
+ movq %rax, early_gdt_descr_base(%rip)
+1:
+#endif
/*
* We must switch to a new descriptor in kernel space for the GDT
* because soon the kernel won't have access anymore to the userspace
@@ -226,12 +244,18 @@ ENTRY(secondary_startup_64)
movl %eax,%fs
movl %eax,%gs
- /*
- * Setup up a dummy PDA. this is just for some early bootup code
- * that does in_interrupt()
- */
+ /* Set up %gs.
+ *
+ * On SMP, %gs should point to the per-cpu area. For initial
+ * boot, make %gs point to the init data section. For a
+ * secondary CPU, initial_gs should be set to its pda address
+ * before the CPU runs this code.
+ *
+ * On UP, initial_gs points to PER_CPU_VAR(__pda) and doesn't
+ * change.
+ */
movl $MSR_GS_BASE,%ecx
- movq $empty_zero_page,%rax
+ movq initial_gs(%rip),%rax
movq %rax,%rdx
shrq $32,%rdx
wrmsr
@@ -257,6 +281,12 @@ ENTRY(secondary_startup_64)
.align 8
ENTRY(initial_code)
.quad x86_64_start_kernel
+ ENTRY(initial_gs)
+#ifdef CONFIG_SMP
+ .quad __per_cpu_load
+#else
+ .quad PER_CPU_VAR(__pda)
+#endif
__FINITDATA
ENTRY(stack_start)
@@ -401,7 +431,12 @@ NEXT_PAGE(level2_spare_pgt)
.globl early_gdt_descr
early_gdt_descr:
.word GDT_ENTRIES*8-1
- .quad per_cpu__gdt_page
+#ifdef CONFIG_SMP
+early_gdt_descr_base:
+ .quad 0x0000000000000000
+#else
+ .quad per_cpu__gdt_page
+#endif
ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
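The new boot-time addq works because a per-cpu symbol such as per_cpu__gdt_page is only an offset into the per-cpu init image, so its load address is __per_cpu_load plus that offset, a sum the linker cannot emit as a single relocation. A sketch that models the init image as a struct and computes the same base-plus-offset address; the layout and field names are made up for illustration:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for the static per-cpu init image laid out by the linker. */
struct percpu_image {
	unsigned long irq_count;
	char *irq_stack_ptr;
	char gdt_page[4096];
};

static struct percpu_image boot_image;	/* plays the role of __per_cpu_load */

int main(void)
{
	uintptr_t per_cpu_load = (uintptr_t)&boot_image;
	uintptr_t gdt_offset   = offsetof(struct percpu_image, gdt_page);

	/* What the "movq $__per_cpu_load; addq $per_cpu__gdt_page" pair computes. */
	uintptr_t early_gdt_base = per_cpu_load + gdt_offset;

	printf("base %#lx + offset %#lx = %#lx\n",
	       (unsigned long)per_cpu_load, (unsigned long)gdt_offset,
	       (unsigned long)early_gdt_base);
	return 0;
}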
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index 1c4a1302536c..f79660390724 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -46,6 +46,7 @@
#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
@@ -82,11 +83,11 @@ static DEFINE_SPINLOCK(vector_lock);
int nr_ioapic_registers[MAX_IO_APICS];
/* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;
/* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* # of MP IRQ source entries */
int mp_irq_entries;
@@ -356,7 +357,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
if (!cfg->move_in_progress) {
/* it means that domain is not changed */
- if (!cpumask_intersects(&desc->affinity, mask))
+ if (!cpumask_intersects(desc->affinity, mask))
cfg->move_desc_pending = 1;
}
}
@@ -386,7 +387,7 @@ struct io_apic {
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
- + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
+ + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -579,9 +580,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
if (assign_irq_vector(irq, cfg, mask))
return BAD_APICID;
- cpumask_and(&desc->affinity, cfg->domain, mask);
+ cpumask_and(desc->affinity, cfg->domain, mask);
set_extra_move_desc(desc, mask);
- return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+ return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
}
static void
@@ -944,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type)
int i;
for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].mp_irqtype == type &&
- (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
- mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
- mp_irqs[i].mp_dstirq == pin)
+ if (mp_irqs[i].irqtype == type &&
+ (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+ mp_irqs[i].dstapic == MP_APIC_ALL) &&
+ mp_irqs[i].dstirq == pin)
return i;
return -1;
@@ -961,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type)
int i;
for (i = 0; i < mp_irq_entries; i++) {
- int lbus = mp_irqs[i].mp_srcbus;
+ int lbus = mp_irqs[i].srcbus;
if (test_bit(lbus, mp_bus_not_pci) &&
- (mp_irqs[i].mp_irqtype == type) &&
- (mp_irqs[i].mp_srcbusirq == irq))
+ (mp_irqs[i].irqtype == type) &&
+ (mp_irqs[i].srcbusirq == irq))
- return mp_irqs[i].mp_dstirq;
+ return mp_irqs[i].dstirq;
}
return -1;
}
@@ -977,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type)
int i;
for (i = 0; i < mp_irq_entries; i++) {
- int lbus = mp_irqs[i].mp_srcbus;
+ int lbus = mp_irqs[i].srcbus;
if (test_bit(lbus, mp_bus_not_pci) &&
- (mp_irqs[i].mp_irqtype == type) &&
- (mp_irqs[i].mp_srcbusirq == irq))
+ (mp_irqs[i].irqtype == type) &&
+ (mp_irqs[i].srcbusirq == irq))
break;
}
if (i < mp_irq_entries) {
int apic;
for(apic = 0; apic < nr_ioapics; apic++) {
- if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
+ if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
return apic;
}
}
@@ -1012,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
return -1;
}
for (i = 0; i < mp_irq_entries; i++) {
- int lbus = mp_irqs[i].mp_srcbus;
+ int lbus = mp_irqs[i].srcbus;
for (apic = 0; apic < nr_ioapics; apic++)
- if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
- mp_irqs[i].mp_dstapic == MP_APIC_ALL)
+ if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+ mp_irqs[i].dstapic == MP_APIC_ALL)
break;
if (!test_bit(lbus, mp_bus_not_pci) &&
- !mp_irqs[i].mp_irqtype &&
+ !mp_irqs[i].irqtype &&
(bus == lbus) &&
- (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
- int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
+ (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+ int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
if (!(apic || IO_APIC_IRQ(irq)))
continue;
- if (pin == (mp_irqs[i].mp_srcbusirq & 3))
+ if (pin == (mp_irqs[i].srcbusirq & 3))
return irq;
/*
* Use the first all-but-pin matching entry as a
@@ -1071,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq)
* EISA conforming in the MP table, that means its trigger type must
* be read in from the ELCR */
-#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx) default_ISA_polarity(idx)
/* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq)
static int MPBIOS_polarity(int idx)
{
- int bus = mp_irqs[idx].mp_srcbus;
+ int bus = mp_irqs[idx].srcbus;
int polarity;
/*
* Determine IRQ line polarity (high active or low active):
*/
- switch (mp_irqs[idx].mp_irqflag & 3)
+ switch (mp_irqs[idx].irqflag & 3)
{
case 0: /* conforms, ie. bus-type dependent polarity */
if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1131,13 @@ static int MPBIOS_polarity(int idx)
static int MPBIOS_trigger(int idx)
{
- int bus = mp_irqs[idx].mp_srcbus;
+ int bus = mp_irqs[idx].srcbus;
int trigger;
/*
* Determine IRQ trigger mode (edge or level sensitive):
*/
- switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
+ switch ((mp_irqs[idx].irqflag>>2) & 3)
{
case 0: /* conforms, ie. bus-type dependent */
if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
int irq, i;
- int bus = mp_irqs[idx].mp_srcbus;
+ int bus = mp_irqs[idx].srcbus;
/*
* Debugging check, we are in big trouble if this message pops up!
*/
- if (mp_irqs[idx].mp_dstirq != pin)
+ if (mp_irqs[idx].dstirq != pin)
printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
if (test_bit(bus, mp_bus_not_pci)) {
- irq = mp_irqs[idx].mp_srcbusirq;
+ irq = mp_irqs[idx].srcbusirq;
} else {
/*
* PCI IRQs are mapped in order
@@ -1566,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
"IRQ %d Mode:%i Active:%i)\n",
- apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+ apic, mp_ioapics[apic].apicid, pin, cfg->vector,
irq, trigger, polarity);
- if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+ if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
dest, trigger, polarity, cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
- mp_ioapics[apic].mp_apicid, pin);
+ mp_ioapics[apic].apicid, pin);
__clear_irq_vector(irq, cfg);
return;
}
@@ -1604,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void)
notcon = 1;
apic_printk(APIC_VERBOSE,
KERN_DEBUG " %d-%d",
- mp_ioapics[apic].mp_apicid,
- pin);
+ mp_ioapics[apic].apicid, pin);
} else
apic_printk(APIC_VERBOSE, " %d-%d",
- mp_ioapics[apic].mp_apicid,
- pin);
+ mp_ioapics[apic].apicid, pin);
continue;
}
if (notcon) {
@@ -1699,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void)
printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
for (i = 0; i < nr_ioapics; i++)
printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
- mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
+ mp_ioapics[i].apicid, nr_ioapic_registers[i]);
/*
* We are a bit conservative about what we expect. We have to
@@ -1719,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void)
spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n");
- printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@@ -2121,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void)
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
- old_id = mp_ioapics[apic].mp_apicid;
+ old_id = mp_ioapics[apic].apicid;
- if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
+ if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
- apic, mp_ioapics[apic].mp_apicid);
+ apic, mp_ioapics[apic].apicid);
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
reg_00.bits.ID);
- mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
+ mp_ioapics[apic].apicid = reg_00.bits.ID;
}
/*
@@ -2137,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
* 'stuck on smp_invalidate_needed IPI wait' messages.
*/
if (check_apicid_used(phys_id_present_map,
- mp_ioapics[apic].mp_apicid)) {
+ mp_ioapics[apic].apicid)) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
- apic, mp_ioapics[apic].mp_apicid);
+ apic, mp_ioapics[apic].apicid);
for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map))
break;
@@ -2148,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
physid_set(i, phys_id_present_map);
- mp_ioapics[apic].mp_apicid = i;
+ mp_ioapics[apic].apicid = i;
} else {
physid_mask_t tmp;
- tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
+ tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
apic_printk(APIC_VERBOSE, "Setting %d in the "
"phys_id_present_map\n",
- mp_ioapics[apic].mp_apicid);
+ mp_ioapics[apic].apicid);
physids_or(phys_id_present_map, phys_id_present_map, tmp);
}
@@ -2163,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
* We need to adjust the IRQ routing table
* if the ID changed.
*/
- if (old_id != mp_ioapics[apic].mp_apicid)
+ if (old_id != mp_ioapics[apic].apicid)
for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].mp_dstapic == old_id)
- mp_irqs[i].mp_dstapic
- = mp_ioapics[apic].mp_apicid;
+ if (mp_irqs[i].dstapic == old_id)
+ mp_irqs[i].dstapic
+ = mp_ioapics[apic].apicid;
/*
* Read the right value from the MPC table and
@@ -2175,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
*/
apic_printk(APIC_VERBOSE, KERN_INFO
"...changing IO-APIC physical APIC ID to %d ...",
- mp_ioapics[apic].mp_apicid);
+ mp_ioapics[apic].apicid);
- reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
+ reg_00.bits.ID = mp_ioapics[apic].apicid;
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0, reg_00.raw);
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -2188,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags);
- if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
+ if (reg_00.bits.ID != mp_ioapics[apic].apicid)
printk("could not set ID!\n");
else
apic_printk(APIC_VERBOSE, " ok.\n");
@@ -2383,7 +2382,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
- cpumask_copy(&desc->affinity, mask);
+ cpumask_copy(desc->affinity, mask);
}
static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@@ -2405,11 +2404,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
}
/* everything is clear. we have right of way */
- migrate_ioapic_irq_desc(desc, &desc->pending_mask);
+ migrate_ioapic_irq_desc(desc, desc->pending_mask);
ret = 0;
desc->status &= ~IRQ_MOVE_PENDING;
- cpumask_clear(&desc->pending_mask);
+ cpumask_clear(desc->pending_mask);
unmask:
unmask_IO_APIC_irq_desc(desc);
@@ -2434,7 +2433,7 @@ static void ir_irq_migration(struct work_struct *work)
continue;
}
- desc->chip->set_affinity(irq, &desc->pending_mask);
+ desc->chip->set_affinity(irq, desc->pending_mask);
spin_unlock_irqrestore(&desc->lock, flags);
}
}
@@ -2448,7 +2447,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{
if (desc->status & IRQ_LEVEL) {
desc->status |= IRQ_MOVE_PENDING;
- cpumask_copy(&desc->pending_mask, mask);
+ cpumask_copy(desc->pending_mask, mask);
migrate_irq_remapped_level_desc(desc);
return;
}
@@ -2516,7 +2515,7 @@ static void irq_complete_move(struct irq_desc **descp)
/* domain has not changed, but affinity did */
me = smp_processor_id();
- if (cpu_isset(me, desc->affinity)) {
+ if (cpumask_test_cpu(me, desc->affinity)) {
*descp = desc = move_irq_desc(desc, me);
/* get the new one */
cfg = desc->chip_data;
@@ -3117,8 +3116,8 @@ static int ioapic_resume(struct sys_device *dev)
spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0);
- if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
- reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
+ if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
+ reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw);
}
spin_unlock_irqrestore(&ioapic_lock, flags);
@@ -3183,7 +3182,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
irq = 0;
spin_lock_irqsave(&vector_lock, flags);
- for (new = irq_want; new < NR_IRQS; new++) {
+ for (new = irq_want; new < nr_irqs; new++) {
if (platform_legacy_irq(new))
continue;
@@ -3258,6 +3257,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
int err;
unsigned dest;
+ if (disable_apic)
+ return -ENXIO;
+
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (err)
@@ -3726,6 +3728,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
struct irq_cfg *cfg;
int err;
+ if (disable_apic)
+ return -ENXIO;
+
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (!err) {
@@ -3850,6 +3855,22 @@ void __init probe_nr_irqs_gsi(void)
nr_irqs_gsi = nr;
}
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+ int nr;
+
+ nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
+ (NR_VECTORS + (8 * nr_cpu_ids)) :
+ (NR_VECTORS + (32 * nr_ioapics)));
+
+ if (nr < nr_irqs && nr > nr_irqs_gsi)
+ nr_irqs = nr;
+
+ return 0;
+}
+#endif
+
/* --------------------------------------------------------------------------
ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */
@@ -3984,8 +4005,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
return -1;
for (i = 0; i < mp_irq_entries; i++)
- if (mp_irqs[i].mp_irqtype == mp_INT &&
- mp_irqs[i].mp_srcbusirq == bus_irq)
+ if (mp_irqs[i].irqtype == mp_INT &&
+ mp_irqs[i].srcbusirq == bus_irq)
break;
if (i >= mp_irq_entries)
return -1;
@@ -4039,7 +4060,7 @@ void __init setup_ioapic_dest(void)
*/
if (desc->status &
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
- mask = &desc->affinity;
+ mask = desc->affinity;
else
mask = TARGET_CPUS;
@@ -4100,7 +4121,7 @@ void __init ioapic_init_mappings(void)
ioapic_res = ioapic_setup_resources();
for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) {
- ioapic_phys = mp_ioapics[i].mp_apicaddr;
+ ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
if (!ioapic_phys) {
printk(KERN_ERR
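The new arch_probe_nr_irqs() above sizes the IRQ space as NR_VECTORS plus the larger of 8 vectors per CPU or 32 per I/O APIC, and only lowers nr_irqs when the estimate stays above nr_irqs_gsi. The same arithmetic as a standalone function with made-up inputs, just to make the max() and the clamping explicit; this is not kernel code:

#include <stdio.h>

#define NR_VECTORS 256				/* x86 vector count */

static int probe_nr_irqs(int nr_cpu_ids, int nr_ioapics,
			 int nr_irqs, int nr_irqs_gsi)
{
	int per_cpu_est = 8 * nr_cpu_ids;
	int per_ioapic_est = 32 * nr_ioapics;
	int nr = NR_VECTORS +
		 (per_cpu_est > per_ioapic_est ? per_cpu_est : per_ioapic_est);

	/* Only lower nr_irqs, and never below the GSI range. */
	if (nr < nr_irqs && nr > nr_irqs_gsi)
		nr_irqs = nr;
	return nr_irqs;
}

int main(void)
{
	/* Hypothetical 16-CPU, 2-IOAPIC box with a generous default nr_irqs. */
	printf("nr_irqs = %d\n", probe_nr_irqs(16, 2, 1024, 48));
	return 0;
}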
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 22f650db917f..a6bca1d33a8a 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -36,11 +36,7 @@ void ack_bad_irq(unsigned int irq)
#endif
}
-#ifdef CONFIG_X86_32
-# define irq_stats(x) (&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x) cpu_pda(x)
-#endif
+#define irq_stats(x) (&per_cpu(irq_stat, x))
/*
* /proc/interrupts printing:
*/
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 74b9ff7341e9..e0f29be8ab0b 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -248,7 +248,7 @@ void fixup_irqs(void)
if (irq == 2)
continue;
- affinity = &desc->affinity;
+ affinity = desc->affinity;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq);
affinity = cpu_all_mask;
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 63c88e6ec025..1db05247b47f 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -19,6 +19,9 @@
#include <asm/io_apic.h>
#include <asm/idle.h>
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
/*
* Probabilistic stack overflow check:
*
@@ -100,7 +103,7 @@ void fixup_irqs(void)
/* interrupts are disabled at this point */
spin_lock(&desc->lock);
- affinity = &desc->affinity;
+ affinity = desc->affinity;
if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index b7f4c929e615..5e9f4fc51385 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -87,9 +87,9 @@
#include <linux/cpu.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
+#include <linux/uaccess.h>
#include <asm/msr.h>
-#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/microcode.h>
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
}
-static inline int
+static inline int
update_match_revision(struct microcode_header_intel *mc_header, int rev)
{
return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
return ret;
}
- ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
- &get_ucode_fw);
+ ret = generic_load_microcode(cpu, (void *)firmware->data,
+ firmware->size, &get_ucode_fw);
release_firmware(firmware);
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
/* We should bind the task to the CPU */
BUG_ON(cpu != raw_smp_processor_id());
- return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
+ return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
static void microcode_fini_cpu(int cpu)
diff --git a/arch/x86/kernel/module_32.c b/arch/x86/kernel/module_32.c
index 3db0a5442eb1..0edd819050e7 100644
--- a/arch/x86/kernel/module_32.c
+++ b/arch/x86/kernel/module_32.c
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
+ table entries. */
}
/* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
*para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
- for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".text", secstrings + s->sh_name))
text = s;
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
- locks= s;
+ locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}
diff --git a/arch/x86/kernel/module_64.c b/arch/x86/kernel/module_64.c
index 6ba87830d4b1..c23880b90b5c 100644
--- a/arch/x86/kernel/module_64.c
+++ b/arch/x86/kernel/module_64.c
@@ -30,14 +30,14 @@
#include <asm/page.h>
#include <asm/pgtable.h>
-#define DEBUGP(fmt...)
+#define DEBUGP(fmt...)
#ifndef CONFIG_UML
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
+ table entries. */
}
void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym;
void *loc;
- u64 val;
+ u64 val;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info);
- DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
- (int)ELF64_R_TYPE(rel[i].r_info),
- sym->st_value, rel[i].r_addend, (u64)loc);
+ DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
+ (int)ELF64_R_TYPE(rel[i].r_info),
+ sym->st_value, rel[i].r_addend, (u64)loc);
- val = sym->st_value + rel[i].r_addend;
+ val = sym->st_value + rel[i].r_addend;
switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if ((s64)val != *(s32 *)loc)
goto overflow;
break;
- case R_X86_64_PC32:
+ case R_X86_64_PC32:
val -= (u64)loc;
*(u32 *)loc = val;
#if 0
if ((s64)val != *(s32 *)loc)
- goto overflow;
+ goto overflow;
#endif
break;
default:
- printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+ printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
overflow:
- printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
+ printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
unsigned int relsec,
struct module *me)
{
- printk("non add relocation not supported\n");
+ printk(KERN_ERR "non add relocation not supported\n");
return -ENOSYS;
-}
+}
int module_finalize(const Elf_Ehdr *hdr,
- const Elf_Shdr *sechdrs,
- struct module *me)
+ const Elf_Shdr *sechdrs,
+ struct module *me)
{
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name))
- locks= s;
+ locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s;
}
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index c0601c2848a1..fa6bb263892e 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,6 +27,7 @@
#include <asm/e820.h>
#include <asm/trampoline.h>
#include <asm/setup.h>
+#include <asm/smp.h>
#include <mach_apic.h>
#ifdef CONFIG_X86_32
@@ -143,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
if (bad_ioapic(m->apicaddr))
return;
- mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
- mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
- mp_ioapics[nr_ioapics].mp_type = m->type;
- mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
- mp_ioapics[nr_ioapics].mp_flags = m->flags;
+ mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+ mp_ioapics[nr_ioapics].apicid = m->apicid;
+ mp_ioapics[nr_ioapics].type = m->type;
+ mp_ioapics[nr_ioapics].apicver = m->apicver;
+ mp_ioapics[nr_ioapics].flags = m->flags;
nr_ioapics++;
}
@@ -159,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
m->srcbusirq, m->dstapic, m->dstirq);
}
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{
apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n",
- mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
- (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
- mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+ mp_irq->irqtype, mp_irq->irqflag & 3,
+ (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+ mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
}
static void __init assign_to_mp_irq(struct mpc_intsrc *m,
- struct mp_config_intsrc *mp_irq)
+ struct mpc_intsrc *mp_irq)
{
- mp_irq->mp_dstapic = m->dstapic;
- mp_irq->mp_type = m->type;
- mp_irq->mp_irqtype = m->irqtype;
- mp_irq->mp_irqflag = m->irqflag;
- mp_irq->mp_srcbus = m->srcbus;
- mp_irq->mp_srcbusirq = m->srcbusirq;
- mp_irq->mp_dstirq = m->dstirq;
+ mp_irq->dstapic = m->dstapic;
+ mp_irq->type = m->type;
+ mp_irq->irqtype = m->irqtype;
+ mp_irq->irqflag = m->irqflag;
+ mp_irq->srcbus = m->srcbus;
+ mp_irq->srcbusirq = m->srcbusirq;
+ mp_irq->dstirq = m->dstirq;
}
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m)
{
- m->dstapic = mp_irq->mp_dstapic;
- m->type = mp_irq->mp_type;
- m->irqtype = mp_irq->mp_irqtype;
- m->irqflag = mp_irq->mp_irqflag;
- m->srcbus = mp_irq->mp_srcbus;
- m->srcbusirq = mp_irq->mp_srcbusirq;
- m->dstirq = mp_irq->mp_dstirq;
+ m->dstapic = mp_irq->dstapic;
+ m->type = mp_irq->type;
+ m->irqtype = mp_irq->irqtype;
+ m->irqflag = mp_irq->irqflag;
+ m->srcbus = mp_irq->srcbus;
+ m->srcbusirq = mp_irq->srcbusirq;
+ m->dstirq = mp_irq->dstirq;
}
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m)
{
- if (mp_irq->mp_dstapic != m->dstapic)
+ if (mp_irq->dstapic != m->dstapic)
return 1;
- if (mp_irq->mp_type != m->type)
+ if (mp_irq->type != m->type)
return 2;
- if (mp_irq->mp_irqtype != m->irqtype)
+ if (mp_irq->irqtype != m->irqtype)
return 3;
- if (mp_irq->mp_irqflag != m->irqflag)
+ if (mp_irq->irqflag != m->irqflag)
return 4;
- if (mp_irq->mp_srcbus != m->srcbus)
+ if (mp_irq->srcbus != m->srcbus)
return 5;
- if (mp_irq->mp_srcbusirq != m->srcbusirq)
+ if (mp_irq->srcbusirq != m->srcbusirq)
return 6;
- if (mp_irq->mp_dstirq != m->dstirq)
+ if (mp_irq->dstirq != m->dstirq)
return 7;
return 0;
@@ -416,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
intsrc.type = MP_INTSRC;
intsrc.irqflag = 0; /* conforming */
intsrc.srcbus = 0;
- intsrc.dstapic = mp_ioapics[0].mp_apicid;
+ intsrc.dstapic = mp_ioapics[0].apicid;
intsrc.irqtype = mp_INT;
@@ -569,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
}
}
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
/*
* Scan the memory blocks for an SMP configuration block.
*/
static void __init __get_smp_config(unsigned int early)
{
- struct intel_mp_floating *mpf = mpf_found;
+ struct mpf_intel *mpf = mpf_found;
if (!mpf)
return;
@@ -597,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
}
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
- mpf->mpf_specification);
+ mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
- if (mpf->mpf_feature2 & (1 << 7)) {
+ if (mpf->feature2 & (1 << 7)) {
printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
pic_mode = 1;
} else {
@@ -610,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
/*
* Now see if we need to read further.
*/
- if (mpf->mpf_feature1 != 0) {
+ if (mpf->feature1 != 0) {
if (early) {
/*
* local APIC has default address
@@ -620,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
}
printk(KERN_INFO "Default MP configuration #%d\n",
- mpf->mpf_feature1);
- construct_default_ISA_mptable(mpf->mpf_feature1);
+ mpf->feature1);
+ construct_default_ISA_mptable(mpf->feature1);
- } else if (mpf->mpf_physptr) {
+ } else if (mpf->physptr) {
/*
* Read the physical hardware table. Anything here will
* override the defaults.
*/
- if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+ if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0;
#endif
@@ -687,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned reserve)
{
unsigned int *bp = phys_to_virt(base);
- struct intel_mp_floating *mpf;
+ struct mpf_intel *mpf;
apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
bp, length);
BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) {
- mpf = (struct intel_mp_floating *)bp;
+ mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) &&
- (mpf->mpf_length == 1) &&
+ (mpf->length == 1) &&
!mpf_checksum((unsigned char *)bp, 16) &&
- ((mpf->mpf_specification == 1)
- || (mpf->mpf_specification == 4))) {
+ ((mpf->specification == 1)
+ || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1;
#endif
@@ -712,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
return 1;
reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
BOOTMEM_DEFAULT);
- if (mpf->mpf_physptr) {
+ if (mpf->physptr) {
unsigned long size = PAGE_SIZE;
#ifdef CONFIG_X86_32
/*
@@ -721,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
* the bottom is mapped now.
* PC-9800's MPC table places on the very last
* of physical memory; so that simply reserving
- * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+ * PAGE_SIZE from mpf->physptr yields BUG()
* in reserve_bootmem.
*/
unsigned long end = max_low_pfn * PAGE_SIZE;
- if (mpf->mpf_physptr + size > end)
- size = end - mpf->mpf_physptr;
+ if (mpf->physptr + size > end)
+ size = end - mpf->physptr;
#endif
- reserve_bootmem_generic(mpf->mpf_physptr, size,
+ reserve_bootmem_generic(mpf->physptr, size,
BOOTMEM_DEFAULT);
}
@@ -808,15 +809,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
/* not legacy */
for (i = 0; i < mp_irq_entries; i++) {
- if (mp_irqs[i].mp_irqtype != mp_INT)
+ if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].mp_irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != 0x0f)
continue;
- if (mp_irqs[i].mp_srcbus != m->srcbus)
+ if (mp_irqs[i].srcbus != m->srcbus)
continue;
- if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+ if (mp_irqs[i].srcbusirq != m->srcbusirq)
continue;
if (irq_used[i]) {
/* already claimed */
@@ -921,10 +922,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (irq_used[i])
continue;
- if (mp_irqs[i].mp_irqtype != mp_INT)
+ if (mp_irqs[i].irqtype != mp_INT)
continue;
- if (mp_irqs[i].mp_irqflag != 0x0f)
+ if (mp_irqs[i].irqflag != 0x0f)
continue;
if (nr_m_spare > 0) {
@@ -1000,7 +1001,7 @@ static int __init update_mp_table(void)
{
char str[16];
char oem[10];
- struct intel_mp_floating *mpf;
+ struct mpf_intel *mpf;
struct mpc_table *mpc, *mpc_new;
if (!enable_update_mptable)
@@ -1013,19 +1014,19 @@ static int __init update_mp_table(void)
/*
* Now see if we need to go further.
*/
- if (mpf->mpf_feature1 != 0)
+ if (mpf->feature1 != 0)
return 0;
- if (!mpf->mpf_physptr)
+ if (!mpf->physptr)
return 0;
- mpc = phys_to_virt(mpf->mpf_physptr);
+ mpc = phys_to_virt(mpf->physptr);
if (!smp_check_mpc(mpc, oem, str))
return 0;
printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
- printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+ printk(KERN_INFO "physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) {
mpc_new_phys = 0;
@@ -1046,23 +1047,23 @@ static int __init update_mp_table(void)
}
printk(KERN_INFO "use in-positon replacing\n");
} else {
- mpf->mpf_physptr = mpc_new_phys;
+ mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys);
memcpy(mpc_new, mpc, mpc->length);
mpc = mpc_new;
/* check if we can modify that */
- if (mpc_new_phys - mpf->mpf_physptr) {
- struct intel_mp_floating *mpf_new;
+ if (mpc_new_phys - mpf->physptr) {
+ struct mpf_intel *mpf_new;
/* steal 16 bytes from [0, 1k) */
printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
mpf_new = phys_to_virt(0x400 - 16);
memcpy(mpf_new, mpf, 16);
mpf = mpf_new;
- mpf->mpf_physptr = mpc_new_phys;
+ mpf->physptr = mpc_new_phys;
}
- mpf->mpf_checksum = 0;
- mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
- printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+ mpf->checksum = 0;
+ mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+ printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
}
/*
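When update_mp_table() relocates the MP floating pointer it re-checksums it by zeroing the checksum byte and subtracting the byte sum of the 16-byte structure, so the whole structure sums to zero again. A minimal sketch of that fix-up over a plain buffer; the checksum position and header bytes below are illustrative, not the real mpf_intel layout:

#include <stdio.h>

static unsigned char byte_sum(const unsigned char *p, int len)
{
	unsigned char sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	unsigned char mpf[16] = { '_', 'M', 'P', '_', 1, 4, 0x12, 0x34 };
	int csum_at = 10;			/* illustrative checksum position */

	/* Zero the checksum byte, then subtract the byte sum, as the hunk does. */
	mpf[csum_at] = 0;
	mpf[csum_at] -= byte_sum(mpf, 16);

	printf("total byte sum: %u\n", byte_sum(mpf, 16));	/* prints 0 */
	return 0;
}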
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 726266695b2c..3cf3413ec626 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -35,10 +35,10 @@
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/msr.h>
-#include <asm/uaccess.h>
#include <asm/system.h>
static struct class *msr_class;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 7228979f1e7f..23b6d9e6e4f5 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -61,11 +61,7 @@ static int endflag __initdata;
static inline unsigned int get_nmi_count(int cpu)
{
-#ifdef CONFIG_X86_64
- return cpu_pda(cpu)->__nmi_count;
-#else
- return nmi_count(cpu);
-#endif
+ return per_cpu(irq_stat, cpu).__nmi_count;
}
static inline int mce_in_progress(void)
@@ -82,12 +78,8 @@ static inline int mce_in_progress(void)
*/
static inline unsigned int get_timer_irqs(int cpu)
{
-#ifdef CONFIG_X86_64
- return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
return per_cpu(irq_stat, cpu).apic_timer_irqs +
per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
}
#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index a546f55c77b4..2c00a57ccb90 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -66,9 +66,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
-DEFINE_PER_CPU(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
/*
* Return saved PC of a blocked thread.
*/
@@ -591,7 +588,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (prev->gs | next->gs)
loadsegment(gs, next->gs);
- x86_write_percpu(current_task, next_p);
+ percpu_write(current_task, next_p);
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 416fb9282f4f..4523ff88a69d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -57,6 +57,12 @@
asmlinkage extern void ret_from_fork(void);
+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+
+DEFINE_PER_CPU(unsigned long, old_rsp);
+static DEFINE_PER_CPU(unsigned char, is_idle);
+
unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -75,13 +81,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
void enter_idle(void)
{
- write_pda(isidle, 1);
+ percpu_write(is_idle, 1);
atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}
static void __exit_idle(void)
{
- if (test_and_clear_bit_pda(0, isidle) == 0)
+ if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}
@@ -392,7 +398,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
load_gs_index(0);
regs->ip = new_ip;
regs->sp = new_sp;
- write_pda(oldrsp, new_sp);
+ percpu_write(old_rsp, new_sp);
regs->cs = __USER_CS;
regs->ss = __USER_DS;
regs->flags = 0x200;
@@ -613,13 +619,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Switch the PDA and FPU contexts.
*/
- prev->usersp = read_pda(oldrsp);
- write_pda(oldrsp, next->usersp);
- write_pda(pcurrent, next_p);
+ prev->usersp = percpu_read(old_rsp);
+ percpu_write(old_rsp, next->usersp);
+ percpu_write(current_task, next_p);
- write_pda(kernelstack,
+ percpu_write(kernel_stack,
(unsigned long)task_stack_page(next_p) +
- THREAD_SIZE - PDA_STACKOFFSET);
+ THREAD_SIZE - KERNEL_STACK_OFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
write_pda(stack_canary, next_p->stack_canary);
/*
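
The process_64.c changes swap the PDA accessors for generic per-CPU variables such as old_rsp and is_idle. As a loose model of the percpu_read()/percpu_write() semantics, assuming nothing about the real %gs-based implementation, each CPU simply owns its own slot of the variable and the accessors pick the slot of the executing CPU:

#include <stdio.h>

#define NR_CPUS 4

/* Toy model: one slot of "old_rsp" and "is_idle" per CPU, selected by an
 * explicit cpu index instead of a %gs-based segment offset. */
static unsigned long old_rsp[NR_CPUS];
static unsigned char is_idle[NR_CPUS];

static int this_cpu;	/* whichever CPU the code pretends to be running on */

#define percpu_write(var, val)	((var)[this_cpu] = (val))
#define percpu_read(var)	((var)[this_cpu])

int main(void)
{
	this_cpu = 2;
	percpu_write(old_rsp, 0xdeadbeefUL);
	percpu_write(is_idle, 1);

	printf("cpu %d: old_rsp=%#lx is_idle=%u\n", this_cpu,
	       percpu_read(old_rsp), (unsigned)percpu_read(is_idle));
	return 0;
}
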
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 2b46eb41643b..f8536fee5c12 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -14,6 +14,7 @@
#include <asm/reboot.h>
#include <asm/pci_x86.h>
#include <asm/virtext.h>
+#include <asm/cpu.h>
#ifdef CONFIG_X86_32
# include <linux/dmi.h>
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index ae0d8042cf69..f41c4486c270 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -89,7 +89,7 @@
#include <asm/system.h>
#include <asm/vsyscall.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 55c46074eba0..efbafbbff584 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -13,6 +13,23 @@
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
+/*
+ * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
+ * voyager wants cpu_number too.
+ */
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
@@ -26,31 +43,84 @@ unsigned int max_physical_apicid;
physid_mask_t phys_cpu_present_map;
#endif
-/* map cpu index to physical APIC ID */
+/*
+ * Map cpu index to physical APIC ID
+ */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1
+#define X86_64_NUMA 1 /* (used later) */
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
-/* map cpu index to node index */
+/*
+ * Map cpu index to node index
+ */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-/* which logical CPUs are on which nodes */
+/*
+ * Which logical CPUs are on which nodes
+ */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);
-/* setup node_to_cpumask_map */
+/*
+ * Setup node_to_cpumask_map
+ */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
+/*
+ * Define load_pda_offset() and per-cpu __pda for x86_64.
+ * load_pda_offset() is responsible for loading the offset of pda into
+ * %gs.
+ *
+ * On SMP, pda offset also doubles as percpu base address and thus it
+ * should be at the start of per-cpu area. To achieve this, it's
+ * preallocated in vmlinux_64.lds.S directly instead of using
+ * DEFINE_PER_CPU().
+ */
+#ifdef CONFIG_X86_64
+void __cpuinit load_pda_offset(int cpu)
+{
+ /* Memory clobbers used to order pda/percpu accesses */
+ mb();
+ wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
+ mb();
+}
+#ifndef CONFIG_SMP
+DEFINE_PER_CPU(struct x8664_pda, __pda);
+#endif
+EXPORT_PER_CPU_SYMBOL(__pda);
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_64
+
+/* correctly size the local cpu masks */
+static void setup_cpu_local_masks(void)
+{
+ alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+ alloc_bootmem_cpumask_var(&cpu_callin_mask);
+ alloc_bootmem_cpumask_var(&cpu_callout_mask);
+ alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
+#else /* CONFIG_X86_32 */
+
+static inline void setup_cpu_local_masks(void)
+{
+}
+
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
* Copy data used in early init routines from the initial arrays to the
* per cpu data areas. These arrays then become expendable and the
@@ -79,78 +149,14 @@ static void __init setup_per_cpu_maps(void)
#endif
}
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
+#ifdef CONFIG_X86_64
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+ [0] = (unsigned long)__per_cpu_load,
+};
+#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+#endif
EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
- */
-static void __init setup_cpu_pda_map(void)
-{
- char *pda;
- struct x8664_pda **new_cpu_pda;
- unsigned long size;
- int cpu;
-
- size = roundup(sizeof(struct x8664_pda), cache_line_size());
-
- /* allocate cpu_pda array and pointer table */
- {
- unsigned long tsize = nr_cpu_ids * sizeof(void *);
- unsigned long asize = size * (nr_cpu_ids - 1);
-
- tsize = roundup(tsize, cache_line_size());
- new_cpu_pda = alloc_bootmem(tsize + asize);
- pda = (char *)new_cpu_pda + tsize;
- }
-
- /* initialize pointer table to static pda's */
- for_each_possible_cpu(cpu) {
- if (cpu == 0) {
- /* leave boot cpu pda in place */
- new_cpu_pda[0] = cpu_pda(0);
- continue;
- }
- new_cpu_pda[cpu] = (struct x8664_pda *)pda;
- new_cpu_pda[cpu]->in_bootmem = 1;
- pda += size;
- }
-
- /* point to new pointer table */
- _cpu_pda = new_cpu_pda;
-}
-
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
-
-/* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
-{
- alloc_bootmem_cpumask_var(&cpu_initialized_mask);
- alloc_bootmem_cpumask_var(&cpu_callin_mask);
- alloc_bootmem_cpumask_var(&cpu_callout_mask);
- alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
-
-#else /* CONFIG_X86_32 */
-
-static inline void setup_cpu_local_masks(void)
-{
-}
-
-#endif /* CONFIG_X86_32 */
/*
* Great future plan:
@@ -164,9 +170,6 @@ void __init setup_per_cpu_areas(void)
int cpu;
unsigned long align = 1;
- /* Setup cpu_pda map */
- setup_cpu_pda_map();
-
/* Copy section for each CPU (we discard the original) */
old_size = PERCPU_ENOUGH_ROOM;
align = max_t(unsigned long, PAGE_SIZE, align);
@@ -197,8 +200,25 @@ void __init setup_per_cpu_areas(void)
cpu, node, __pa(ptr));
}
#endif
+
+ memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
per_cpu_offset(cpu) = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+ per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+ per_cpu(cpu_number, cpu) = cpu;
+#ifdef CONFIG_X86_64
+ per_cpu(irq_stack_ptr, cpu) =
+ (char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+ /*
+ * CPU0 modified pda in the init data area, reload pda
+ * offset for CPU0 and clear the area for others.
+ */
+ if (cpu == 0)
+ load_pda_offset(0);
+ else
+ memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
+#endif
+
+ DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
}
/* Setup percpu data maps */
@@ -220,6 +240,7 @@ void __init setup_per_cpu_areas(void)
* Requires node_possible_map to be valid.
*
* Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
*/
static void __init setup_node_to_cpumask_map(void)
{
@@ -235,6 +256,7 @@ static void __init setup_node_to_cpumask_map(void)
/* allocate the map */
map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+ DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
pr_debug("Node to cpumask map at %p for %d nodes\n",
map, nr_node_ids);
@@ -247,17 +269,23 @@ void __cpuinit numa_set_node(int cpu, int node)
{
int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
- if (cpu_pda(cpu) && node != NUMA_NO_NODE)
- cpu_pda(cpu)->nodenumber = node;
-
- if (cpu_to_node_map)
+ /* early setting, no percpu area yet */
+ if (cpu_to_node_map) {
cpu_to_node_map[cpu] = node;
+ return;
+ }
- else if (per_cpu_offset(cpu))
- per_cpu(x86_cpu_to_node_map, cpu) = node;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+ if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
+ printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+ dump_stack();
+ return;
+ }
+#endif
+ per_cpu(x86_cpu_to_node_map, cpu) = node;
- else
- pr_debug("Setting node for non-present cpu %d\n", cpu);
+ if (node != NUMA_NO_NODE)
+ per_cpu(node_number, cpu) = node;
}
void __cpuinit numa_clear_node(int cpu)
@@ -274,7 +302,7 @@ void __cpuinit numa_add_cpu(int cpu)
void __cpuinit numa_remove_cpu(int cpu)
{
- cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+ cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
#else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -284,7 +312,7 @@ void __cpuinit numa_remove_cpu(int cpu)
*/
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
- int node = cpu_to_node(cpu);
+ int node = early_cpu_to_node(cpu);
cpumask_t *mask;
char buf[64];
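
setup_per_cpu_areas() above copies the percpu template for each CPU and records per_cpu_offset(cpu) = ptr - __per_cpu_start, so a per-CPU variable is reached by adding that offset to its link-time address. A self-contained sketch of that offset arithmetic (GNU C for __typeof__; all symbols are stand-ins, and the pointer subtraction is only to illustrate the idea):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the linked .data.percpu template image. */
static struct { int cpu_number; long counter; } percpu_template;

static long __per_cpu_offset[NR_CPUS];

/* per_cpu(var, cpu): the variable's template address shifted by the offset. */
#define per_cpu(var, cpu) \
	(*(__typeof__(&(var)))((char *)&(var) + __per_cpu_offset[(cpu)]))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		/* Carve out a private copy of the template for this CPU... */
		char *ptr = malloc(sizeof(percpu_template));
		memcpy(ptr, &percpu_template, sizeof(percpu_template));
		/* ...and record how far it sits from the template image. */
		__per_cpu_offset[cpu] = ptr - (char *)&percpu_template;
		per_cpu(percpu_template.cpu_number, cpu) = cpu;
	}
	printf("cpu 3 sees cpu_number = %d\n",
	       per_cpu(percpu_template.cpu_number, 3));
	return 0;
}
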
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bb1a3b1fc87f..869b98840fd0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -53,7 +53,6 @@
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
-#include <asm/smp.h>
#include <asm/trampoline.h>
#include <asm/cpu.h>
#include <asm/numa.h>
@@ -745,52 +744,6 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
complete(&c_idle->done);
}
-#ifdef CONFIG_X86_64
-
-/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
-static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
-{
- if (!after_bootmem)
- free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
-}
-
-/*
- * Allocate node local memory for the AP pda.
- *
- * Must be called after the _cpu_pda pointer table is initialized.
- */
-int __cpuinit get_local_pda(int cpu)
-{
- struct x8664_pda *oldpda, *newpda;
- unsigned long size = sizeof(struct x8664_pda);
- int node = cpu_to_node(cpu);
-
- if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
- return 0;
-
- oldpda = cpu_pda(cpu);
- newpda = kmalloc_node(size, GFP_ATOMIC, node);
- if (!newpda) {
- printk(KERN_ERR "Could not allocate node local PDA "
- "for CPU %d on node %d\n", cpu, node);
-
- if (oldpda)
- return 0; /* have a usable pda */
- else
- return -1;
- }
-
- if (oldpda) {
- memcpy(newpda, oldpda, size);
- free_bootmem_pda(oldpda);
- }
-
- newpda->in_bootmem = 0;
- cpu_pda(cpu) = newpda;
- return 0;
-}
-#endif /* CONFIG_X86_64 */
-
static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
* NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
@@ -808,16 +761,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
};
INIT_WORK(&c_idle.work, do_fork_idle);
-#ifdef CONFIG_X86_64
- /* Allocate node local memory for AP pdas */
- if (cpu > 0) {
- boot_error = get_local_pda(cpu);
- if (boot_error)
- goto restore_state;
- /* if can't get pda memory, can't start cpu */
- }
-#endif
-
alternatives_smp_switch(1);
c_idle.idle = get_idle_for_cpu(cpu);
@@ -847,14 +790,17 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
-#ifdef CONFIG_X86_32
per_cpu(current_task, cpu) = c_idle.idle;
+#ifdef CONFIG_X86_32
init_gdt(cpu);
/* Stack for startup_32 can be just as for start_secondary onwards */
irq_ctx_init(cpu);
#else
- cpu_pda(cpu)->pcurrent = c_idle.idle;
clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+ initial_gs = per_cpu_offset(cpu);
+ per_cpu(kernel_stack, cpu) =
+ (unsigned long)task_stack_page(c_idle.idle) -
+ KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
@@ -931,9 +877,7 @@ do_rest:
inquire_remote_apic(apicid);
}
}
-#ifdef CONFIG_X86_64
-restore_state:
-#endif
+
if (boot_error) {
/* Try to put things back the way they were before ... */
numa_remove_cpu(cpu); /* was set by numa_add_cpu */
@@ -1125,6 +1069,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
printk(KERN_ERR "... forcing use of dummy APIC emulation."
"(tell your hw vendor)\n");
smpboot_clear_io_apic();
+ disable_ioapic_setup();
return -1;
}
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
index 397e309839dd..add36b4e37c9 100644
--- a/arch/x86/kernel/smpcommon.c
+++ b/arch/x86/kernel/smpcommon.c
@@ -3,11 +3,16 @@
*/
#include <linux/module.h>
#include <asm/smp.h>
+#include <asm/sections.h>
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = (unsigned long)__per_cpu_load;
+#else
DEFINE_PER_CPU(unsigned long, this_cpu_off);
+#endif
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
+#ifdef CONFIG_X86_32
/*
* Initialize the CPU's GDT. This is either the boot CPU doing itself
* (still using the master per-cpu area), or a CPU doing it for a
@@ -23,8 +28,5 @@ __cpuinit void init_gdt(int cpu)
write_gdt_entry(get_cpu_gdt_table(cpu),
GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
-
- per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
- per_cpu(cpu_number, cpu) = cpu;
}
#endif
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
index ce5054642247..abf0808d6fc4 100644
--- a/arch/x86/kernel/tlb_32.c
+++ b/arch/x86/kernel/tlb_32.c
@@ -4,8 +4,8 @@
#include <asm/tlbflush.h>
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
- ____cacheline_aligned = { &init_mm, 0, };
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+ = { &init_mm, 0, };
/* must come after the send_IPI functions above for inlining */
#include <mach_ipi.h>
@@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
* Optimizations Manfred Spraul <manfred@colorfullife.com>
*/
-static cpumask_t flush_cpumask;
+static cpumask_var_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
@@ -34,8 +34,8 @@ static DEFINE_SPINLOCK(tlbstate_lock);
*/
void leave_mm(int cpu)
{
- BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
- cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
+ BUG_ON(percpu_read(cpu_tlbstate.state) == TLBSTATE_OK);
+ cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
@@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = get_cpu();
- if (!cpu_isset(cpu, flush_cpumask))
+ if (!cpumask_test_cpu(cpu, flush_cpumask))
goto out;
/*
* This was a BUG() but until someone can quote me the
@@ -103,8 +103,8 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
* BUG();
*/
- if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
- if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
+ if (flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (flush_va == TLB_FLUSH_ALL)
local_flush_tlb();
else
@@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
}
ack_APIC_irq();
smp_mb__before_clear_bit();
- cpu_clear(cpu, flush_cpumask);
+ cpumask_clear_cpu(cpu, flush_cpumask);
smp_mb__after_clear_bit();
out:
put_cpu_no_resched();
inc_irq_stat(irq_tlb_count);
}
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
- unsigned long va)
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm, unsigned long va)
{
- cpumask_t cpumask = *cpumaskp;
-
/*
- * A couple of (to be removed) sanity checks:
- *
- * - current CPU must not be in mask
* - mask must exist :)
*/
- BUG_ON(cpus_empty(cpumask));
- BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(cpumask_empty(cpumask));
BUG_ON(!mm);
-#ifdef CONFIG_HOTPLUG_CPU
- /* If a CPU which we ran on has gone down, OK. */
- cpus_and(cpumask, cpumask, cpu_online_map);
- if (unlikely(cpus_empty(cpumask)))
- return;
-#endif
-
/*
* i'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is.
@@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
*/
spin_lock(&tlbstate_lock);
+ cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
+#ifdef CONFIG_HOTPLUG_CPU
+ /* If a CPU which we ran on has gone down, OK. */
+ cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
+ if (unlikely(cpumask_empty(flush_cpumask))) {
+ spin_unlock(&tlbstate_lock);
+ return;
+ }
+#endif
flush_mm = mm;
flush_va = va;
- cpus_or(flush_cpumask, cpumask, flush_cpumask);
/*
* Make the above memory operations globally visible before
@@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
+ send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
- while (!cpus_empty(flush_cpumask))
+ while (!cpumask_empty(flush_cpumask))
/* nothing. lockup detection does not belong here */
cpu_relax();
@@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
- cpumask_t cpu_mask;
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
local_flush_tlb();
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
- cpumask_t cpu_mask;
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) {
if (current->mm)
@@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else
leave_mm(smp_processor_id());
}
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
struct mm_struct *mm = vma->vm_mm;
- cpumask_t cpu_mask;
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) {
if (current->mm)
@@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id());
}
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, va);
-
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, va);
preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
@@ -237,7 +222,7 @@ static void do_flush_tlb_all(void *info)
unsigned long cpu = smp_processor_id();
__flush_tlb_all();
- if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(cpu);
}
@@ -246,11 +231,9 @@ void flush_tlb_all(void)
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
-void reset_lazy_tlbstate(void)
+static int init_flush_cpumask(void)
{
- int cpu = raw_smp_processor_id();
-
- per_cpu(cpu_tlbstate, cpu).state = 0;
- per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
+ alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
+ return 0;
}
-
+early_initcall(init_flush_cpumask);
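
In the rewritten native_flush_tlb_others() the IPI target set is now computed under the lock: start from the caller's mask, drop the current CPU with cpumask_andnot(), intersect with the online mask, and bail out if nothing is left. A tiny user-space sketch of that mask algebra, using a 64-bit word as a stand-in cpumask (the helper macros are local, not the kernel's cpumask API):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cpumask_t;	/* up to 64 CPUs for the sketch */

#define cpumask_of(cpu)		((cpumask_t)1 << (cpu))
#define cpumask_andnot(a, b)	((a) & ~(b))
#define cpumask_and(a, b)	((a) & (b))
#define cpumask_empty(m)	((m) == 0)

int main(void)
{
	cpumask_t requested = 0x0f;	/* flush CPUs 0-3 */
	cpumask_t online    = 0x0b;	/* CPU 2 is offline */
	int self = 0;

	cpumask_t flush = cpumask_andnot(requested, cpumask_of(self));
	flush = cpumask_and(flush, online);

	if (cpumask_empty(flush))
		printf("nothing left to flush remotely\n");
	else
		printf("send INVALIDATE_TLB_VECTOR IPI to mask %#llx\n",
		       (unsigned long long)flush);
	return 0;
}
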
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
index f8be6f1d2e48..e64a32c48825 100644
--- a/arch/x86/kernel/tlb_64.c
+++ b/arch/x86/kernel/tlb_64.c
@@ -18,6 +18,9 @@
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+ = { &init_mm, 0, };
+
#include <mach_ipi.h>
/*
* Smarter SMP flushing macros.
@@ -43,10 +46,10 @@
union smp_flush_state {
struct {
- cpumask_t flush_cpumask;
struct mm_struct *flush_mm;
unsigned long flush_va;
spinlock_t tlbstate_lock;
+ DECLARE_BITMAP(flush_cpumask, NR_CPUS);
};
char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
@@ -62,9 +65,9 @@ static DEFINE_PER_CPU(union smp_flush_state, flush_state);
*/
void leave_mm(int cpu)
{
- if (read_pda(mmu_state) == TLBSTATE_OK)
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
BUG();
- cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
+ cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
@@ -131,7 +134,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
- if (!cpu_isset(cpu, f->flush_cpumask))
+ if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
goto out;
/*
* This was a BUG() but until someone can quote me the
@@ -142,8 +145,8 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
* BUG();
*/
- if (f->flush_mm == read_pda(active_mm)) {
- if (read_pda(mmu_state) == TLBSTATE_OK) {
+ if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (f->flush_va == TLB_FLUSH_ALL)
local_flush_tlb();
else
@@ -153,19 +156,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
}
out:
ack_APIC_irq();
- cpu_clear(cpu, f->flush_cpumask);
+ cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
inc_irq_stat(irq_tlb_count);
}
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
- unsigned long va)
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+ struct mm_struct *mm, unsigned long va)
{
int sender;
union smp_flush_state *f;
- cpumask_t cpumask = *cpumaskp;
-
- if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
- return;
/* Caller has disabled preemption */
sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@@ -180,7 +179,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
f->flush_mm = mm;
f->flush_va = va;
- cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
+ cpumask_andnot(to_cpumask(f->flush_cpumask),
+ cpumask, cpumask_of(smp_processor_id()));
/*
* Make the above memory operations globally visible before
@@ -191,9 +191,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to
* CPUs affected.
*/
- send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
+ send_IPI_mask(to_cpumask(f->flush_cpumask),
+ INVALIDATE_TLB_VECTOR_START + sender);
- while (!cpus_empty(f->flush_cpumask))
+ while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
cpu_relax();
f->flush_mm = NULL;
@@ -201,6 +202,25 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
spin_unlock(&f->tlbstate_lock);
}
+void native_flush_tlb_others(const struct cpumask *cpumask,
+ struct mm_struct *mm, unsigned long va)
+{
+ if (is_uv_system()) {
+ /* FIXME: could be a percpu_alloc'd thing */
+ static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+ struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask);
+
+ cpumask_andnot(after_uv_flush, cpumask,
+ cpumask_of(smp_processor_id()));
+ if (!uv_flush_tlb_others(after_uv_flush, mm, va))
+ flush_tlb_others_ipi(after_uv_flush, mm, va);
+
+ put_cpu_var(flush_tlb_mask);
+ return;
+ }
+ flush_tlb_others_ipi(cpumask, mm, va);
+}
+
static int __cpuinit init_smp_flush(void)
{
int i;
@@ -215,25 +235,18 @@ core_initcall(init_smp_flush);
void flush_tlb_current_task(void)
{
struct mm_struct *mm = current->mm;
- cpumask_t cpu_mask;
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
local_flush_tlb();
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
- cpumask_t cpu_mask;
-
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) {
if (current->mm)
@@ -241,8 +254,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else
leave_mm(smp_processor_id());
}
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable();
}
@@ -250,11 +263,8 @@ void flush_tlb_mm(struct mm_struct *mm)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
struct mm_struct *mm = vma->vm_mm;
- cpumask_t cpu_mask;
preempt_disable();
- cpu_mask = mm->cpu_vm_mask;
- cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) {
if (current->mm)
@@ -263,8 +273,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id());
}
- if (!cpus_empty(cpu_mask))
- flush_tlb_others(cpu_mask, mm, va);
+ if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+ flush_tlb_others(&mm->cpu_vm_mask, mm, va);
preempt_enable();
}
@@ -274,7 +284,7 @@ static void do_flush_tlb_all(void *info)
unsigned long cpu = smp_processor_id();
__flush_tlb_all();
- if (read_pda(mmu_state) == TLBSTATE_LAZY)
+ if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(cpu);
}
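
On 64-bit the sending CPU picks a flush slot with smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS, so several CPUs can share one slot and therefore its tlbstate_lock and flush_cpumask. A sketch of that slot mapping; the counts below are illustrative, not taken from the headers:

#include <stdio.h>

/* Illustrative counts; the kernel derives its own from the vector layout. */
#define NUM_INVALIDATE_TLB_VECTORS 8
#define NR_CPUS 12

int main(void)
{
	/* Each CPU maps onto one flush slot; CPUs 0 and 8 share slot 0, etc. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %2d -> flush_state[%d]\n",
		       cpu, cpu % NUM_INVALIDATE_TLB_VECTORS);
	return 0;
}
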
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e0..690dcf1a27d4 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
* The cpumaskp mask contains the cpus the broadcast was sent to.
*
* Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns 0 if some remote flushing remains to be done. The mask will have
+ * some bits still set.
*/
int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
- cpumask_t *cpumaskp)
+ struct cpumask *cpumaskp)
{
int completion_status = 0;
int right_shift;
@@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
* Success, so clear the remote cpu's from the mask so we don't
* use the IPI method of shootdown on them.
*/
- for_each_cpu_mask(bit, *cpumaskp) {
+ for_each_cpu(bit, cpumaskp) {
blade = uv_cpu_to_blade_id(bit);
if (blade == this_blade)
continue;
- cpu_clear(bit, *cpumaskp);
+ cpumask_clear_cpu(bit, cpumaskp);
}
- if (!cpus_empty(*cpumaskp))
+ if (!cpumask_empty(cpumaskp))
return 0;
return 1;
}
@@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
* Returns 1 if all remote flushing was done.
* Returns 0 if some remote flushing remains to be done.
*/
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
+int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
unsigned long va)
{
int i;
@@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
i = 0;
- for_each_cpu_mask(bit, *cpumaskp) {
+ for_each_cpu(bit, cpumaskp) {
blade = uv_cpu_to_blade_id(bit);
BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
if (blade == this_blade) {
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index 82c67559dde7..3eba7f7bac05 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -178,14 +178,7 @@ SECTIONS
__initramfs_end = .;
}
#endif
- . = ALIGN(PAGE_SIZE);
- .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
- __per_cpu_start = .;
- *(.data.percpu.page_aligned)
- *(.data.percpu)
- *(.data.percpu.shared_aligned)
- __per_cpu_end = .;
- }
+ PERCPU(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
/* freed after init ends here */
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 1a614c0e6bef..a09abb8fb97f 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -5,6 +5,7 @@
#define LOAD_OFFSET __START_KERNEL_map
#include <asm-generic/vmlinux.lds.h>
+#include <asm/asm-offsets.h>
#include <asm/page.h>
#undef i386 /* in case the preprocessor is a 32bit one */
@@ -13,12 +14,14 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
jiffies_64 = jiffies;
-_proxy_pda = 1;
PHDRS {
text PT_LOAD FLAGS(5); /* R_E */
data PT_LOAD FLAGS(7); /* RWE */
user PT_LOAD FLAGS(7); /* RWE */
data.init PT_LOAD FLAGS(7); /* RWE */
+#ifdef CONFIG_SMP
+ percpu PT_LOAD FLAGS(7); /* RWE */
+#endif
note PT_NOTE FLAGS(0); /* ___ */
}
SECTIONS
@@ -208,14 +211,29 @@ SECTIONS
__initramfs_end = .;
#endif
+#ifdef CONFIG_SMP
+ /*
+ * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
+ * output PHDR, so the next output section - __data_nosave - should
+ * switch it back to data.init. Also, pda should be at the head of
+ * percpu area. Preallocate it and define the percpu offset symbol
+ * so that it can be accessed as a percpu variable.
+ */
+ . = ALIGN(PAGE_SIZE);
+ PERCPU_VADDR_PREALLOC(0, :percpu, pda_size)
+ per_cpu____pda = __per_cpu_start;
+#else
PERCPU(PAGE_SIZE)
+#endif
. = ALIGN(PAGE_SIZE);
__init_end = .;
. = ALIGN(PAGE_SIZE);
__nosave_begin = .;
- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
+ .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+ *(.data.nosave)
+ } :data.init /* switch back to data.init, see PERCPU_VADDR() above */
. = ALIGN(PAGE_SIZE);
__nosave_end = .;
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 695e426aa354..3909e3ba5ce3 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(init_level4_pgt);
EXPORT_SYMBOL(load_gs_index);
-
-EXPORT_SYMBOL(_proxy_pda);
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
index a580b9562e76..0ade62555ff3 100644
--- a/arch/x86/mach-voyager/setup.c
+++ b/arch/x86/mach-voyager/setup.c
@@ -9,6 +9,7 @@
#include <asm/e820.h>
#include <asm/io.h>
#include <asm/setup.h>
+#include <asm/cpu.h>
void __init pre_intr_init_hook(void)
{
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 9840b7ec749a..96f15b09a4c5 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -402,7 +402,7 @@ void __init find_smp_config(void)
VOYAGER_SUS_IN_CONTROL_PORT);
current_thread_info()->cpu = boot_cpu_id;
- x86_write_percpu(cpu_number, boot_cpu_id);
+ percpu_write(cpu_number, boot_cpu_id);
}
/*
@@ -531,6 +531,7 @@ static void __init do_boot_cpu(__u8 cpu)
stack_start.sp = (void *)idle->thread.sp;
init_gdt(cpu);
+ per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
per_cpu(current_task, cpu) = idle;
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
irq_ctx_init(cpu);
@@ -1748,6 +1749,7 @@ static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
static void __cpuinit voyager_smp_prepare_boot_cpu(void)
{
init_gdt(smp_processor_id());
+ per_cpu(this_cpu_off, smp_processor_id()) = __per_cpu_offset[smp_processor_id()];
switch_to_new_gdt();
cpu_set(smp_processor_id(), cpu_online_map);
@@ -1780,7 +1782,7 @@ static void __init voyager_smp_cpus_done(unsigned int max_cpus)
void __init smp_setup_processor_id(void)
{
current_thread_info()->cpu = hard_smp_processor_id();
- x86_write_percpu(cpu_number, hard_smp_processor_id());
+ percpu_write(cpu_number, hard_smp_processor_id());
}
static void voyager_send_call_func(cpumask_t callmask)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9e268b6b204e..90dfae511a41 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -534,7 +534,7 @@ static int vmalloc_fault(unsigned long address)
happen within a race in page table update. In the latter
case just flush. */
- pgd = pgd_offset(current->mm ?: &init_mm, address);
+ pgd = pgd_offset(current->active_mm, address);
pgd_ref = pgd_offset_k(address);
if (pgd_none(*pgd_ref))
return -1;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3be..4a6989e47a53 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -49,7 +49,6 @@
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
-#include <asm/smp.h>
unsigned int __VMALLOC_RESERVE = 128 << 20;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..4cf30dee8161 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -555,10 +555,12 @@ repeat:
if (!pte_val(old_pte)) {
if (!primary)
return 0;
- WARN(1, KERN_WARNING "CPA: called for zero pte. "
- "vaddr = %lx cpa->vaddr = %lx\n", address,
- *cpa->vaddr);
- return -EINVAL;
+
+ /*
+ * Special error value returned, indicating that the mapping
+ * did not exist at this address.
+ */
+ return -EFAULT;
}
if (level == PG_LEVEL_4K) {
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 85cbd3cd3723..3be399013de6 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
req_type & _PAGE_CACHE_MASK);
}
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return reserve_ram_pages_type(start, end, req_type, new_type);
- else if (is_range_ram < 0)
- return -EINVAL;
+ /*
+ * For legacy reasons, some parts of the physical address range in the
+ * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+ * the e820 tables). So we will track the memory attributes of this
+ * legacy 1MB region using the linear memtype_list always.
+ */
+ if (end >= ISA_END_ADDRESS) {
+ is_range_ram = pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return reserve_ram_pages_type(start, end, req_type,
+ new_type);
+ else if (is_range_ram < 0)
+ return -EINVAL;
+ }
new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
if (is_ISA_range(start, end - 1))
return 0;
- is_range_ram = pagerange_is_ram(start, end);
- if (is_range_ram == 1)
- return free_ram_pages_type(start, end);
- else if (is_range_ram < 0)
- return -EINVAL;
+ /*
+ * For legacy reasons, some parts of the physical address range in the
+ * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+ * the e820 tables). So we will track the memory attributes of this
+ * legacy 1MB region using the linear memtype_list always.
+ */
+ if (end >= ISA_END_ADDRESS) {
+ is_range_ram = pagerange_is_ram(start, end);
+ if (is_range_ram == 1)
+ return free_ram_pages_type(start, end);
+ else if (is_range_ram < 0)
+ return -EINVAL;
+ }
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
@@ -505,6 +522,35 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
}
#endif /* CONFIG_STRICT_DEVMEM */
+/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+static int kernel_map_sync_memtype(u64 base, unsigned long size,
+ unsigned long flags)
+{
+ unsigned long id_sz;
+ int ret;
+
+ if (!pat_enabled || base >= __pa(high_memory))
+ return 0;
+
+ id_sz = (__pa(high_memory) < base + size) ?
+ __pa(high_memory) - base :
+ size;
+
+ ret = ioremap_change_attr((unsigned long)__va(base), id_sz, flags);
+ /*
+ * -EFAULT return means that the addr was not valid and did not have
+ * any identity mapping. That case is a success for
+ * kernel_map_sync_memtype.
+ */
+ if (ret == -EFAULT)
+ ret = 0;
+
+ return ret;
+}
+
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t *vma_prot)
{
@@ -555,9 +601,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
if (retval < 0)
return 0;
- if (((pfn < max_low_pfn_mapped) ||
- (pfn >= (1UL<<(32 - PAGE_SHIFT)) && pfn < max_pfn_mapped)) &&
- ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
+ if (kernel_map_sync_memtype(offset, size, flags)) {
free_memtype(offset, offset + size);
printk(KERN_INFO
"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
@@ -601,12 +645,13 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
* Reserved non RAM regions only and after successful reserve_memtype,
* this func also keeps identity mapping (if any) in sync with this new prot.
*/
-static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
+ int strict_prot)
{
int is_ram = 0;
- int id_sz, ret;
+ int ret;
unsigned long flags;
- unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+ unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
is_ram = pagerange_is_ram(paddr, paddr + size);
@@ -625,26 +670,27 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
return ret;
if (flags != want_flags) {
- free_memtype(paddr, paddr + size);
- printk(KERN_ERR
- "%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
- current->comm, current->pid,
- cattr_name(want_flags),
- (unsigned long long)paddr,
- (unsigned long long)(paddr + size),
- cattr_name(flags));
- return -EINVAL;
+ if (strict_prot || !is_new_memtype_allowed(want_flags, flags)) {
+ free_memtype(paddr, paddr + size);
+ printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
+ " for %Lx-%Lx, got %s\n",
+ current->comm, current->pid,
+ cattr_name(want_flags),
+ (unsigned long long)paddr,
+ (unsigned long long)(paddr + size),
+ cattr_name(flags));
+ return -EINVAL;
+ }
+ /*
+ * We allow returning a different type than the one requested in the
+ * non-strict case.
+ */
+ *vma_prot = __pgprot((pgprot_val(*vma_prot) &
+ (~_PAGE_CACHE_MASK)) |
+ flags);
}
- /* Need to keep identity mapping in sync */
- if (paddr >= __pa(high_memory))
- return 0;
-
- id_sz = (__pa(high_memory) < paddr + size) ?
- __pa(high_memory) - paddr :
- size;
-
- if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+ if (kernel_map_sync_memtype(paddr, size, flags)) {
free_memtype(paddr, paddr + size);
printk(KERN_ERR
"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
@@ -689,6 +735,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
unsigned long vma_start = vma->vm_start;
unsigned long vma_end = vma->vm_end;
unsigned long vma_size = vma_end - vma_start;
+ pgprot_t pgprot;
if (!pat_enabled)
return 0;
@@ -702,7 +749,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
WARN_ON_ONCE(1);
return -EINVAL;
}
- return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+ pgprot = __pgprot(prot);
+ return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}
/* reserve entire vma page by page, using pfn and prot from pte */
@@ -710,7 +758,8 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
continue;
- retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+ pgprot = __pgprot(prot);
+ retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
if (retval)
goto cleanup_ret;
}
@@ -741,7 +790,7 @@ cleanup_ret:
* Note that this function can be called with caller trying to map only a
* subrange/page inside the vma.
*/
-int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long size)
{
int retval = 0;
@@ -758,14 +807,14 @@ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
if (is_linear_pfn_mapping(vma)) {
/* reserve the whole chunk starting from vm_pgoff */
paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
- return reserve_pfn_range(paddr, vma_size, prot);
+ return reserve_pfn_range(paddr, vma_size, prot, 0);
}
/* reserve page by page using pfn and size */
base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
for (i = 0; i < size; i += PAGE_SIZE) {
paddr = base_paddr + i;
- retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+ retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
if (retval)
goto cleanup_ret;
}
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index f884740da318..5ead808dd70c 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -314,17 +314,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
return retval;
if (flags != new_flags) {
- /*
- * Do not fallback to certain memory types with certain
- * requested type:
- * - request is uncached, return cannot be write-back
- * - request is uncached, return cannot be write-combine
- * - request is write-combine, return cannot be write-back
- */
- if ((flags == _PAGE_CACHE_UC_MINUS &&
- (new_flags == _PAGE_CACHE_WB)) ||
- (flags == _PAGE_CACHE_WC &&
- new_flags == _PAGE_CACHE_WB)) {
+ if (!is_new_memtype_allowed(flags, new_flags)) {
free_memtype(addr, addr+len);
return -EINVAL;
}
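
The open-coded test removed here spelled out which fallback types are acceptable: an uncached-minus request must not come back write-back, and a write-combine request must not come back write-back either. A hedged user-space sketch of an is_new_memtype_allowed()-style check mirroring just those two rules (the enum values are made up; the real code compares _PAGE_CACHE_* flags):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative cache-attribute values; the kernel uses _PAGE_CACHE_* bits. */
enum cache_type { CACHE_WB, CACHE_WC, CACHE_UC_MINUS };

/*
 * Sketch of the compatibility check, following the two disallowed
 * combinations the removed open-coded test in i386.c enforced.
 */
static bool is_new_memtype_allowed(enum cache_type req, enum cache_type got)
{
	if ((req == CACHE_UC_MINUS && got == CACHE_WB) ||
	    (req == CACHE_WC && got == CACHE_WB))
		return false;
	return true;
}

int main(void)
{
	printf("UC- request, WB result: %s\n",
	       is_new_memtype_allowed(CACHE_UC_MINUS, CACHE_WB) ? "ok" : "rejected");
	printf("WC request, UC- result: %s\n",
	       is_new_memtype_allowed(CACHE_WC, CACHE_UC_MINUS) ? "ok" : "rejected");
	return 0;
}
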
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index bea215230b20..75b94139e1f2 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
preempt_enable();
}
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
- unsigned long va)
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+ struct mm_struct *mm, unsigned long va)
{
struct {
struct mmuext_op op;
- cpumask_t mask;
+ DECLARE_BITMAP(mask, NR_CPUS);
} *args;
- cpumask_t cpumask = *cpus;
struct multicall_space mcs;
- /*
- * A couple of (to be removed) sanity checks:
- *
- * - current CPU must not be in mask
- * - mask must exist :)
- */
- BUG_ON(cpus_empty(cpumask));
- BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(cpumask_empty(cpus));
BUG_ON(!mm);
- /* If a CPU which we ran on has gone down, OK. */
- cpus_and(cpumask, cpumask, cpu_online_map);
- if (cpus_empty(cpumask))
- return;
-
mcs = xen_mc_entry(sizeof(*args));
args = mcs.args;
- args->mask = cpumask;
- args->op.arg2.vcpumask = &args->mask;
+ args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+ /* Remove us, and any offline CPUS. */
+ cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+ if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+ goto issue;
if (va == TLB_FLUSH_ALL) {
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+issue:
xen_mc_issue(PARAVIRT_LAZY_MMU);
}
@@ -702,17 +695,17 @@ static void xen_write_cr0(unsigned long cr0)
static void xen_write_cr2(unsigned long cr2)
{
- x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
+ percpu_read(xen_vcpu)->arch.cr2 = cr2;
}
static unsigned long xen_read_cr2(void)
{
- return x86_read_percpu(xen_vcpu)->arch.cr2;
+ return percpu_read(xen_vcpu)->arch.cr2;
}
static unsigned long xen_read_cr2_direct(void)
{
- return x86_read_percpu(xen_vcpu_info.arch.cr2);
+ return percpu_read(xen_vcpu_info.arch.cr2);
}
static void xen_write_cr4(unsigned long cr4)
@@ -725,12 +718,12 @@ static void xen_write_cr4(unsigned long cr4)
static unsigned long xen_read_cr3(void)
{
- return x86_read_percpu(xen_cr3);
+ return percpu_read(xen_cr3);
}
static void set_current_cr3(void *v)
{
- x86_write_percpu(xen_current_cr3, (unsigned long)v);
+ percpu_write(xen_current_cr3, (unsigned long)v);
}
static void __xen_write_cr3(bool kernel, unsigned long cr3)
@@ -755,7 +748,7 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
if (kernel) {
- x86_write_percpu(xen_cr3, cr3);
+ percpu_write(xen_cr3, cr3);
/* Update xen_current_cr3 once the batch has actually
been submitted. */
@@ -771,7 +764,7 @@ static void xen_write_cr3(unsigned long cr3)
/* Update while interrupts are disabled, so it's atomic with
respect to ipis */
- x86_write_percpu(xen_cr3, cr3);
+ percpu_write(xen_cr3, cr3);
__xen_write_cr3(true, cr3);
@@ -1652,7 +1645,7 @@ asmlinkage void __init xen_start_kernel(void)
#ifdef CONFIG_X86_64
/* Disable until direct per-cpu data access. */
have_vcpu_info_placement = 0;
- x86_64_init_pda();
+ pda_init(0);
#endif
xen_smp_init();
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index bb042608c602..2e8271431e1a 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -39,7 +39,7 @@ static unsigned long xen_save_fl(void)
struct vcpu_info *vcpu;
unsigned long flags;
- vcpu = x86_read_percpu(xen_vcpu);
+ vcpu = percpu_read(xen_vcpu);
/* flag has opposite sense of mask */
flags = !vcpu->evtchn_upcall_mask;
@@ -62,7 +62,7 @@ static void xen_restore_fl(unsigned long flags)
make sure we don't switch CPUs between getting the vcpu
pointer and updating the mask. */
preempt_disable();
- vcpu = x86_read_percpu(xen_vcpu);
+ vcpu = percpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = flags;
preempt_enable_no_resched();
@@ -83,7 +83,7 @@ static void xen_irq_disable(void)
make sure we don't switch CPUs between getting the vcpu
pointer and updating the mask. */
preempt_disable();
- x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+ percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
preempt_enable_no_resched();
}
@@ -96,7 +96,7 @@ static void xen_irq_enable(void)
the caller is confused and is trying to re-enable interrupts
on an indeterminate processor. */
- vcpu = x86_read_percpu(xen_vcpu);
+ vcpu = percpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = 0;
/* Doesn't matter if we get preempted here, because any
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 503c240e26c7..98cb9869eb24 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1063,18 +1063,14 @@ static void drop_other_mm_ref(void *info)
struct mm_struct *mm = info;
struct mm_struct *active_mm;
-#ifdef CONFIG_X86_64
- active_mm = read_pda(active_mm);
-#else
- active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+ active_mm = percpu_read(cpu_tlbstate.active_mm);
if (active_mm == mm)
leave_mm(smp_processor_id());
/* If this cpu still has a stale cr3 reference, then make sure
it has been flushed. */
- if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+ if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
load_cr3(swapper_pg_dir);
arch_flush_lazy_cpu_mode();
}
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 858938241616..e786fa7f2615 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -39,7 +39,7 @@ static inline void xen_mc_issue(unsigned mode)
xen_mc_flush();
/* restore flags saved in xen_mc_batch */
- local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+ local_irq_restore(percpu_read(xen_mc_irq_flags));
}
/* Set up a callback to be called when the current batch is flushed */
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c44e2069c7c7..72c2eb9b64cd 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
*/
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
-#ifdef CONFIG_X86_32
- __get_cpu_var(irq_stat).irq_resched_count++;
-#else
- add_pda(irq_resched_count, 1);
-#endif
+ inc_irq_stat(irq_resched_count);
return IRQ_HANDLED;
}
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
xen_setup_cpu_clockevents();
cpu_set(cpu, cpu_online_map);
- x86_write_percpu(cpu_state, CPU_ONLINE);
+ percpu_write(cpu_state, CPU_ONLINE);
wmb();
/* We can take interrupts now: we're officially "up". */
@@ -283,22 +279,11 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
struct task_struct *idle = idle_task(cpu);
int rc;
-#ifdef CONFIG_X86_64
- /* Allocate node local memory for AP pdas */
- WARN_ON(cpu == 0);
- if (cpu > 0) {
- rc = get_local_pda(cpu);
- if (rc)
- return rc;
- }
-#endif
-
+ per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
init_gdt(cpu);
- per_cpu(current_task, cpu) = idle;
irq_ctx_init(cpu);
#else
- cpu_pda(cpu)->pcurrent = idle;
clear_tsk_thread_flag(idle, TIF_FORK);
#endif
xen_setup_timer(cpu);
@@ -445,11 +430,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
irq_enter();
generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
- __get_cpu_var(irq_stat).irq_call_count++;
-#else
- add_pda(irq_call_count, 1);
-#endif
+ inc_irq_stat(irq_call_count);
irq_exit();
return IRQ_HANDLED;
@@ -459,11 +440,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
irq_enter();
generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
- __get_cpu_var(irq_stat).irq_call_count++;
-#else
- add_pda(irq_call_count, 1);
-#endif
+ inc_irq_stat(irq_call_count);
irq_exit();
return IRQ_HANDLED;
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 05794c566e87..d6fc51f4ce85 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -17,6 +17,7 @@
#include <asm/processor-flags.h>
#include <asm/errno.h>
#include <asm/segment.h>
+#include <asm/percpu.h>
#include <xen/interface/xen.h>
@@ -28,12 +29,10 @@
#if 1
/*
- x86-64 does not yet support direct access to percpu variables
- via a segment override, so we just need to make sure this code
- never gets used
+ FIXME: x86_64 can now support direct access to percpu variables
+ via a segment override. Update xen accordingly.
*/
#define BUG ud2a
-#define PER_CPU_VAR(var, off) 0xdeadbeef
#endif
/*
@@ -45,14 +44,14 @@ ENTRY(xen_irq_enable_direct)
BUG
/* Unmask events */
- movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+ movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
/* Preempt here doesn't matter because that will deal with
any pending interrupts. The pending check may end up being
run on the wrong CPU, but that doesn't hurt. */
/* Test for pending */
- testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
2: call check_events
@@ -69,7 +68,7 @@ ENDPATCH(xen_irq_enable_direct)
ENTRY(xen_irq_disable_direct)
BUG
- movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+ movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
ret
ENDPROC(xen_irq_disable_direct)
@@ -87,7 +86,7 @@ ENDPATCH(xen_irq_disable_direct)
ENTRY(xen_save_fl_direct)
BUG
- testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+ testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah
addb %ah,%ah
ENDPATCH(xen_save_fl_direct)
@@ -107,13 +106,13 @@ ENTRY(xen_restore_fl_direct)
BUG
testb $X86_EFLAGS_IF>>8, %ah
- setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
+ setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
/* Preempt here doesn't matter because that will deal with
any pending interrupts. The pending check may end up being
run on the wrong CPU, but that doesn't hurt. */
/* check for unmasked and pending */
- cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
+ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
jz 1f
2: call check_events
1:
@@ -195,11 +194,11 @@ RELOC(xen_sysexit, 1b+1)
ENTRY(xen_sysret64)
/* We're already on the usermode stack at this point, but still
with the kernel gs, so we can easily switch back */
- movq %rsp, %gs:pda_oldrsp
- movq %gs:pda_kernelstack,%rsp
+ movq %rsp, PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack),%rsp
pushq $__USER_DS
- pushq %gs:pda_oldrsp
+ pushq PER_CPU_VAR(old_rsp)
pushq %r11
pushq $__USER_CS
pushq %rcx
@@ -212,11 +211,11 @@ RELOC(xen_sysret64, 1b+1)
ENTRY(xen_sysret32)
/* We're already on the usermode stack at this point, but still
with the kernel gs, so we can easily switch back */
- movq %rsp, %gs:pda_oldrsp
- movq %gs:pda_kernelstack, %rsp
+ movq %rsp, PER_CPU_VAR(old_rsp)
+ movq PER_CPU_VAR(kernel_stack), %rsp
pushq $__USER32_DS
- pushq %gs:pda_oldrsp
+ pushq PER_CPU_VAR(old_rsp)
pushq %r11
pushq $__USER32_CS
pushq %rcx