Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                     |   3
-rw-r--r--  arch/arm/Kconfig.debug               |  16
-rw-r--r--  arch/arm/boot/dts/omap3-n900.dts     |   2
-rw-r--r--  arch/arm/common/edma.c               |  54
-rw-r--r--  arch/arm/include/asm/arch_timer.h    |   9
-rw-r--r--  arch/arm/include/asm/perf_event.h    |   2
-rw-r--r--  arch/arm/include/asm/pmu.h           |  36
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h   |   1
-rw-r--r--  arch/arm/kernel/Makefile             |   2
-rw-r--r--  arch/arm/kernel/asm-offsets.c        |  12
-rw-r--r--  arch/arm/kernel/calls.S              |   1
-rw-r--r--  arch/arm/kernel/perf_callchain.c     | 136
-rw-r--r--  arch/arm/kernel/perf_event.c         | 162
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c     | 181
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      |  12
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      |  72
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |  20
-rw-r--r--  arch/arm/mach-bcm/Kconfig            |   3
-rw-r--r--  arch/arm/mach-bcm/Makefile           |   2
-rw-r--r--  arch/arm/mach-bcm/brcmstb.h          |  19
-rw-r--r--  arch/arm/mach-bcm/headsmp-brcmstb.S  |  33
-rw-r--r--  arch/arm/mach-bcm/platsmp-brcmstb.c  | 329
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c   |   3
-rw-r--r--  arch/arm/mach-tegra/Kconfig          |   9
-rw-r--r--  arch/arm/mm/cache-l2x0.c             |  26
-rw-r--r--  arch/arm/mm/dma-mapping.c            |   1
-rw-r--r--  arch/arm/mm/highmem.c                |   3
-rw-r--r--  arch/arm/mm/init.c                   |   8
28 files changed, 828 insertions, 329 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 92dbde801d9c..643567258f92 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1260,9 +1260,6 @@ source "arch/arm/common/Kconfig"
menu "Bus support"
-config ARM_AMBA
- bool
-
config ISA
bool
help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 03dc4c1a8736..6a4ee008b092 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -139,6 +139,17 @@ choice
Say Y here if you want kernel low-level debugging support
on Marvell Berlin SoC based platforms.
+ config DEBUG_BRCMSTB_UART
+ bool "Use BRCMSTB UART for low-level debug"
+ depends on ARCH_BRCMSTB
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want the debug print routines to direct
+ their output to the first serial port on these devices.
+
+ If you have a Broadcom STB chip and would like early print
+ messages to appear over the UART, select this option.
+
config DEBUG_CLPS711X_UART1
bool "Kernel low-level debugging messages via UART1"
depends on ARCH_CLPS711X
@@ -1153,6 +1164,7 @@ config DEBUG_UART_PHYS
default 0xe0000000 if ARCH_SPEAR13XX
default 0xe4007000 if DEBUG_HIP04_UART
default 0xf0000be0 if ARCH_EBSA110
+ default 0xf040ab00 if DEBUG_BRCMSTB_UART
default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE
default 0xf1012000 if ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_ORION5X
@@ -1204,6 +1216,7 @@ config DEBUG_UART_VIRT
default 0xfb002000 if DEBUG_CNS3XXX
default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfc40ab00 if DEBUG_BRCMSTB_UART
default 0xfcfe8600 if DEBUG_UART_BCM63XX
default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
default 0xfd000000 if ARCH_SPEAR13XX
@@ -1260,7 +1273,8 @@ config DEBUG_UART_8250_WORD
ARCH_KEYSTONE || \
DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
DEBUG_DAVINCI_DA8XX_UART2 || \
- DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2
+ DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
+ DEBUG_BRCMSTB_UART
config DEBUG_UART_8250_FLOW_CONTROL
bool "Enable flow control for 8250 UART"
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 739fcf29c643..bc82a12d4c2c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -668,6 +668,8 @@
bank-width = <2>;
pinctrl-names = "default";
pinctrl-0 = <&ethernet_pins>;
+ power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; /* gpio86 */
+ reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* gpio164 */
gpmc,device-width = <2>;
gpmc,sync-clk-ps = <0>;
gpmc,cs-on-ns = <0>;
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index d86771abbf57..e093f2fcee7a 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -244,6 +244,8 @@ struct edma {
/* list of channels with no event trigger; terminated by "-1" */
const s8 *noevent;
+ struct edma_soc_info *info;
+
/* The edma_inuse bit for each PaRAM slot is clear unless the
* channel is in use ... by ARM or DSP, for QDMA, or whatever.
*/
@@ -295,7 +297,7 @@ static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
~(0x7 << bit), queue_no << bit);
}
-static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
+static void assign_priority_to_queue(unsigned ctlr, int queue_no,
int priority)
{
int bit = queue_no * 4;
@@ -314,7 +316,7 @@ static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
* included in that particular EDMA variant (Eg : dm646x)
*
*/
-static void __init map_dmach_param(unsigned ctlr)
+static void map_dmach_param(unsigned ctlr)
{
int i;
for (i = 0; i < EDMA_MAX_DMACH; i++)
@@ -1792,15 +1794,63 @@ static int edma_probe(struct platform_device *pdev)
edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
edma_write_array(j, EDMA_QRAE, i, 0x0);
}
+ edma_cc[j]->info = info[j];
arch_num_cc++;
}
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int edma_pm_resume(struct device *dev)
+{
+ int i, j;
+
+ for (j = 0; j < arch_num_cc; j++) {
+ struct edma *cc = edma_cc[j];
+
+ s8 (*queue_priority_mapping)[2];
+
+ queue_priority_mapping = cc->info->queue_priority_mapping;
+
+ /* Event queue priority mapping */
+ for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+ assign_priority_to_queue(j,
+ queue_priority_mapping[i][0],
+ queue_priority_mapping[i][1]);
+
+ /*
+ * Map the channel to param entry if channel mapping logic
+ * exist
+ */
+ if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
+ map_dmach_param(j);
+
+ for (i = 0; i < cc->num_channels; i++) {
+ if (test_bit(i, cc->edma_inuse)) {
+ /* ensure access through shadow region 0 */
+ edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
+ BIT(i & 0x1f));
+
+ setup_dma_interrupt(i,
+ cc->intr_data[i].callback,
+ cc->intr_data[i].data);
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops edma_pm_ops = {
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume)
+};
+
static struct platform_driver edma_driver = {
.driver = {
.name = "edma",
+ .pm = &edma_pm_ops,
.of_match_table = edma_of_ids,
},
.probe = edma_probe,
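
The resume hook above replays configuration that the controller lost across suspend: it walks info->queue_priority_mapping, an array of {queue, priority} byte pairs terminated by a -1 sentinel, exactly as edma_probe() did at boot. A minimal host-side sketch of that sentinel-terminated walk (table contents are made up for illustration):

#include <stdio.h>

/* {event queue, priority} pairs, -1-terminated, mirroring the
 * queue_priority_mapping table carried in struct edma_soc_info */
static const signed char queue_priority_mapping[][2] = {
        {0, 3},
        {1, 7},
        {-1, -1},
};

int main(void)
{
        int i;

        for (i = 0; queue_priority_mapping[i][0] != -1; i++)
                printf("queue %d -> priority %d\n",
                       queue_priority_mapping[i][0],
                       queue_priority_mapping[i][1]);
        return 0;
}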
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 92793ba69c40..d4ebf5679f1f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -78,6 +78,15 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
+static inline u64 arch_counter_get_cntpct(void)
+{
+ u64 cval;
+
+ isb();
+ asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+ return cval;
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
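
The mrrc instruction above returns the 64-bit CNTPCT in two 32-bit halves: %Q0 names the low word of the 64-bit operand and %R0 the high word, while the preceding isb() keeps the read from being speculated ahead of earlier instructions. A host-side sketch of the recombination, with made-up counter halves:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lo = 0x89abcdef;       /* %Q0: low word (made-up sample) */
        uint32_t hi = 0x01234567;       /* %R0: high word */
        uint64_t cval = ((uint64_t)hi << 32) | lo;

        printf("cval = 0x%016llx\n", (unsigned long long)cval);
        return 0;
}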
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index c3a83691af8e..d9cf138fd7d4 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -12,7 +12,7 @@
#ifndef __ARM_PERF_EVENT_H__
#define __ARM_PERF_EVENT_H__
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0b648c541293..b1596bd59129 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -15,6 +15,8 @@
#include <linux/interrupt.h>
#include <linux/perf_event.h>
+#include <asm/cputype.h>
+
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
@@ -66,19 +68,25 @@ struct pmu_hw_events {
/*
* The events that are active on the PMU for the given index.
*/
- struct perf_event **events;
+ struct perf_event *events[ARMPMU_MAX_HWEVENTS];
/*
* A 1 bit for an index indicates that the counter is being used for
* an event. A 0 means that the counter can be used.
*/
- unsigned long *used_mask;
+ DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
raw_spinlock_t pmu_lock;
+
+ /*
+ * When using percpu IRQs, we need a percpu dev_id. Place it here as we
+ * already have to allocate this struct per cpu.
+ */
+ struct arm_pmu *percpu_pmu;
};
struct arm_pmu {
@@ -107,7 +115,8 @@ struct arm_pmu {
struct mutex reserve_mutex;
u64 max_period;
struct platform_device *plat_device;
- struct pmu_hw_events *(*get_hw_events)(void);
+ struct pmu_hw_events __percpu *hw_events;
+ struct notifier_block hotplug_nb;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -127,6 +136,27 @@ int armpmu_map_event(struct perf_event *event,
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask);
+struct pmu_probe_info {
+ unsigned int cpuid;
+ unsigned int mask;
+ int (*init)(struct arm_pmu *);
+};
+
+#define PMU_PROBE(_cpuid, _mask, _fn) \
+{ \
+ .cpuid = (_cpuid), \
+ .mask = (_mask), \
+ .init = (_fn), \
+}
+
+#define ARM_PMU_PROBE(_cpuid, _fn) \
+ PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)
+
+#define ARM_PMU_XSCALE_MASK ((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)
+
+#define XSCALE_PMU_PROBE(_version, _fn) \
+ PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
+
#endif /* CONFIG_HW_PERF_EVENTS */
#endif /* __ARM_PMU_H__ */
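
The probe macros above reduce CPU detection to a masked compare: a table entry matches when (cpuid & mask) == entry->cpuid. A small user-space sketch of that matching loop (the CPUID values and names are illustrative, and a string stands in for the init callback):

#include <stdio.h>

struct pmu_probe_info {
        unsigned int cpuid;
        unsigned int mask;
        const char *name;       /* stands in for the init callback */
};

static const struct pmu_probe_info table[] = {
        { 0x4100c090, 0xff00fff0, "Cortex-A9 PMU" },
        { 0x4100c080, 0xff00fff0, "Cortex-A8 PMU" },
        { 0, 0, NULL },                         /* sentinel */
};

int main(void)
{
        unsigned int cpuid = 0x411fc093;        /* pretend MIDR read */
        const struct pmu_probe_info *info;

        for (info = table; info->name; info++) {
                if ((cpuid & info->mask) == info->cpuid) {
                        printf("matched %s\n", info->name);
                        break;
                }
        }
        return 0;
}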
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 3aaa75cae90c..705bb7620673 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -412,6 +412,7 @@
#define __NR_seccomp (__NR_SYSCALL_BASE+383)
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
+#define __NR_bpf (__NR_SYSCALL_BASE+386)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 38ddd9f83d0e..8dcbed5016ac 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,7 +82,7 @@ obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 713e807621d2..2d2d6087b9b1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
@@ -39,10 +40,19 @@
* GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
* (http://gcc.gnu.org/PR8896) and incorrect structure
* initialisation in fs/jffs2/erase.c
+ * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
+ * miscompiles find_get_entry(), and can result in EXT3 and EXT4
+ * filesystem corruption (possibly other FS too).
*/
+#ifdef __GNUC__
#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3
+#error Known good compilers: 3.3, 4.x
+#endif
+#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#error Your compiler is too buggy; it is known to miscompile kernels
+#error and result in filesystem corruption and oopses.
+#endif
#endif
int main(void)
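
GCC_VERSION (from linux/compiler-gcc.h, pulled in via the new linux/compiler.h include) encodes the compiler version as major * 10000 + minor * 100 + patchlevel, so the 40800..40802 window rejects exactly GCC 4.8.0 through 4.8.2. A stand-alone illustration of the same range test:

#include <stdio.h>

/* Same encoding as GCC_VERSION in include/linux/compiler-gcc.h */
#define GCC_VERSION(maj, min, patch) ((maj) * 10000 + (min) * 100 + (patch))

int main(void)
{
        int v = GCC_VERSION(4, 8, 2);

        if (v >= 40800 && v < 40803)
                printf("4.8.%d is in the blacklisted range\n", v % 100);
        return 0;
}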
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9f899d8fdcca..e51833f8cc38 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -395,6 +395,7 @@
CALL(sys_seccomp)
CALL(sys_getrandom)
/* 385 */ CALL(sys_memfd_create)
+ CALL(sys_bpf)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
new file mode 100644
index 000000000000..4e02ae5950ff
--- /dev/null
+++ b/arch/arm/kernel/perf_callchain.c
@@ -0,0 +1,136 @@
+/*
+ * ARM callchain support
+ *
+ * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
+ * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based on the ARM OProfile backtrace code.
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct frame_tail {
+ struct frame_tail __user *fp;
+ unsigned long sp;
+ unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct frame_tail buftail;
+ unsigned long err;
+
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= buftail.fp)
+ return NULL;
+
+ return buftail.fp - 1;
+}
+
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ struct frame_tail __user *tail;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->ARM_pc);
+
+ if (!current->mm)
+ return;
+
+ tail = (struct frame_tail __user *)regs->ARM_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = user_backtrace(tail, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int
+callchain_trace(struct stackframe *fr,
+ void *data)
+{
+ struct perf_callchain_entry *entry = data;
+ perf_callchain_store(entry, fr->pc);
+ return 0;
+}
+
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+{
+ struct stackframe fr;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ arm_get_current_stackframe(regs, &fr);
+ walk_stackframe(&fr, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return perf_guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
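
To make the user_backtrace() termination rules concrete — stop on a NULL or misaligned tail, and stop unless the saved fp strictly moves to a higher address — here is a host-side mock that walks a synthetic stack laid out the way the APCS frame tails above expect (addresses and lr values are invented):

#include <stdio.h>
#include <stdint.h>

struct frame_tail {
        struct frame_tail *fp;
        unsigned long sp;
        unsigned long lr;
};

int main(void)
{
        /* Three fake frames at increasing addresses; each fp points
         * just past the caller's tail, as the kernel code expects. */
        struct frame_tail frames[3];

        frames[0] = (struct frame_tail){ &frames[1] + 1, 0, 0x1000 };
        frames[1] = (struct frame_tail){ &frames[2] + 1, 0, 0x2000 };
        frames[2] = (struct frame_tail){ NULL,           0, 0x3000 };

        struct frame_tail *tail = &frames[0];

        while (tail && !((uintptr_t)tail & 0x3)) {
                printf("lr = 0x%lx\n", tail->lr);
                /* frame pointers must strictly progress up the stack */
                if ((uintptr_t)(tail + 1) >= (uintptr_t)tail->fp)
                        break;
                tail = tail->fp - 1;
        }
        return 0;
}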
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 266cba46db3e..e34934f63a49 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -7,21 +7,18 @@
* Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* This code is based on the sparc64 perf event code, which is in turn based
- * on the x86 code. Callchain code is based on the ARM OProfile backtrace
- * code.
+ * on the x86 code.
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/uaccess.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
-#include <asm/stacktrace.h>
static int
armpmu_map_cache_event(const unsigned (*cache_map)
@@ -80,8 +77,12 @@ armpmu_map_event(struct perf_event *event,
u32 raw_event_mask)
{
u64 config = event->attr.config;
+ int type = event->attr.type;
- switch (event->attr.type) {
+ if (type == event->pmu->type)
+ return armpmu_map_raw_event(raw_event_mask, config);
+
+ switch (type) {
case PERF_TYPE_HARDWARE:
return armpmu_map_hw_event(event_map, config);
case PERF_TYPE_HW_CACHE:
@@ -200,7 +201,7 @@ static void
armpmu_del(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -217,7 +218,7 @@ static int
armpmu_add(struct perf_event *event, int flags)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
@@ -274,14 +275,12 @@ validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct pmu_hw_events fake_pmu;
- DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
/*
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
- memset(fake_used_mask, 0, sizeof(fake_used_mask));
- fake_pmu.used_mask = fake_used_mask;
+ memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
if (!validate_event(&fake_pmu, leader))
return -EINVAL;
@@ -305,17 +304,21 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
int ret;
u64 start_clock, finish_clock;
- if (irq_is_percpu(irq))
- dev = *(void **)dev;
- armpmu = dev;
+ /*
+ * we request the IRQ with a (possibly percpu) struct arm_pmu**, but
+ * the handlers expect a struct arm_pmu*. The percpu_irq framework will
+ * do any necessary shifting, we just need to perform the first
+ * dereference.
+ */
+ armpmu = *(void **)dev;
plat_device = armpmu->plat_device;
plat = dev_get_platdata(&plat_device->dev);
start_clock = sched_clock();
if (plat && plat->handle_irq)
- ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
+ ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
else
- ret = armpmu->handle_irq(irq, dev);
+ ret = armpmu->handle_irq(irq, armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@@ -468,7 +471,7 @@ static int armpmu_event_init(struct perf_event *event)
static void armpmu_enable(struct pmu *pmu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+ struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
if (enabled)
@@ -533,130 +536,3 @@ int armpmu_register(struct arm_pmu *armpmu, int type)
return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
-/*
- * Callchain handling code.
- */
-
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long sp;
- unsigned long lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry *entry)
-{
- struct frame_tail buftail;
- unsigned long err;
-
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
- return NULL;
-
- pagefault_disable();
- err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
- pagefault_enable();
-
- if (err)
- return NULL;
-
- perf_callchain_store(entry, buftail.lr);
-
- /*
- * Frame pointers should strictly progress back up the stack
- * (towards higher addresses).
- */
- if (tail + 1 >= buftail.fp)
- return NULL;
-
- return buftail.fp - 1;
-}
-
-void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
-{
- struct frame_tail __user *tail;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
- perf_callchain_store(entry, regs->ARM_pc);
-
- if (!current->mm)
- return;
-
- tail = (struct frame_tail __user *)regs->ARM_fp - 1;
-
- while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
- tail && !((unsigned long)tail & 0x3))
- tail = user_backtrace(tail, entry);
-}
-
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
-static int
-callchain_trace(struct stackframe *fr,
- void *data)
-{
- struct perf_callchain_entry *entry = data;
- perf_callchain_store(entry, fr->pc);
- return 0;
-}
-
-void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
-{
- struct stackframe fr;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
- arm_get_current_stackframe(regs, &fr);
- walk_stackframe(&fr, callchain_trace, entry);
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
-
- return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
- int misc = 0;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
-
- return misc;
-}
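
The comment in armpmu_dispatch_irq() is the key to the new IRQ plumbing: whether the dev_id came from request_irq() (a per_cpu_ptr of percpu_pmu) or request_percpu_irq() (already shifted to this CPU's copy by the percpu_irq framework), it is always a pointer to a struct arm_pmu pointer, so the handler peels exactly one level of indirection. A user-space sketch of that pattern:

#include <stdio.h>

struct arm_pmu { const char *name; };

/* Mirrors armpmu_dispatch_irq(): dev is really a struct arm_pmu **,
 * so dereference once before using it. */
static void dispatch(void *dev)
{
        struct arm_pmu *pmu = *(struct arm_pmu **)dev;
        printf("IRQ for PMU %s\n", pmu->name);
}

int main(void)
{
        struct arm_pmu cpu_pmu = { "armv7_cortex_a9" };
        struct arm_pmu *percpu_pmu = &cpu_pmu; /* stands in for hw_events->percpu_pmu */

        dispatch(&percpu_pmu);
        return 0;
}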
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index eb2c4d55666b..dd9acc95ebc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -35,11 +35,6 @@
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
-static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
-static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
-
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
@@ -69,11 +64,6 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#include "perf_event_v6.c"
#include "perf_event_v7.c"
-static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
-{
- return this_cpu_ptr(&cpu_hw_events);
-}
-
static void cpu_pmu_enable_percpu_irq(void *data)
{
int irq = *(int *)data;
@@ -92,20 +82,21 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
- free_percpu_irq(irq, &percpu_pmu);
+ free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
for (i = 0; i < irqs; ++i) {
if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
- free_irq(irq, cpu_pmu);
+ free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
}
}
}
@@ -114,19 +105,21 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
if (!pmu_device)
return -ENODEV;
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (irqs < 1) {
- printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+ pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
return 0;
}
irq = platform_get_irq(pmu_device, 0);
if (irq >= 0 && irq_is_percpu(irq)) {
- err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &hw_events->percpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
@@ -153,7 +146,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
err = request_irq(irq, handler,
IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
- cpu_pmu);
+ per_cpu_ptr(&hw_events->percpu_pmu, i));
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
irq);
@@ -167,18 +160,50 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (pmu->reset)
+ pmu->reset(pmu);
+ else
+ return NOTIFY_DONE;
+
+ return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
+ int err;
int cpu;
+ struct pmu_hw_events __percpu *cpu_hw_events;
+
+ cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+ if (!cpu_hw_events)
+ return -ENOMEM;
+
+ cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+ err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+ if (err)
+ goto out_hw_events;
+
for_each_possible_cpu(cpu) {
- struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
- events->events = per_cpu(hw_events, cpu);
- events->used_mask = per_cpu(used_mask, cpu);
+ struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
raw_spin_lock_init(&events->pmu_lock);
- per_cpu(percpu_pmu, cpu) = cpu_pmu;
+ events->percpu_pmu = cpu_pmu;
}
- cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+ cpu_pmu->hw_events = cpu_hw_events;
cpu_pmu->request_irq = cpu_pmu_request_irq;
cpu_pmu->free_irq = cpu_pmu_free_irq;
@@ -189,31 +214,19 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* If no interrupts available, set the corresponding capability flag */
if (!platform_get_irq(cpu_pmu->plat_device, 0))
cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-}
-
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
- void *hcpu)
-{
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(cpu_pmu);
- else
- return NOTIFY_DONE;
+ return 0;
- return NOTIFY_OK;
+out_hw_events:
+ free_percpu(cpu_hw_events);
+ return err;
}
-static struct notifier_block cpu_pmu_hotplug_notifier = {
- .notifier_call = cpu_pmu_notify,
-};
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+ unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+ free_percpu(cpu_pmu->hw_events);
+}
/*
* PMU platform driver and devicetree bindings.
@@ -241,48 +254,34 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
{},
};
+static const struct pmu_probe_info pmu_probe_table[] = {
+ ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
+ XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
+ XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
+ { /* sentinel value */ }
+};
+
/*
* CPU PMU identification and probing.
*/
static int probe_current_pmu(struct arm_pmu *pmu)
{
int cpu = get_cpu();
+ unsigned int cpuid = read_cpuid_id();
int ret = -ENODEV;
+ const struct pmu_probe_info *info;
pr_info("probing PMU on CPU %d\n", cpu);
- switch (read_cpuid_part()) {
- /* ARM Ltd CPUs. */
- case ARM_CPU_PART_ARM1136:
- ret = armv6_1136_pmu_init(pmu);
- break;
- case ARM_CPU_PART_ARM1156:
- ret = armv6_1156_pmu_init(pmu);
- break;
- case ARM_CPU_PART_ARM1176:
- ret = armv6_1176_pmu_init(pmu);
- break;
- case ARM_CPU_PART_ARM11MPCORE:
- ret = armv6mpcore_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A8:
- ret = armv7_a8_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A9:
- ret = armv7_a9_pmu_init(pmu);
- break;
-
- default:
- if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
- switch (xscale_cpu_arch_version()) {
- case ARM_CPU_XSCALE_ARCH_V1:
- ret = xscale1pmu_init(pmu);
- break;
- case ARM_CPU_XSCALE_ARCH_V2:
- ret = xscale2pmu_init(pmu);
- break;
- }
- }
+ for (info = pmu_probe_table; info->init != NULL; info++) {
+ if ((cpuid & info->mask) != info->cpuid)
+ continue;
+ ret = info->init(pmu);
break;
}
@@ -299,13 +298,13 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
int ret = -ENODEV;
if (cpu_pmu) {
- pr_info("attempt to register multiple PMU devices!");
+ pr_info("attempt to register multiple PMU devices!\n");
return -ENOSPC;
}
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
if (!pmu) {
- pr_info("failed to allocate PMU device!");
+ pr_info("failed to allocate PMU device!\n");
return -ENOMEM;
}
@@ -320,18 +319,24 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
}
if (ret) {
- pr_info("failed to probe PMU!");
+ pr_info("failed to probe PMU!\n");
goto out_free;
}
- cpu_pmu_init(cpu_pmu);
- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ ret = cpu_pmu_init(cpu_pmu);
+ if (ret)
+ goto out_free;
- if (!ret)
- return 0;
+ ret = armpmu_register(cpu_pmu, -1);
+ if (ret)
+ goto out_destroy;
+ return 0;
+
+out_destroy:
+ cpu_pmu_destroy(cpu_pmu);
out_free:
- pr_info("failed to register PMU devices!");
+ pr_info("failed to register PMU devices!\n");
kfree(pmu);
return ret;
}
@@ -348,16 +353,6 @@ static struct platform_driver cpu_pmu_driver = {
static int __init register_pmu_driver(void)
{
- int err;
-
- err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
- if (err)
- return err;
-
- err = platform_driver_register(&cpu_pmu_driver);
- if (err)
- unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
-
- return err;
+ return platform_driver_register(&cpu_pmu_driver);
}
device_initcall(register_pmu_driver);
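
The hotplug notifier moved above keys off CPU_STARTING after masking out CPU_TASKS_FROZEN, so the suspend/resume variant (CPU_STARTING_FROZEN) also triggers a PMU reset. A small sketch of that masking, using the notifier constants' conventional values (treat them as illustrative):

#include <stdio.h>

#define CPU_STARTING            0x000A  /* as in include/linux/cpu.h */
#define CPU_TASKS_FROZEN        0x0010

static int wants_reset(unsigned long action)
{
        /* Strip the FROZEN flag so the resume path is handled
         * identically to a plain CPU_STARTING. */
        return (action & ~CPU_TASKS_FROZEN) == CPU_STARTING;
}

int main(void)
{
        printf("CPU_STARTING:        %d\n", wants_reset(CPU_STARTING));
        printf("CPU_STARTING|FROZEN: %d\n",
               wants_reset(CPU_STARTING | CPU_TASKS_FROZEN));
        printf("other action:        %d\n", wants_reset(0x0003));
        return 0;
}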
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index abfeb04f3213..f2ffd5c542ed 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -262,7 +262,7 @@ static void armv6pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -300,7 +300,7 @@ armv6pmu_handle_irq(int irq_num,
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -356,7 +356,7 @@ armv6pmu_handle_irq(int irq_num,
static void armv6pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -368,7 +368,7 @@ static void armv6pmu_start(struct arm_pmu *cpu_pmu)
static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = armv6_pmcr_read();
@@ -409,7 +409,7 @@ static void armv6pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
@@ -444,7 +444,7 @@ static void armv6mpcore_pmu_disable_event(struct perf_event *event)
unsigned long val, mask, flags, evt = 0;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (ARMV6_CYCLE_COUNTER == idx) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 116758b77f93..8993770c47de 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -564,13 +564,11 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
}
-static inline int armv7_pmnc_select_counter(int idx)
+static inline void armv7_pmnc_select_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
-
- return idx;
}
static inline u32 armv7pmu_read_counter(struct perf_event *event)
@@ -580,13 +578,15 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
int idx = hwc->idx;
u32 value = 0;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
- else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+ } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
+ } else {
+ armv7_pmnc_select_counter(idx);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
+ }
return value;
}
@@ -597,45 +597,43 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
+ if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
- else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+ } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
- else if (armv7_pmnc_select_counter(idx) == idx)
+ } else {
+ armv7_pmnc_select_counter(idx);
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+ }
}
static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
{
- if (armv7_pmnc_select_counter(idx) == idx) {
- val &= ARMV7_EVTYPE_MASK;
- asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
- }
+ armv7_pmnc_select_counter(idx);
+ val &= ARMV7_EVTYPE_MASK;
+ asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
-static inline int armv7_pmnc_enable_counter(int idx)
+static inline void armv7_pmnc_enable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
- return idx;
}
-static inline int armv7_pmnc_disable_counter(int idx)
+static inline void armv7_pmnc_disable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
- return idx;
}
-static inline int armv7_pmnc_enable_intens(int idx)
+static inline void armv7_pmnc_enable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
- return idx;
}
-static inline int armv7_pmnc_disable_intens(int idx)
+static inline void armv7_pmnc_disable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
@@ -643,8 +641,6 @@ static inline int armv7_pmnc_disable_intens(int idx)
/* Clear the overflow flag in case an interrupt is pending. */
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
isb();
-
- return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
@@ -667,34 +663,34 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
u32 val;
unsigned int cnt;
- printk(KERN_INFO "PMNC registers dump:\n");
+ pr_info("PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
- printk(KERN_INFO "PMNC =0x%08x\n", val);
+ pr_info("PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
- printk(KERN_INFO "CNTENS=0x%08x\n", val);
+ pr_info("CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
- printk(KERN_INFO "INTENS=0x%08x\n", val);
+ pr_info("INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
- printk(KERN_INFO "FLAGS =0x%08x\n", val);
+ pr_info("FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
- printk(KERN_INFO "SELECT=0x%08x\n", val);
+ pr_info("SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
- printk(KERN_INFO "CCNT =0x%08x\n", val);
+ pr_info("CCNT =0x%08x\n", val);
for (cnt = ARMV7_IDX_COUNTER0;
cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
- printk(KERN_INFO "CNT[%d] count =0x%08x\n",
+ pr_info("CNT[%d] count =0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
- printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
+ pr_info("CNT[%d] evtsel=0x%08x\n",
ARMV7_IDX_TO_COUNTER(cnt), val);
}
}
@@ -705,7 +701,7 @@ static void armv7pmu_enable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -751,7 +747,7 @@ static void armv7pmu_disable_event(struct perf_event *event)
unsigned long flags;
struct hw_perf_event *hwc = &event->hw;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
@@ -783,7 +779,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
u32 pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -843,7 +839,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
static void armv7pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Enable all counters */
@@ -854,7 +850,7 @@ static void armv7pmu_start(struct arm_pmu *cpu_pmu)
static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
/* Disable all counters */
@@ -1287,7 +1283,7 @@ static void krait_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/* Disable counter and interrupt */
raw_spin_lock_irqsave(&events->pmu_lock, flags);
@@ -1313,7 +1309,7 @@ static void krait_pmu_enable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
/*
* Enable counter and interrupt, and set the counter to count
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 08da0af550b7..8af9f1f82c68 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -138,7 +138,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -198,7 +198,7 @@ static void xscale1pmu_enable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
switch (idx) {
@@ -234,7 +234,7 @@ static void xscale1pmu_disable_event(struct perf_event *event)
unsigned long val, mask, evt, flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
switch (idx) {
@@ -287,7 +287,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -299,7 +299,7 @@ static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale1pmu_read_pmnc();
@@ -485,7 +485,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
- struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -539,7 +539,7 @@ static void xscale2pmu_enable_event(struct perf_event *event)
unsigned long flags, ien, evtsel;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -585,7 +585,7 @@ static void xscale2pmu_disable_event(struct perf_event *event)
unsigned long flags, ien, evtsel, of_flags;
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
int idx = hwc->idx;
ien = xscale2pmu_read_int_enable();
@@ -651,7 +651,7 @@ out:
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
@@ -663,7 +663,7 @@ static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
- struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
raw_spin_lock_irqsave(&events->pmu_lock, flags);
val = xscale2pmu_read_pmnc();
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index 2abad742516d..9625dddb5cd3 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -118,10 +118,7 @@ config ARCH_BCM_63XX
config ARCH_BRCMSTB
bool "Broadcom BCM7XXX based boards" if ARCH_MULTI_V7
- depends on MMU
select ARM_GIC
- select MIGHT_HAVE_PCI
- select HAVE_SMP
select HAVE_ARM_ARCH_TIMER
select BRCMSTB_GISB_ARB
select BRCMSTB_L2_IRQ
diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
index 300ae4b79ae6..6710ea321220 100644
--- a/arch/arm/mach-bcm/Makefile
+++ b/arch/arm/mach-bcm/Makefile
@@ -38,5 +38,7 @@ obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o
obj-$(CONFIG_ARCH_BCM_63XX) := bcm63xx.o
ifeq ($(CONFIG_ARCH_BRCMSTB),y)
+CFLAGS_platsmp-brcmstb.o += -march=armv7-a
obj-y += brcmstb.o
+obj-$(CONFIG_SMP) += headsmp-brcmstb.o platsmp-brcmstb.o
endif
diff --git a/arch/arm/mach-bcm/brcmstb.h b/arch/arm/mach-bcm/brcmstb.h
new file mode 100644
index 000000000000..ec0c3d112b36
--- /dev/null
+++ b/arch/arm/mach-bcm/brcmstb.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013-2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMSTB_H__
+#define __BRCMSTB_H__
+
+void brcmstb_secondary_startup(void);
+
+#endif /* __BRCMSTB_H__ */
diff --git a/arch/arm/mach-bcm/headsmp-brcmstb.S b/arch/arm/mach-bcm/headsmp-brcmstb.S
new file mode 100644
index 000000000000..199c1ea58248
--- /dev/null
+++ b/arch/arm/mach-bcm/headsmp-brcmstb.S
@@ -0,0 +1,33 @@
+/*
+ * SMP boot code for secondary CPUs
+ * Based on arch/arm/mach-tegra/headsmp.S
+ *
+ * Copyright (C) 2010 NVIDIA, Inc.
+ * Copyright (C) 2013-2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/assembler.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+ .section ".text.head", "ax"
+
+ENTRY(brcmstb_secondary_startup)
+ /*
+ * Ensure CPU is in a sane state by disabling all IRQs and switching
+ * into SVC mode.
+ */
+ setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0
+
+ bl v7_invalidate_l1
+ b secondary_startup
+ENDPROC(brcmstb_secondary_startup)
diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c
new file mode 100644
index 000000000000..31c87a284a34
--- /dev/null
+++ b/arch/arm/mach-bcm/platsmp-brcmstb.c
@@ -0,0 +1,329 @@
+/*
+ * Broadcom STB CPU SMP and hotplug support for ARM
+ *
+ * Copyright (C) 2013-2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/smp.h>
+#include <linux/mfd/syscon.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
+
+#include "brcmstb.h"
+
+enum {
+ ZONE_MAN_CLKEN_MASK = BIT(0),
+ ZONE_MAN_RESET_CNTL_MASK = BIT(1),
+ ZONE_MAN_MEM_PWR_MASK = BIT(4),
+ ZONE_RESERVED_1_MASK = BIT(5),
+ ZONE_MAN_ISO_CNTL_MASK = BIT(6),
+ ZONE_MANUAL_CONTROL_MASK = BIT(7),
+ ZONE_PWR_DN_REQ_MASK = BIT(9),
+ ZONE_PWR_UP_REQ_MASK = BIT(10),
+ ZONE_BLK_RST_ASSERT_MASK = BIT(12),
+ ZONE_PWR_OFF_STATE_MASK = BIT(25),
+ ZONE_PWR_ON_STATE_MASK = BIT(26),
+ ZONE_DPG_PWR_STATE_MASK = BIT(28),
+ ZONE_MEM_PWR_STATE_MASK = BIT(29),
+ ZONE_RESET_STATE_MASK = BIT(31),
+ CPU0_PWR_ZONE_CTRL_REG = 1,
+ CPU_RESET_CONFIG_REG = 2,
+};
+
+static void __iomem *cpubiuctrl_block;
+static void __iomem *hif_cont_block;
+static u32 cpu0_pwr_zone_ctrl_reg;
+static u32 cpu_rst_cfg_reg;
+static u32 hif_cont_reg;
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * We must quiesce a dying CPU before it can be killed by the boot CPU. Because
+ * one or more caches may be disabled, we must flush to ensure coherency. We
+ * cannot use traditional completion structures or spinlocks as they rely on
+ * coherency.
+ */
+static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state);
+
+static int per_cpu_sw_state_rd(u32 cpu)
+{
+ sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
+ return per_cpu(per_cpu_sw_state, cpu);
+}
+
+static void per_cpu_sw_state_wr(u32 cpu, int val)
+{
+ dmb();
+ per_cpu(per_cpu_sw_state, cpu) = val;
+ sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
+}
+#else
+static inline void per_cpu_sw_state_wr(u32 cpu, int val) { }
+#endif
+
+static void __iomem *pwr_ctrl_get_base(u32 cpu)
+{
+ void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg;
+ base += (cpu_logical_map(cpu) * 4);
+ return base;
+}
+
+static u32 pwr_ctrl_rd(u32 cpu)
+{
+ void __iomem *base = pwr_ctrl_get_base(cpu);
+ return readl_relaxed(base);
+}
+
+static void pwr_ctrl_wr(u32 cpu, u32 val)
+{
+ void __iomem *base = pwr_ctrl_get_base(cpu);
+ writel(val, base);
+}
+
+static void cpu_rst_cfg_set(u32 cpu, int set)
+{
+ u32 val;
+ val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg);
+ if (set)
+ val |= BIT(cpu_logical_map(cpu));
+ else
+ val &= ~BIT(cpu_logical_map(cpu));
+ writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg);
+}
+
+static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr)
+{
+ const int reg_ofs = cpu_logical_map(cpu) * 8;
+ writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs);
+ writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs);
+}
+
+static void brcmstb_cpu_boot(u32 cpu)
+{
+ /* Mark this CPU as "up" */
+ per_cpu_sw_state_wr(cpu, 1);
+
+ /*
+ * Set the reset vector to point to the secondary_startup
+ * routine
+ */
+ cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));
+
+ /* Unhalt the cpu */
+ cpu_rst_cfg_set(cpu, 0);
+}
+
+static void brcmstb_cpu_power_on(u32 cpu)
+{
+ /*
+ * The secondary core's power was cut, so we must go through
+ * power-on initialization.
+ */
+ u32 tmp;
+
+ /* Request zone power up */
+ pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK);
+
+ /* Wait for the power up FSM to complete */
+ do {
+ tmp = pwr_ctrl_rd(cpu);
+ } while (!(tmp & ZONE_PWR_ON_STATE_MASK));
+}
+
+static int brcmstb_cpu_get_power_state(u32 cpu)
+{
+ int tmp = pwr_ctrl_rd(cpu);
+ return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void brcmstb_cpu_die(u32 cpu)
+{
+ v7_exit_coherency_flush(all);
+
+ per_cpu_sw_state_wr(cpu, 0);
+
+ /* Sit and wait to die */
+ wfi();
+
+ /* We should never get here... */
+ while (1)
+ ;
+}
+
+static int brcmstb_cpu_kill(u32 cpu)
+{
+ u32 tmp;
+
+ while (per_cpu_sw_state_rd(cpu))
+ ;
+
+ /* Program zone reset */
+ pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK |
+ ZONE_PWR_DN_REQ_MASK);
+
+ /* Verify zone reset */
+ tmp = pwr_ctrl_rd(cpu);
+ if (!(tmp & ZONE_RESET_STATE_MASK))
+ pr_err("%s: Zone reset bit for CPU %d not asserted!\n",
+ __func__, cpu);
+
+ /* Wait for power down */
+ do {
+ tmp = pwr_ctrl_rd(cpu);
+ } while (!(tmp & ZONE_PWR_OFF_STATE_MASK));
+
+ /* Flush pipeline before resetting CPU */
+ mb();
+
+ /* Assert reset on the CPU */
+ cpu_rst_cfg_set(cpu, 1);
+
+ return 1;
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static int __init setup_hifcpubiuctrl_regs(struct device_node *np)
+{
+ int rc = 0;
+ char *name;
+ struct device_node *syscon_np = NULL;
+
+ name = "syscon-cpu";
+
+ syscon_np = of_parse_phandle(np, name, 0);
+ if (!syscon_np) {
+ pr_err("can't find phandle %s\n", name);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ cpubiuctrl_block = of_iomap(syscon_np, 0);
+ if (!cpubiuctrl_block) {
+ pr_err("iomap failed for cpubiuctrl_block\n");
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG,
+ &cpu0_pwr_zone_ctrl_reg);
+ if (rc) {
+ pr_err("failed to read 1st entry from %s property (%d)\n", name,
+ rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG,
+ &cpu_rst_cfg_reg);
+ if (rc) {
+ pr_err("failed to read 2nd entry from %s property (%d)\n", name,
+ rc);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ of_node_put(syscon_np);
+ return rc;
+}
+
+static int __init setup_hifcont_regs(struct device_node *np)
+{
+ int rc = 0;
+ char *name;
+ struct device_node *syscon_np = NULL;
+
+ name = "syscon-cont";
+
+ syscon_np = of_parse_phandle(np, name, 0);
+ if (!syscon_np) {
+ pr_err("can't find phandle %s\n", name);
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ hif_cont_block = of_iomap(syscon_np, 0);
+ if (!hif_cont_block) {
+ pr_err("iomap failed for hif_cont_block\n");
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Offset is at top of hif_cont_block */
+ hif_cont_reg = 0;
+
+cleanup:
+ of_node_put(syscon_np);
+ return rc;
+}
+
+static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus)
+{
+ int rc;
+ struct device_node *np;
+ char *name;
+
+ name = "brcm,brcmstb-smpboot";
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np) {
+ pr_err("can't find compatible node %s\n", name);
+ return;
+ }
+
+ rc = setup_hifcpubiuctrl_regs(np);
+ if (rc)
+ return;
+
+ rc = setup_hifcont_regs(np);
+ if (rc)
+ return;
+}
+
+static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ /* Missing the brcm,brcmstb-smpboot DT node? */
+ if (!cpubiuctrl_block || !hif_cont_block)
+ return -ENODEV;
+
+ /* Bring up power to the core if necessary */
+ if (brcmstb_cpu_get_power_state(cpu) == 0)
+ brcmstb_cpu_power_on(cpu);
+
+ brcmstb_cpu_boot(cpu);
+
+ return 0;
+}
+
+static struct smp_operations brcmstb_smp_ops __initdata = {
+ .smp_prepare_cpus = brcmstb_cpu_ctrl_setup,
+ .smp_boot_secondary = brcmstb_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_kill = brcmstb_cpu_kill,
+ .cpu_die = brcmstb_cpu_die,
+#endif
+};
+
+CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops);
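
per_cpu_sw_state is a lockless handshake: the dying CPU flushes its caches, writes 0, and the boot CPU spins in brcmstb_cpu_kill() until it observes the write before cutting power. In coherent user space the explicit sync_cache_*() calls are unnecessary, but the shape of the protocol can be sketched with C11 atomics (an analogy only, not the kernel mechanism; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int sw_state = 1; /* 1 = CPU alive, 0 = quiesced */

static void *dying_cpu(void *arg)
{
        /* kernel: v7_exit_coherency_flush(), then per_cpu_sw_state_wr(cpu, 0) */
        atomic_store_explicit(&sw_state, 0, memory_order_release);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, dying_cpu, NULL);

        /* kernel: brcmstb_cpu_kill() spins on per_cpu_sw_state_rd() */
        while (atomic_load_explicit(&sw_state, memory_order_acquire))
                ;

        pthread_join(t, NULL);
        printf("CPU quiesced, safe to cut power\n");
        return 0;
}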
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index c95346c94829..cec9d6c6442c 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void)
platform_device_register(&omap3_rom_rng_device);
}
-
- /* Only on some development boards */
- gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset");
}
static void __init omap3_tao3530_legacy_init(void)
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 095399618ca5..d0be9a1ef6b8 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -2,6 +2,7 @@ menuconfig ARCH_TEGRA
bool "NVIDIA Tegra" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
+ select ARM_AMBA
select ARM_GIC
select CLKSRC_MMIO
select HAVE_ARM_SCU if SMP
@@ -59,12 +60,4 @@ config ARCH_TEGRA_124_SOC
Support for NVIDIA Tegra T124 processor family, based on the
ARM CortexA15MP CPU
-config TEGRA_AHB
- bool "Enable AHB driver for NVIDIA Tegra SoCs"
- default y
- help
- Adds AHB configuration functionality for NVIDIA Tegra SoCs,
- which controls AHB bus master arbitration and some
- performance parameters(priority, prefech size).
-
endif
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 55f9d6e0cc88..5e65ca8dea62 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt;
* @associativity: variable to return the calculated associativity in
* @max_way_size: the maximum size in bytes for the cache ways
*/
-static void __init l2x0_cache_size_of_parse(const struct device_node *np,
+static int __init l2x0_cache_size_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask,
u32 *associativity,
u32 max_way_size)
@@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
of_property_read_u32(np, "cache-line-size", &line_size);
if (!cache_size || !sets)
- return;
+ return -ENODEV;
/* All these l2 caches have the same line = block size actually */
if (!line_size) {
@@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
if (way_size > max_way_size) {
pr_err("L2C OF: set size %dKB is too large\n", way_size);
- return;
+ return -EINVAL;
}
pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
if (way_size_bits < 1 || way_size_bits > 6) {
pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
way_size);
- return;
+ return -EINVAL;
}
mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
+
+ return 0;
}
static void __init l2x0_of_parse(const struct device_node *np,
@@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
u32 dirty = 0;
u32 val = 0, mask = 0;
u32 assoc;
+ int ret;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
@@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np,
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
- l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+ ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
+ if (ret)
+ return;
+
if (assoc > 8) {
pr_err("l2x0 of: cache setting yield too high associativity\n");
pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
+ int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
@@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np,
l2x0_base + L310_ADDR_FILTER_START);
}
- l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+ ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
+ if (ret)
+ return;
+
switch (assoc) {
case 16:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np,
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
default:
- pr_err("PL310 OF: cache setting yield illegal associativity\n");
- pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
+ pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
+ assoc);
break;
}
}
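
For the geometry arithmetic behind l2x0_cache_size_of_parse(): given cache-size, cache-sets and a line size from DT, associativity = size / (sets * line) and the per-way size is size / associativity, which the AUX_CTRL way-size field encodes as 8KB << n with n in 1..6. A sketch with illustrative numbers (the real parser also falls back to cache-block-size and a default line size):

#include <stdio.h>

int main(void)
{
        unsigned int cache_size = 512 * 1024;   /* "cache-size" */
        unsigned int sets = 1024;               /* "cache-sets" */
        unsigned int line_size = 32;            /* "cache-line-size" */
        unsigned int assoc, way_size, way_size_bits;

        /* cache size = sets * associativity * line size */
        assoc = cache_size / (sets * line_size);
        way_size = cache_size / assoc;          /* == sets * line_size */

        /* AUX_CTRL way-size encoding: way size = 8KB << n, n in 1..6 */
        way_size_bits = 0;
        while ((8192u << way_size_bits) < way_size)
                way_size_bits++;

        printf("assoc=%u way=%uKB bits=%u (valid: %s)\n",
               assoc, way_size >> 10, way_size_bits,
               (way_size_bits >= 1 && way_size_bits <= 6) ? "yes" : "no");
        return 0;
}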
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c245d903927f..e8907117861e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
{
return dma_common_pages_remap(pages, size,
VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
- return NULL;
}
/*
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca9052..e17ed00828d7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
{
unsigned long vaddr;
int idx, type;
+ struct page *page = pfn_to_page(pfn);
pagefault_disable();
+ if (!PageHighMem(page))
+ return page_address(page);
type = kmap_atomic_idx_push();
idx = type + KM_TYPE_NR * smp_processor_id();
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d9230..9481f85c56e6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -559,10 +559,10 @@ void __init mem_init(void)
#ifdef CONFIG_MODULES
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
- " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
+ " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),