Commit 404a02cb authored by Russell King

Merge branch 'devel-stable' into devel

Conflicts:
	arch/arm/mach-pxa/clock.c
	arch/arm/mach-pxa/clock.h
parents 28cdac66 1051b9f0
......@@ -364,6 +364,14 @@ config ARCH_MXC
help
Support for Freescale MXC/iMX-based family of processors
config ARCH_MXS
bool "Freescale MXS-based"
select GENERIC_CLOCKEVENTS
select ARCH_REQUIRE_GPIOLIB
select COMMON_CLKDEV
help
Support for Freescale MXS-based family of processors
config ARCH_STMP3XXX
bool "Freescale STMP3xxx"
select CPU_ARM926T
......@@ -817,6 +825,7 @@ config ARCH_U8500
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_CPUFREQ
help
Support for ST-Ericsson's Ux500 architecture
......@@ -923,6 +932,8 @@ source "arch/arm/mach-mv78xx0/Kconfig"
source "arch/arm/plat-mxc/Kconfig"
source "arch/arm/mach-mxs/Kconfig"
source "arch/arm/mach-netx/Kconfig"
source "arch/arm/mach-nomadik/Kconfig"
......@@ -1022,8 +1033,8 @@ source arch/arm/mm/Kconfig
config IWMMXT
bool "Enable iWMMXt support"
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
default y if PXA27x || PXA3xx || ARCH_MMP
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
default y if PXA27x || PXA3xx || PXA95x || ARCH_MMP
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
......
......@@ -154,10 +154,11 @@ machine-$(CONFIG_ARCH_MSM) := msm
machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
machine-$(CONFIG_ARCH_MX1) := imx
machine-$(CONFIG_ARCH_MX2) := imx
machine-$(CONFIG_ARCH_MX25) := mx25
machine-$(CONFIG_ARCH_MX25) := imx
machine-$(CONFIG_ARCH_MX3) := mx3
machine-$(CONFIG_ARCH_MX5) := mx5
machine-$(CONFIG_ARCH_MXC91231) := mxc91231
machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_NS9XXX) := ns9xxx
......
......@@ -84,6 +84,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
CONFIG_SPI=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
......
......@@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {
struct arch_hw_breakpoint {
u32 address;
u32 trigger;
struct perf_event *suspended_wp;
struct arch_hw_breakpoint_ctrl ctrl;
struct arch_hw_breakpoint_ctrl step_ctrl;
struct arch_hw_breakpoint_ctrl ctrl;
};
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
......
......@@ -54,6 +54,7 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
......
......@@ -178,6 +178,7 @@ __dabt_svc:
@
@ set desired IRQ state, then call main handler
@
debug_entry r1
msr cpsr_c, r9
mov r2, sp
bl do_DataAbort
......@@ -304,6 +305,7 @@ __pabt_svc:
#else
bl CPU_PABORT_HANDLER
#endif
debug_entry r1
msr cpsr_c, r9 @ Maybe enable interrupts
mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
......@@ -419,6 +421,7 @@ __dabt_usr:
@
@ IRQs on, then call the main handler
@
debug_entry r1
enable_irq
mov r2, sp
adr lr, BSYM(ret_from_exception)
......@@ -683,6 +686,7 @@ __pabt_usr:
#else
bl CPU_PABORT_HANDLER
#endif
debug_entry r1
enable_irq @ Enable interrupts
mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
......
......@@ -165,6 +165,25 @@
.endm
#endif /* !CONFIG_THUMB2_KERNEL */
@
@ Debug exceptions are taken as prefetch or data aborts.
@ We must disable preemption during the handler so that
@ we can access the debug registers safely.
@
.macro debug_entry, fsr
#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
ldr r4, =0x40f @ mask out fsr.fs
and r5, r4, \fsr
cmp r5, #2 @ debug exception
bne 1f
get_thread_info r10
ldr r6, [r10, #TI_PREEMPT] @ get preempt count
add r11, r6, #1 @ increment it
str r11, [r10, #TI_PREEMPT]
1:
#endif
.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
......
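As an orientation aid, here is a rough C rendering of what the new debug_entry assembly macro above does. This is an illustrative sketch only, with a made-up helper name; the authoritative code is the macro itself, which is assembled only when CONFIG_HAVE_HW_BREAKPOINT and CONFIG_PREEMPT are both set. It bumps the preempt count when the fault status indicates a debug exception, so the handler can touch the debug registers without being preempted; the matching decrement is expected on the exception return path.

#include <linux/thread_info.h>

/* Hypothetical C equivalent of the debug_entry macro -- sketch, not kernel code. */
static inline void debug_entry_sketch(unsigned long fsr)
{
	/* (fsr & 0x40f) == 2 is the "debug exception" fault status tested above. */
	if ((fsr & 0x40f) == 2)
		current_thread_info()->preempt_count++;	/* mirrors the TI_PREEMPT load/add/store */
}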
......@@ -19,6 +19,14 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#if defined(CONFIG_CPU_PJ4)
#define PJ4(code...) code
#define XSC(code...)
#else
#define PJ4(code...)
#define XSC(code...) code
#endif
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
......@@ -58,11 +66,17 @@
ENTRY(iwmmxt_task_enable)
mrc p15, 0, r2, c15, c1, 0
tst r2, #0x3 @ CP0 and CP1 accessible?
XSC(mrc p15, 0, r2, c15, c1, 0)
PJ4(mrc p15, 0, r2, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r2, #0x3)
PJ4(tst r2, #0xf)
movne pc, lr @ if so no business here
orr r2, r2, #0x3 @ enable access to CP0 and CP1
mcr p15, 0, r2, c15, c1, 0
@ enable access to CP0 and CP1
XSC(orr r2, r2, #0x3)
XSC(mcr p15, 0, r2, c15, c1, 0)
PJ4(orr r2, r2, #0xf)
PJ4(mcr p15, 0, r2, c1, c0, 2)
ldr r3, =concan_owner
add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
......@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
mrc p15, 0, r4, c15, c1, 0
orr r4, r4, #0x3 @ enable access to CP0 and CP1
mcr p15, 0, r4, c15, c1, 0
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0xf)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0x3)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
bl concan_save
bic r4, r4, #0x3 @ disable access to CP0 and CP1
mcr p15, 0, r4, c15, c1, 0
@ disable access to CP0 and CP1
XSC(bic r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(bic r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
......@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
mrc p15, 0, r1, c15, c1, 0
tst r1, #0x3 @ CP0 and CP1 accessible?
XSC(mrc p15, 0, r1, c15, c1, 0)
PJ4(mrc p15, 0, r1, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r1, #0x3)
PJ4(tst r1, #0xf)
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
......@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
teq r2, r3 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
1: eor r1, r1, #3 @ flip Concan access
mcr p15, 0, r1, c15, c1, 0
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)
PJ4(mcr p15, 0, r1, c1, c0, 2)
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
......
......@@ -32,7 +32,7 @@ static struct platform_device *pmu_device;
* Hardware lock to serialize accesses to PMU registers. Needed for the
* read/modify/write sequences.
*/
DEFINE_SPINLOCK(pmu_lock);
static DEFINE_RAW_SPINLOCK(pmu_lock);
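As a reading aid for the hunks below, the read/modify/write sequence the comment refers to has this shape. It is a minimal sketch using the armv6_pmcr_read/armv6_pmcr_write helpers that appear later in this diff, not code added by the commit:

static void pmu_rmw_example(u32 mask, u32 evt)
{
	unsigned long flags;
	u32 val;

	/* pmu_lock serializes the read/modify/write of the PMU control register. */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}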
/*
* ARMv6 supports a maximum of 3 events, starting from index 1. If we add
......@@ -65,7 +65,7 @@ struct cpu_hw_events {
*/
unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct arm_pmu {
enum arm_perf_pmu_ids id;
......@@ -673,17 +673,17 @@ arch_initcall(init_hw_perf_events);
* This code has been adapted from the ARM OProfile support.
*/
struct frame_tail {
struct frame_tail *fp;
unsigned long sp;
unsigned long lr;
struct frame_tail __user *fp;
unsigned long sp;
unsigned long lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct frame_tail buftail;
......@@ -709,10 +709,10 @@ user_backtrace(struct frame_tail *tail,
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
struct frame_tail *tail;
struct frame_tail __user *tail;
tail = (struct frame_tail *)regs->ARM_fp - 1;
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while (tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
......
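The hunks above change only the __user annotations; the body of user_backtrace is largely elided in this view. For context, the usual shape of that helper, as described by the comment above it, is roughly the following sketch (assuming <linux/perf_event.h> and <asm/uaccess.h>): copy one frame_tail from user space, record the saved return address, and step to the caller's frame. Treat it as an illustration of the pattern, not the verbatim kernel function.

static struct frame_tail __user *
user_backtrace_sketch(struct frame_tail __user *tail,
		      struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* The frame must be readable user memory. */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	/* Record the saved return address for this frame. */
	perf_callchain_store(entry, buftail.lr);

	/* Frame pointers must walk towards higher addresses. */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}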
......@@ -400,7 +400,7 @@ armv6pmu_write_counter(int counter,
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
void
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
......@@ -426,12 +426,12 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
......@@ -500,11 +500,11 @@ armv6pmu_start(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -512,11 +512,11 @@ armv6pmu_stop(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
......@@ -570,12 +570,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -599,12 +599,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
......@@ -625,7 +625,7 @@ static const struct arm_pmu armv6pmu = {
.max_period = (1LLU << 32) - 1,
};
const struct arm_pmu *__init armv6pmu_init(void)
static const struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
......@@ -655,17 +655,17 @@ static const struct arm_pmu armv6mpcore_pmu = {
.max_period = (1LLU << 32) - 1,
};
const struct arm_pmu *__init armv6mpcore_pmu_init(void)
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
const struct arm_pmu *__init armv6pmu_init(void)
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
const struct arm_pmu *__init armv6mpcore_pmu_init(void)
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
......
......@@ -681,7 +681,7 @@ static void armv7_pmnc_dump_regs(void)
}
#endif
void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
......@@ -689,7 +689,7 @@ void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
......@@ -713,7 +713,7 @@ void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_enable_counter(idx);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
......@@ -723,7 +723,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
/*
* Disable counter and interrupt
*/
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
......@@ -735,7 +735,7 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
*/
armv7_pmnc_disable_intens(idx);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
......@@ -805,20 +805,20 @@ static void armv7pmu_start(void)
{
unsigned long flags;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
......@@ -874,7 +874,7 @@ static u32 __init armv7_reset_read_pmnc(void)
return nb_cnt + 1;
}
const struct arm_pmu *__init armv7_a8_pmu_init(void)
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
......@@ -884,7 +884,7 @@ const struct arm_pmu *__init armv7_a8_pmu_init(void)
return &armv7pmu;
}
const struct arm_pmu *__init armv7_a9_pmu_init(void)
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
......@@ -894,12 +894,12 @@ const struct arm_pmu *__init armv7_a9_pmu_init(void)
return &armv7pmu;
}
#else
const struct arm_pmu *__init armv7_a8_pmu_init(void)
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
const struct arm_pmu *__init armv7_a9_pmu_init(void)
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
......
......@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
......@@ -355,11 +355,11 @@ xscale1pmu_start(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
......@@ -428,7 +428,7 @@ static const struct arm_pmu xscale1pmu = {
.max_period = (1LLU << 32) - 1,
};
const struct arm_pmu *__init xscale1pmu_init(void)
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
......@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
return;
}
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
return;
}
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
......@@ -705,11 +705,11 @@ xscale2pmu_start(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
......@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
{
unsigned long flags, val;
spin_lock_irqsave(&pmu_lock, flags);
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
spin_unlock_irqrestore(&pmu_lock, flags);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}