Commit 76a22271 authored by Linus Torvalds

Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm:
  [ARM] 3672/1: PXA: don't probe output GPIOs for interrupt
  [ARM] 3671/1: ep93xx: add cirrus logic edb9315 support
  [ARM] 3370/2: ep93xx: add crunch support
  [ARM] 3665/1: crunch: add ptrace support
  [ARM] 3664/1: crunch: add signal frame save/restore
  [ARM] 3663/1: fix resource->end off-by-one thinko during physmap conversion
  [ARM] 3662/1: ixp23xx: don't include asm/hardware.h in uncompress.h
  [ARM] 3660/1: Remove legacy defines
  [ARM] 3661/1: S3C2412: Fix compilation if CPU_S3C2410 only
  [ARM] 3658/1: S3C244X: Change usb-gadget name to s3c2440-usbgadget
  [ARM] Remove the __arch_* layer from uaccess.h
parents fffcb480 e033108b
arch/arm/kernel/Makefile
@@ -22,6 +22,9 @@ obj-$(CONFIG_PCI) += bios32.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
obj-$(CONFIG_IWMMXT) += iwmmxt.o
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
......
arch/arm/kernel/armksyms.c
@@ -109,11 +109,11 @@ EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__arch_clear_user);
EXPORT_SYMBOL(__arch_strnlen_user);
EXPORT_SYMBOL(__arch_strncpy_from_user);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
......
arch/arm/kernel/asm-offsets.c
@@ -59,6 +59,9 @@ int main(void)
DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate));
#ifdef CONFIG_IWMMXT
DEFINE(TI_IWMMXT_STATE, offsetof(struct thread_info, fpstate.iwmmxt));
#endif
#ifdef CONFIG_CRUNCH
DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
#endif
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
......
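The TI_CRUNCH_STATE constant defined above is what lets the new assembly find the per-thread crunch save area. As a sketch (the macro itself sits at the top of asm-offsets.c and is not part of this hunk), DEFINE() is roughly:

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

The build post-processes the "->SYMBOL value" marker lines this emits into the constants in the generated <asm/asm-offsets.h>, which crunch-bits.S includes and uses in, e.g., "add r0, r10, #TI_CRUNCH_STATE".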
/*
* arch/arm/kernel/crunch-bits.S
* Cirrus MaverickCrunch context switching and handling
*
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
*
* Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
* Copyright (c) 2003-2004, MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/arch/ep93xx-regs.h>
/*
* We can't use hex constants here due to a bug in gas.
*/
#define CRUNCH_MVDX0 0
#define CRUNCH_MVDX1 8
#define CRUNCH_MVDX2 16
#define CRUNCH_MVDX3 24
#define CRUNCH_MVDX4 32
#define CRUNCH_MVDX5 40
#define CRUNCH_MVDX6 48
#define CRUNCH_MVDX7 56
#define CRUNCH_MVDX8 64
#define CRUNCH_MVDX9 72
#define CRUNCH_MVDX10 80
#define CRUNCH_MVDX11 88
#define CRUNCH_MVDX12 96
#define CRUNCH_MVDX13 104
#define CRUNCH_MVDX14 112
#define CRUNCH_MVDX15 120
#define CRUNCH_MVAX0L 128
#define CRUNCH_MVAX0M 132
#define CRUNCH_MVAX0H 136
#define CRUNCH_MVAX1L 140
#define CRUNCH_MVAX1M 144
#define CRUNCH_MVAX1H 148
#define CRUNCH_MVAX2L 152
#define CRUNCH_MVAX2M 156
#define CRUNCH_MVAX2H 160
#define CRUNCH_MVAX3L 164
#define CRUNCH_MVAX3M 168
#define CRUNCH_MVAX3H 172
#define CRUNCH_DSPSC 176
#define CRUNCH_SIZE 184
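/*
 * For orientation: these offsets are meant to mirror the C-side save
 * area layout (a sketch of struct crunch_state as assumed to be added
 * to the ARM fpstate/thread_info headers by this series; field names
 * are illustrative):
 *
 *	struct crunch_state {
 *		unsigned int	mvdx[16][2];	16 x 64-bit data registers
 *		unsigned int	mvax[4][3];	4 x 72-bit accumulators
 *		unsigned int	dspsc[2];	DSP status/control word
 *	};				sizeof() == CRUNCH_SIZE (184 bytes)
 */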
.text
/*
* Lazy switching of crunch coprocessor context
*
* r10 = struct thread_info pointer
* r9 = ret_from_exception
* lr = undefined instr exit
*
* called from prefetch exception handler with interrupts disabled
*/
ENTRY(crunch_task_enable)
ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
ldr r1, [r8, #0x80]
tst r1, #0x00800000 @ access to crunch enabled?
movne pc, lr @ if so no business here
mov r3, #0xaa @ unlock syscon swlock
str r3, [r8, #0xc0]
orr r1, r1, #0x00800000 @ enable access to crunch
str r1, [r8, #0x80]
ldr r3, =crunch_owner
add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
ldr r2, [sp, #60] @ current task pc value
ldr r1, [r3] @ get current crunch owner
str r0, [r3] @ this task now owns crunch
sub r2, r2, #4 @ adjust pc back
str r2, [sp, #60]
ldr r2, [r8, #0x80]
mov r2, r2 @ flush out enable (@@@)
teq r1, #0 @ test for last ownership
mov lr, r9 @ normal exit from exception
beq crunch_load @ no owner, skip save
crunch_save:
cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
#ifdef __ARMEB__
#error fix me for ARMEB
#endif
cfmv32al mvfx0, mvax0 @ save 72b accumulators
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
cfmv32am mvfx0, mvax0
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
cfmv32ah mvfx0, mvax0
cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
cfmv32al mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
cfmv32am mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
cfmv32ah mvfx0, mvax1
cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
cfmv32al mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
cfmv32am mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
cfmv32ah mvfx0, mvax2
cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
cfmv32al mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
cfmv32am mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
cfmv32ah mvfx0, mvax3
cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
cfmv32sc mvdx0, dspsc @ save status word
cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
teq r0, #0 @ anything to load?
cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
moveq pc, lr
crunch_load:
cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
cfmvsc32 dspsc, mvdx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
cfmval32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
cfmvam32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
cfmvah32 mvax0, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
cfmval32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
cfmvam32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
cfmvah32 mvax1, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
cfmval32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
cfmvam32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
cfmvah32 mvax2, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
cfmval32 mvax3, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
cfmvam32 mvax3, mvfx0
cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
cfmvah32 mvax3, mvfx0
cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
mov pc, lr
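/*
 * In C terms, the lazy-switch path above amounts to roughly the
 * following (sketch only; the assembly is authoritative):
 *
 *	on the first crunch insn after access was disabled (undef trap):
 *		unlock syscon and set the crunch enable bit
 *		new = &current_thread_info()->crunchstate
 *		old = crunch_owner; crunch_owner = new
 *		if (old)
 *			crunch_save(old)	save the last owner's live regs
 *		crunch_load(new)		pull in this task's context
 *		retry the trapping instruction (pc was wound back by 4)
 */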
/*
* Back up crunch regs to save area and disable access to them
* (mainly for gdb or sleep mode usage)
*
* r0 = struct thread_info pointer of target task or NULL for any
*/
ENTRY(crunch_task_disable)
stmfd sp!, {r4, r5, lr}
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r1, [r3] @ get current crunch owner
teq r1, #0 @ any current owner?
beq 1f @ no: quit
teq r0, #0 @ any owner?
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
ldr r5, [r4, #0x80] @ enable access to crunch
mov r2, #0xaa
str r2, [r4, #0xc0]
orr r5, r5, #0x00800000
str r5, [r4, #0x80]
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
ldr r2, [r4, #0x80] @ flush out enable (@@@)
mov r2, r2
bl crunch_save
mov r2, #0xaa @ disable access to crunch
str r2, [r4, #0xc0]
bic r5, r5, #0x00800000
str r5, [r4, #0x80]
ldr r5, [r4, #0x80] @ flush out enable (@@@)
mov r5, r5
1: msr cpsr_c, ip @ restore interrupt mode
ldmfd sp!, {r4, r5, pc}
/*
* Copy crunch state to given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to store crunch state
*
* this is called mainly in the creation of signal stack frames
*/
ENTRY(crunch_task_copy)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r3, [r3] @ get current crunch owner
teq r2, r3 @ does this task own it...
beq 1f
@ current crunch values are in the task save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r1
mov r1, r2
mov r2, #CRUNCH_SIZE
b memcpy
1: @ this task owns crunch regs -- grab a copy from there
mov r0, #0 @ nothing to load
mov r3, lr @ preserve return address
bl crunch_save
msr cpsr_c, ip @ restore interrupt mode
mov pc, r3
/*
* Restore crunch state from given memory address
*
* r0 = struct thread_info pointer of target task
* r1 = memory address where to get crunch state from
*
* this is used to restore crunch state when unwinding a signal stack frame
*/
ENTRY(crunch_task_restore)
mrs ip, cpsr
orr r2, ip, #PSR_I_BIT @ disable interrupts
msr cpsr_c, r2
ldr r3, =crunch_owner
add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
ldr r3, [r3] @ get current crunch owner
teq r2, r3 @ does this task own it...
beq 1f
@ this task doesn't own crunch regs -- use its save area
msr cpsr_c, ip @ restore interrupt mode
mov r0, r2
mov r2, #CRUNCH_SIZE
b memcpy
1: @ this task owns crunch regs -- load them directly
mov r0, r1
mov r1, #0 @ nothing to save
mov r3, lr @ preserve return address
bl crunch_load
msr cpsr_c, ip @ restore interrupt mode
mov pc, r3
/*
* arch/arm/kernel/crunch.c
* Cirrus MaverickCrunch context switching and handling
*
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/arch/ep93xx-regs.h>
#include <asm/thread_notify.h>
#include <asm/io.h>
struct crunch_state *crunch_owner;
void crunch_task_release(struct thread_info *thread)
{
local_irq_disable();
if (crunch_owner == &thread->crunchstate)
crunch_owner = NULL;
local_irq_enable();
}
static int crunch_enabled(u32 devcfg)
{
return !!(devcfg & EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE);
}
static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
{
struct thread_info *thread = (struct thread_info *)t;
struct crunch_state *crunch_state;
u32 devcfg;
crunch_state = &thread->crunchstate;
switch (cmd) {
case THREAD_NOTIFY_FLUSH:
memset(crunch_state, 0, sizeof(*crunch_state));
/*
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
case THREAD_NOTIFY_RELEASE:
crunch_task_release(thread);
break;
case THREAD_NOTIFY_SWITCH:
devcfg = __raw_readl(EP93XX_SYSCON_DEVICE_CONFIG);
if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
devcfg ^= EP93XX_SYSCON_DEVICE_CONFIG_CRUNCH_ENABLE;
__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
__raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);
}
break;
}
return NOTIFY_DONE;
}
static struct notifier_block crunch_notifier_block = {
.notifier_call = crunch_do,
};
static int __init crunch_init(void)
{
thread_register_notifier(&crunch_notifier_block);
return 0;
}
late_initcall(crunch_init);
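Both crunch_do() above and the assembly in crunch-bits.S rely on the same syscon software-lock protocol: writing 0xaa to the SWLOCK register opens a window for a single write to a protected register such as DEVICE_CONFIG. A minimal sketch of that sequence, using an illustrative helper name that is not part of this commit:

/* Unlock the syscon write window, then perform the one protected write. */
static inline void crunch_syscon_devcfg_write(unsigned int devcfg)
{
	__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);		/* unlock for a single write */
	__raw_writel(devcfg, EP93XX_SYSCON_DEVICE_CONFIG);	/* protected register */
}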
arch/arm/kernel/entry-armv.S
@@ -492,9 +492,15 @@ call_fpe:
b do_fpe @ CP#1 (FPE)
b do_fpe @ CP#2 (FPE)
mov pc, lr @ CP#3
#ifdef CONFIG_CRUNCH
b crunch_task_enable @ CP#4 (MaverickCrunch)
b crunch_task_enable @ CP#5 (MaverickCrunch)
b crunch_task_enable @ CP#6 (MaverickCrunch)
#else
mov pc, lr @ CP#4
mov pc, lr @ CP#5
mov pc, lr @ CP#6
#endif
mov pc, lr @ CP#7
mov pc, lr @ CP#8
mov pc, lr @ CP#9
......
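call_fpe is reached from the undefined-instruction handler, which extracts the coprocessor number from the trapped instruction and indexes this branch table with it, so the three new entries route CP4-CP6 (MaverickCrunch) accesses to crunch_task_enable. A rough C rendering of that test (illustrative only; the real dispatch is assembly):

static int is_crunch_insn(unsigned long instr)
{
	unsigned int cp_num = (instr >> 8) & 0xf;	/* coprocessor field, bits 11:8 */

	return cp_num >= 4 && cp_num <= 6;		/* MaverickCrunch answers to CP4-CP6 */
}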
arch/arm/kernel/ptrace.c
@@ -634,6 +634,32 @@ static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
#endif
#ifdef CONFIG_CRUNCH
/*
* Get the child Crunch state.
*/
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
? -EFAULT : 0;
}
/*
* Set the child Crunch state.
*/
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_release(thread); /* force a reload */
return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
? -EFAULT : 0;
}
#endif
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
unsigned long tmp;
@@ -765,6 +791,16 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
child->ptrace_message = data;
break;
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
ret = ptrace_getcrunchregs(child, (void __user *)data);
break;
case PTRACE_SETCRUNCHREGS:
ret = ptrace_setcrunchregs(child, (void __user *)data);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
......
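From a debugger's point of view the two new requests behave like the existing register-set transfers. A hedged userspace sketch follows; the request numbers are assumed from the ARM asm/ptrace.h additions in this series, the struct mirrors the 184-byte crunch_state layout, and all names here are illustrative:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>

#ifndef PTRACE_GETCRUNCHREGS
#define PTRACE_GETCRUNCHREGS	25	/* value assumed from asm/ptrace.h */
#define PTRACE_SETCRUNCHREGS	26
#endif

/* 184 bytes: 16 x 64-bit mvdx, 4 x 72-bit mvax, one 64-bit dspsc slot */
struct crunch_regs {
	uint32_t mvdx[16][2];
	uint32_t mvax[4][3];
	uint32_t dspsc[2];
};

/* Print the traced child's mvdx0 (little-endian: word 1 is the high half). */
static int dump_crunch(pid_t child)
{
	struct crunch_regs regs;

	if (ptrace(PTRACE_GETCRUNCHREGS, child, NULL, &regs) < 0)
		return -1;
	printf("mvdx0 = %08x%08x\n", regs.mvdx[0][1], regs.mvdx[0][0]);
	return 0;
}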
arch/arm/kernel/signal.c
@@ -132,6 +132,37 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
return ret;
}
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe *frame)
{
char kbuf[sizeof(*frame) + 8];
struct crunch_sigframe *kframe;
/* the crunch context must be 64 bit aligned */
kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
kframe->magic = CRUNCH_MAGIC;
kframe->size = CRUNCH_STORAGE_SIZE;
crunch_task_copy(current_thread_info(), &kframe->storage);
return __copy_to_user(frame, kframe, sizeof(*frame));
}
static int restore_crunch_context(struct crunch_sigframe *frame)
{
char kbuf[sizeof(*frame) + 8];
struct crunch_sigframe *kframe;
/* the crunch context must be 64 bit aligned */
kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
if (__copy_from_user(kframe, frame, sizeof(*frame)))
return -1;
if (kframe->magic != CRUNCH_MAGIC ||
kframe->size != CRUNCH_STORAGE_SIZE)
return -1;
crunch_task_restore(current_thread_info(), &kframe->storage);
return 0;
}
#endif
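/*
 * For reference, preserve/restore above operate on a sketch of the frame
 * layout added to the signal ucontext headers by this series (field order
 * assumed from the magic/size/storage accesses above):
 *
 *	struct crunch_sigframe {
 *		unsigned long		magic;		CRUNCH_MAGIC
 *		unsigned long		size;		CRUNCH_STORAGE_SIZE
 *		struct crunch_state	storage;	184-byte register dump
 *	} __attribute__((__aligned__(8)));
 *
 * The kbuf[sizeof(*frame) + 8] plus "& ~7" dance exists because the
 * crunch load/store instructions used by crunch_task_copy()/restore()
 * need a 64-bit aligned buffer, while a plain stack array here only
 * guarantees 4-byte alignment.
 */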
#ifdef CONFIG_IWMMXT
static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
@@ -214,6 +245,10 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= !valid_user_regs(regs);
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
if (err == 0)
err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= restore_iwmmxt_context(&aux->iwmmxt);
@@ -333,6 +368,10 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
if (err == 0)
err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
err |= preserve_iwmmxt_context(&aux->iwmmxt);
......
arch/arm/lib/clear_user.S
@@ -12,13 +12,13 @@
.text
/* Prototype: int __arch_clear_user(void *addr, size_t sz)
/* Prototype: int __clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
ENTRY(__arch_clear_user)
ENTRY(__clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
......
arch/arm/lib/copy_from_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
* size_t __arch_copy_from_user(void *to, const void *from, size_t n)
* size_t __copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -83,7 +83,7 @@
.text
ENTRY(__arch_copy_from_user)
ENTRY(__copy_from_user)
#include "copy_template.S"
......
arch/arm/lib/copy_to_user.S
@@ -16,7 +16,7 @@
/*
* Prototype:
*
* size_t __arch_copy_to_user(void *to, const void *from, size_t n)
* size_t __copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -86,7 +86,7 @@