Commit 38cb162b authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] wire up pselect, ppoll
  [IA64] Add TIF_RESTORE_SIGMASK
  [IA64] unwind did not work for processes born with CLONE_STOPPED
  [IA64] Optional method to purge the TLB on SN systems
  [IA64] SPIN_LOCK_UNLOCKED macro cleanup in arch/ia64
  [IA64-SN2][KJ] mmtimer.c-kzalloc
  [IA64] fix stack alignment for ia32 signal handlers
  [IA64] - Altix: hotplug after intr redirect can crash system
  [IA64] save and restore cpus_allowed in cpu_idle_wait
  [IA64] Removal of percpu TR cleanup in kexec code
  [IA64] Fix some section mismatch errors
parents ba7cc09c e180583b
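
The bulk of this merge replaces IA64's open-coded sigsuspend loops with the generic TIF_RESTORE_SIGMASK scheme: the old mask is stashed in current->saved_sigmask, the syscall returns -ERESTARTNOHAND, and the mask is restored either by sigreturn (if a handler ran) or at syscall exit. As a minimal, runnable user-space sketch of the semantics the kernel side must preserve (illustrative only, not part of the commit):

/* User-space illustration of sigsuspend() semantics: the original mask
 * must come back atomically once the wait ends. */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static volatile sig_atomic_t got_usr1;

static void handler(int sig) { (void)sig; got_usr1 = 1; }

int main(void)
{
	sigset_t block, old, waitmask;
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* block SIGUSR1 */

	raise(SIGUSR1);				/* stays pending */

	sigemptyset(&waitmask);
	sigsuspend(&waitmask);			/* swap mask + sleep, atomically;
						 * old mask restored on return */
	printf("handler ran: %d\n", got_usr1);	/* prints 1 */
	return 0;
}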
@@ -52,43 +52,6 @@ ENTRY(ia32_clone)
 	br.ret.sptk.many rp
 END(ia32_clone)
 
-ENTRY(sys32_rt_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs
-	mov loc0=rp
-	mov out0=in0			// mask
-	mov out1=in1			// sigsetsize
-	mov out2=sp			// out2 = &sigscratch
-	.fframe 16
-	adds sp=-16,sp			// allocate dummy "sigscratch"
-	;;
-	.body
-	br.call.sptk.many rp=ia32_rt_sigsuspend
-1:	.restore sp
-	adds sp=16,sp
-	mov rp=loc0
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys32_rt_sigsuspend)
-
-ENTRY(sys32_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs
-	mov loc0=rp
-	mov out0=in2			// mask (first two args are ignored)
-	;;
-	mov out1=sp			// out1 = &sigscratch
-	.fframe 16
-	adds sp=-16,sp			// allocate dummy "sigscratch"
-	.body
-	br.call.sptk.many rp=ia32_sigsuspend
-1:	.restore sp
-	adds sp=16,sp
-	mov rp=loc0
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys32_sigsuspend)
-
 GLOBAL_ENTRY(ia32_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
 {	/*
@@ -389,7 +352,7 @@ ia32_syscall_table:
 	data8 sys_rt_sigpending
 	data8 compat_sys_rt_sigtimedwait
 	data8 sys32_rt_sigqueueinfo
-	data8 sys32_rt_sigsuspend
+	data8 compat_sys_rt_sigsuspend
 	data8 sys32_pread	/* 180 */
 	data8 sys32_pwrite
 	data8 sys_chown	/* 16-bit version */
......
@@ -451,59 +451,20 @@ sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int r
 	sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
 }
 
-long
-__ia32_rt_sigsuspend (compat_sigset_t *sset, unsigned int sigsetsize, struct sigscratch *scr)
+asmlinkage long
+sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
 {
-	extern long ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall);
-	sigset_t oldset, set;
-
-	scr->scratch_unat = 0;	/* avoid leaking kernel bits to user level */
-	memset(&set, 0, sizeof(set));
-
-	memcpy(&set.sig, &sset->sig, sigsetsize);
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-
+	mask &= _BLOCKABLE;
 	spin_lock_irq(&current->sighand->siglock);
-	{
-		oldset = current->blocked;
-		current->blocked = set;
-		recalc_sigpending();
-	}
+	current->saved_sigmask = current->blocked;
+	siginitset(&current->blocked, mask);
+	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 
-	/*
-	 * The return below usually returns to the signal handler.  We need to pre-set the
-	 * correct error code here to ensure that the right values get saved in sigcontext
-	 * by ia64_do_signal.
-	 */
-	scr->pt.r8 = -EINTR;
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (ia64_do_signal(&oldset, scr, 1))
-			return -EINTR;
-	}
-}
-
-asmlinkage long
-ia32_rt_sigsuspend (compat_sigset_t __user *uset, unsigned int sigsetsize, struct sigscratch *scr)
-{
-	compat_sigset_t set;
-
-	if (sigsetsize > sizeof(compat_sigset_t))
-		return -EINVAL;
-
-	if (copy_from_user(&set.sig, &uset->sig, sigsetsize))
-		return -EFAULT;
-
-	return __ia32_rt_sigsuspend(&set, sigsetsize, scr);
-}
-
-asmlinkage long
-ia32_sigsuspend (unsigned int mask, struct sigscratch *scr)
-{
-	return __ia32_rt_sigsuspend((compat_sigset_t *) &mask, sizeof(mask), scr);
+	current->state = TASK_INTERRUPTIBLE;
+	schedule();
+	set_thread_flag(TIF_RESTORE_SIGMASK);
+	return -ERESTARTNOHAND;
 }
 
 asmlinkage long
@@ -810,7 +771,11 @@ get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
 	}
 	/* Legacy stack switching not supported */
 
-	return (void __user *)((esp - frame_size) & -8ul);
+	esp -= frame_size;
+	/* Align the stack pointer according to the i386 ABI,
+	 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
+	esp = ((esp + 4) & -16ul) - 4;
+	return (void __user *) esp;
 }
 
 static int
......
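
The get_sigframe() hunk above replaces an 8-byte round-down with the i386 ABI rule that (sp + 4) be 16-byte aligned on function entry, so SSE-using ia32 signal handlers get a correctly aligned frame. A stand-alone check of the arithmetic (illustrative only):

/* Verify: after esp = ((esp + 4) & -16ul) - 4, the invariant
 * ((esp + 4) & 15) == 0 holds for any starting esp and frame size. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned long esp0 = 0xbfff0000ul; esp0 < 0xbfff0040ul; esp0++)
		for (unsigned long frame = 0; frame < 64; frame += 4) {
			unsigned long esp = esp0 - frame;	/* esp -= frame_size */
			esp = ((esp + 4) & -16ul) - 4;		/* align per i386 ABI */
			assert(((esp + 4) & 15) == 0);
		}
	printf("alignment invariant holds\n");
	return 0;
}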
@@ -1199,32 +1199,6 @@ ENTRY(notify_resume_user)
 	br.ret.sptk.many rp
 END(notify_resume_user)
 
-GLOBAL_ENTRY(sys_rt_sigsuspend)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs in case of syscall restart!
-	mov r9=ar.unat
-	mov loc0=rp			// save return address
-	mov out0=in0			// mask
-	mov out1=in1			// sigsetsize
-	adds out2=8,sp			// out2=&sigscratch->ar_pfs
-	;;
-	.fframe 16
-	.spillsp ar.unat, 16
-	st8 [sp]=r9,-16			// allocate space for ar.unat and save it
-	st8 [out2]=loc1,-8		// save ar.pfs, out2=&sigscratch
-	.body
-	br.call.sptk.many rp=ia64_rt_sigsuspend
-.ret17:	.restore sp
-	adds sp=16,sp			// pop scratch stack space
-	;;
-	ld8 r9=[sp]			// load new unat from sw->caller_unat
-	mov rp=loc0
-	;;
-	mov ar.unat=r9
-	mov ar.pfs=loc1
-	br.ret.sptk.many rp
-END(sys_rt_sigsuspend)
-
 ENTRY(sys_rt_sigreturn)
 	PT_REGS_UNWIND_INFO(0)
 	/*
@@ -1598,8 +1572,8 @@ sys_call_table:
 	data8 sys_readlinkat
 	data8 sys_fchmodat
 	data8 sys_faccessat
-	data8 sys_ni_syscall		// reserved for pselect
-	data8 sys_ni_syscall		// 1295 reserved for ppoll
+	data8 sys_pselect6
+	data8 sys_ppoll
 	data8 sys_unshare
 	data8 sys_splice
 	data8 sys_set_robust_list
......
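
With pselect6/ppoll wired into the syscall table, IA64 user space gains the race-free wait-with-sigmask primitives. A small, runnable illustration of what ppoll() provides over sigprocmask()+poll() (names here are illustrative, not from the commit):

/* ppoll() swaps the signal mask and waits atomically, so a SIGINT that
 * arrives between "unblock" and "wait" cannot be lost. */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void on_int(int sig) { (void)sig; }

int main(void)
{
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	struct sigaction sa;
	sigset_t block, waitmask;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_int;		/* so delivery interrupts ppoll with EINTR */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGINT, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* normally block SIGINT... */

	sigemptyset(&waitmask);
	if (ppoll(&pfd, 1, NULL, &waitmask) < 0)	/* ...except while waiting */
		perror("ppoll");			/* EINTR if SIGINT arrived */
	return 0;
}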
@@ -1012,7 +1012,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
 /*
  * ACPI calls this when it finds an entry for a legacy ISA IRQ override.
  */
-void __init
+void __devinit
 iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
 			  unsigned long polarity,
 			  unsigned long trigger)
......
@@ -38,6 +38,7 @@
 #include <asm/machvec.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PERFMON
 # include <asm/perfmon.h>
@@ -126,8 +127,10 @@ void destroy_irq(unsigned int irq)
 
 #ifdef CONFIG_SMP
 # define IS_RESCHEDULE(vec)		(vec == IA64_IPI_RESCHEDULE)
+# define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
 #else
 # define IS_RESCHEDULE(vec)		(0)
+# define IS_LOCAL_TLB_FLUSH(vec)	(0)
 #endif
 /*
  * That's where the IVT branches when we get an external
@@ -179,8 +182,11 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);
 			ia64_srlz_d();
@@ -226,8 +232,11 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
-		if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
+			smp_local_flush_tlb();
+			kstat_this_cpu.irqs[vector]++;
+		} else if (unlikely(IS_RESCHEDULE(vector)))
+			kstat_this_cpu.irqs[vector]++;
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
@@ -259,12 +268,12 @@ void ia64_process_pending_intr(void)
 #ifdef CONFIG_SMP
-	extern irqreturn_t handle_IPI (int irq, void *dev_id);
 
 static irqreturn_t dummy_handler (int irq, void *dev_id)
 {
 	BUG();
 }
 
+extern irqreturn_t handle_IPI (int irq, void *dev_id);
+
 static struct irqaction ipi_irqaction = {
 	.handler =	handle_IPI,
@@ -277,6 +286,13 @@ static struct irqaction resched_irqaction = {
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
+
+static struct irqaction tlb_irqaction = {
+	.handler =	dummy_handler,
+	.flags =	SA_INTERRUPT,
+	.name =		"tlb_flush"
+};
+
 #endif
 
 void
@@ -302,6 +318,7 @@ init_IRQ (void)
 #ifdef CONFIG_SMP
 	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
+	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
 #endif
 #ifdef CONFIG_PERFMON
 	pfm_init_percpu();
......
@@ -155,7 +155,7 @@ show_regs (struct pt_regs *regs)
 }
 
 void
-do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
+do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall)
 {
 	if (fsys_mode(current, &scr->pt)) {
 		/* defer signal-handling etc. until we return to privilege-level 0. */
@@ -170,8 +170,8 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
 #endif
 
 	/* deal with pending signal delivery */
-	if (test_thread_flag(TIF_SIGPENDING))
-		ia64_do_signal(oldset, scr, in_syscall);
+	if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
+		ia64_do_signal(scr, in_syscall);
 }
 
 static int pal_halt = 1;
@@ -236,6 +236,7 @@ void cpu_idle_wait(void)
 {
 	unsigned int cpu, this_cpu = get_cpu();
 	cpumask_t map;
+	cpumask_t tmp = current->cpus_allowed;
 
 	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
 	put_cpu();
@@ -257,6 +258,7 @@ void cpu_idle_wait(void)
 		}
 		cpus_and(map, map, cpu_online_map);
 	} while (!cpus_empty(map));
+	set_cpus_allowed(current, tmp);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
......
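
The cpu_idle_wait() hunks above fix an affinity leak: the function pinned the calling task to its current CPU and never undid it. The same save/narrow/restore discipline, sketched in user-space terms (illustrative only, Linux-specific):

/* Any code that narrows its CPU affinity for a local operation should
 * save the original mask first and restore it afterwards. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t saved, pinned;

	sched_getaffinity(0, sizeof(saved), &saved);	/* tmp = current->cpus_allowed */

	CPU_ZERO(&pinned);
	CPU_SET(0, &pinned);
	sched_setaffinity(0, sizeof(pinned), &pinned);	/* pin to one CPU */

	printf("pinned to CPU 0; doing per-cpu work...\n");

	sched_setaffinity(0, sizeof(saved), &saved);	/* set_cpus_allowed(current, tmp) */
	return 0;
}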
@@ -94,7 +94,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
 4:
 	srlz.i
 	;;
-	//purge TR entry for kernel text and data
+	// purge TR entry for kernel text and data
 	movl r16=KERNEL_START
 	mov r18=KERNEL_TR_PAGE_SHIFT<<2
 	;;
@@ -104,15 +104,6 @@ GLOBAL_ENTRY(relocate_new_kernel)
 	srlz.i
 	;;
 
-	// purge TR entry for percpu data
-	movl r16=PERCPU_ADDR
-	mov r18=PERCPU_PAGE_SHIFT<<2
-	;;
-	ptr.d r16,r18
-	;;
-	srlz.d
-	;;
-
 	// purge TR entry for pal code
 	mov r16=in3
 	mov r18=IA64_GRANULE_SHIFT<<2
......
@@ -786,7 +786,7 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
 }
 
-void
+void __init
 setup_per_cpu_areas (void)
 {
 	/* start_kernel() requires this... */
......
@@ -22,4 +22,4 @@ struct sigframe {
 	struct sigcontext sc;
 };
 
-extern long ia64_do_signal (sigset_t *, struct sigscratch *, long);
+extern void ia64_do_signal (struct sigscratch *, long);
@@ -40,47 +40,6 @@
 # define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])
 #endif
 
-long
-ia64_rt_sigsuspend (sigset_t __user *uset, size_t sigsetsize, struct sigscratch *scr)
-{
-	sigset_t oldset, set;
-
-	/* XXX: Don't preclude handling different sized sigset_t's. */
-	if (sigsetsize != sizeof(sigset_t))
-		return -EINVAL;
-
-	if (!access_ok(VERIFY_READ, uset, sigsetsize))
-		return -EFAULT;
-
-	if (GET_SIGSET(&set, uset))
-		return -EFAULT;
-
-	sigdelsetmask(&set, ~_BLOCKABLE);
-
-	spin_lock_irq(&current->sighand->siglock);
-	{
-		oldset = current->blocked;
-		current->blocked = set;
-		recalc_sigpending();
-	}
-	spin_unlock_irq(&current->sighand->siglock);
-
-	/*
-	 * The return below usually returns to the signal handler.  We need to
-	 * pre-set the correct error code here to ensure that the right values
-	 * get saved in sigcontext by ia64_do_signal.
-	 */
-	scr->pt.r8 = EINTR;
-	scr->pt.r10 = -1;
-
-	while (1) {
-		current->state = TASK_INTERRUPTIBLE;
-		schedule();
-		if (ia64_do_signal(&oldset, scr, 1))
-			return -EINTR;
-	}
-}
-
 asmlinkage long
 sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
 		 long arg3, long arg4, long arg5, long arg6, long arg7,
@@ -477,10 +436,11 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
  * Note that `init' is a special process: it doesn't get signals it doesn't want to
  * handle.  Thus you cannot kill init even with a SIGKILL even by mistake.
  */
-long
-ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
+void
+ia64_do_signal (struct sigscratch *scr, long in_syscall)
 {
 	struct k_sigaction ka;
+	sigset_t *oldset;
 	siginfo_t info;
 	long restart = in_syscall;
 	long errno = scr->pt.r8;
@@ -492,9 +452,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 	 * doing anything if so.
 	 */
 	if (!user_mode(&scr->pt))
-		return 0;
+		return;
 
-	if (!oldset)
+	if (test_thread_flag(TIF_RESTORE_SIGMASK))
+		oldset = &current->saved_sigmask;
+	else
 		oldset = &current->blocked;
 
 	/*
@@ -557,8 +519,15 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 		 * Whee!  Actually deliver the signal.  If the delivery failed, we need to
 		 * continue to iterate in this loop so we can deliver the SIGSEGV...
 		 */
-		if (handle_signal(signr, &ka, &info, oldset, scr))
-			return 1;
+		if (handle_signal(signr, &ka, &info, oldset, scr)) {
+			/* a signal was successfully delivered; the saved
+			 * sigmask will have been stored in the signal frame,
+			 * and will be restored by sigreturn, so we can simply
+			 * clear the TIF_RESTORE_SIGMASK flag */
+			if (test_thread_flag(TIF_RESTORE_SIGMASK))
+				clear_thread_flag(TIF_RESTORE_SIGMASK);
+			return;
+		}
 	}
 
 	/* Did we come from a system call? */
@@ -584,5 +553,11 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 			}
 		}
 	}
-	return 0;
+
+	/* if there's no signal to deliver, we just put the saved sigmask
+	 * back */
+	if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+		clear_thread_flag(TIF_RESTORE_SIGMASK);
+		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
+	}
 }
@@ -49,6 +49,18 @@
 #include <asm/unistd.h>
 #include <asm/mca.h>
 
+/*
+ * Note: alignment of 4 entries/cacheline was empirically determined
+ * to be a good tradeoff between hot cachelines & spreading the array
+ * across too many cachelines.
+ */
+static struct local_tlb_flush_counts {
+	unsigned int count;
+} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];
+
+static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
+
 /*
  * Structure and data for smp_call_function().  This is designed to minimise static memory
  * requirements.  It also looks cleaner.
@@ -248,6 +260,62 @@ smp_send_reschedule (int cpu)
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled.
+ */
+static void
+smp_send_local_flush_tlb (int cpu)
+{
+	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+}
+
+void
+smp_local_flush_tlb(void)
+{
+	/*
+	 * Use atomic ops. Otherwise, the load/increment/store sequence from
+	 * a "++" operation can have the line stolen between the load & store.
+	 * The overhead of the atomic op is negligible in this case & offers
+	 * significant benefit for the brief periods where lots of cpus
+	 * are simultaneously flushing TLBs.
+	 */
+	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
+	local_flush_tlb_all();
+}
+
+#define FLUSH_DELAY	5	/* Usec backoff to eliminate excessive cacheline bouncing */
+
+void
+smp_flush_tlb_cpumask(cpumask_t xcpumask)
+{
+	unsigned int *counts = __ia64_per_cpu_var(shadow_flush_counts);
+	cpumask_t cpumask = xcpumask;
+	int mycpu, cpu, flush_mycpu = 0;
+
+	preempt_disable();
+	mycpu = smp_processor_id();
+
+	for_each_cpu_mask(cpu, cpumask)
+		counts[cpu] = local_tlb_flush_counts[cpu].count;
+
+	mb();
+	for_each_cpu_mask(cpu, cpumask) {
+		if (cpu == mycpu)
+			flush_mycpu = 1;
+		else
+			smp_send_local_flush_tlb(cpu);
+	}
+
+	if (flush_mycpu)
+		smp_local_flush_tlb();
+
+	for_each_cpu_mask(cpu, cpumask)
+		while (counts[cpu] == local_tlb_flush_counts[cpu].count)
+			udelay(FLUSH_DELAY);
+
+	preempt_enable();
+}
+
 void
 smp_flush_tlb_all (void)
 {
......
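
smp_flush_tlb_cpumask() above avoids an explicit acknowledgement message: the initiator snapshots each target CPU's flush counter into its per-cpu shadow array, sends the IPIs, then spins until every counter moves past its snapshot. A stand-alone sketch of that handshake, with threads standing in for CPUs and an atomic flag for the IPI (all names illustrative; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_uint flush_count[NCPUS];	/* local_tlb_flush_counts[] */
static atomic_int  kick[NCPUS];		/* stands in for the flush IPI */
static atomic_int  stop;

static void *cpu_thread(void *arg)
{
	int me = (int)(long)arg;

	while (!atomic_load(&stop))
		if (atomic_exchange(&kick[me], 0))
			/* "flush the local TLB", then advance the counter */
			atomic_fetch_add(&flush_count[me], 1);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];
	unsigned int shadow[NCPUS];	/* shadow_flush_counts[] */

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, cpu_thread, (void *)i);

	for (int i = 0; i < NCPUS; i++)		/* snapshot... */
		shadow[i] = atomic_load(&flush_count[i]);
	for (int i = 0; i < NCPUS; i++)		/* ...kick... */
		atomic_store(&kick[i], 1);
	for (int i = 0; i < NCPUS; i++)		/* ...wait for each counter to advance */
		while (atomic_load(&flush_count[i]) == shadow[i])
			;

	printf("all %d flushes observed\n", NCPUS);
	atomic_store(&stop, 1);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	return 0;
}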
@@ -43,9 +43,9 @@ die (const char *str, struct pt_regs *regs, long err)
 		u32 lock_owner;
 		int lock_owner_depth;
 	} die = {
-		.lock =			SPIN_LOCK_UNLOCKED,
-		.lock_owner =		-1,
-		.lock_owner_depth =	0
+		.lock =	__SPIN_LOCK_UNLOCKED(die.lock),
+		.lock_owner = -1,
+		.lock_owner_depth = 0
 	};
 	static int die_counter;
 	int cpu = get_cpu();
......
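
The SPIN_LOCK_UNLOCKED cleanups in traps.c above and in unwind.c and tlb.c below follow the then-current kernel direction: the anonymous initializer gives every lock the same lockdep class, while __SPIN_LOCK_UNLOCKED(name) keys the class to the named lock. A kernel-style sketch of the two idiomatic forms (hypothetical names; not stand-alone runnable):

#include <linux/spinlock.h>

/* preferred for a bare static lock */
static DEFINE_SPINLOCK(my_lock);

/* for a lock embedded in a statically initialized object, pass the
 * variable's own name so lockdep gets a unique key */
static struct my_state {
	spinlock_t lock;
	int value;
} my_state = {
	.lock	= __SPIN_LOCK_UNLOCKED(my_state.lock),
	.value	= 0,
};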
@@ -60,6 +60,7 @@
 # define UNW_DEBUG_ON(n)	unw_debug_level >= n
 	/* Do not code a printk level, not all debug lines end in newline */
 # define UNW_DPRINT(n, ...)	if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
+# undef inline
 # define inline
 #else /* !UNW_DEBUG */
 # define UNW_DEBUG_ON(n)	0
@@ -145,7 +146,7 @@ static struct {
 # endif
 } unw = {
 	.tables = &unw.kernel_table,
-	.lock = SPIN_LOCK_UNLOCKED,
+	.lock = __SPIN_LOCK_UNLOCKED(unw.lock),
 	.save_order = {
 		UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
 		UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
@@ -1943,9 +1944,9 @@ EXPORT_SYMBOL(unw_unwind);
 int
 unw_unwind_to_user (struct unw_frame_info *info)
 {
-	unsigned long ip, sp, pr = 0;
+	unsigned long ip, sp, pr = info->pr;
 
-	while (unw_unwind(info) >= 0) {
+	do {
 		unw_get_sp(info, &sp);
 		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
 		    < IA64_PT_REGS_SIZE) {
@@ -1963,7 +1964,7 @@ unw_unwind_to_user (struct unw_frame_info *info)
 			   __FUNCTION__, ip);
 			return -1;
 		}
-	}
+	} while (unw_unwind(info) >= 0);
 	unw_get_ip(info, &ip);
 	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
 		   __FUNCTION__, ip);
......
@@ -32,9 +32,9 @@ static struct {
 } purge;
 
 struct ia64_ctx ia64_ctx = {
-	.lock =		SPIN_LOCK_UNLOCKED,
-	.next =		1,
-	.max_ctx =	~0U
+	.lock =	__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
+	.next =	1,
+	.max_ctx =	~0U
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
......
@@ -59,6 +59,22 @@ void sn_intr_free(nasid_t local_nasid, int local_widget,
 			(u64) sn_irq_info->irq_cookie, 0, 0);
 }
 
+u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
+		      struct sn_irq_info *sn_irq_info,
+		      nasid_t req_nasid, int req_slice)
+{
+	struct ia64_sal_retval ret_stuff;
+	ret_stuff.status = 0;
+	ret_stuff.v0 = 0;
+
+	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
+			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,