Commit 89c5a946 authored by Linus Torvalds

Merge tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC changes from Vineet Gupta:

 - ARC MM changes:
    - preparation for MMUv4 (accommodate new PTE bits, new cmds)
    - Rework the ASID allocation algorithm to remove asid-mm reverse map
 - Boilerplate code consolidation in Exception Handlers
 - Disable FRAME_POINTER for ARC
 - Unaligned Access Emulation for Big-Endian from Noam
 - Bunch of fixes (udelay, missing accessors) from Mischa

* tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: fix new Section mismatches in build (post __cpuinit cleanup)
  Kconfig.debug: Add FRAME_POINTER anti-dependency for ARC
  ARC: Fix __udelay calculation
  ARC: remove console_verbose() from setup_arch()
  ARC: Add read*_relaxed to asm/io.h
  ARC: Handle un-aligned user space access in BE.
  ARC: [ASID] Track ASID allocation cycles/generations
  ARC: [ASID] activate_mm() == switch_mm()
  ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID
  ARC: [ASID] Refactor the TLB paranoid debug code
  ARC: [ASID] Remove legacy/unused debug code
  ARC: No need to flush the TLB in early boot
  ARC: MMUv4 preps/3 - Abstract out TLB Insert/Delete
  ARC: MMUv4 preps/2 - Reshuffle PTE bits
  ARC: MMUv4 preps/1 - Fold PTE K/U access flags
  ARC: Code cosmetics (Nothing semantical)
  ARC: Entry Handler tweaks: Optimize away redundant IRQ_DISABLE_SAVE
  ARC: Exception Handlers Code consolidation
  ARC: Add some .gitignore entries
parents 833ae40b 07b9b651
@@ -57,7 +57,7 @@
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void __init read_decode_cache_bcr(void);
+extern void read_decode_cache_bcr(void);
 #endif /* !__ASSEMBLY__ */
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
 {
 	unsigned long loops;

-	/* (long long) cast ensures 64 bit MPY - real or emulated
+	/* (u64) cast ensures 64 bit MPY - real or emulated
 	 * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
 	 */
-	loops = ((long long)(usecs * 4295 * HZ) *
-		 (long long)(loops_per_jiffy)) >> 32;
+	loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;

 	__delay(loops);
 }
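
The arithmetic behind this fix: 4295 is close to 2^32 / 10^6, so (usecs * 4295 * HZ * loops_per_jiffy) >> 32 approximates usecs * HZ * loops_per_jiffy / 10^6, i.e. the number of __delay() iterations in `usecs` microseconds. The old code multiplied usecs * 4295 * HZ in 32-bit arithmetic before the cast, which can overflow for delays in the tens of milliseconds. A minimal userspace sketch of the same fixed-point trick (the HZ and loops_per_jiffy values are made up for illustration; this is standalone C, not the kernel code):

	#include <stdint.h>
	#include <stdio.h>

	#define HZ 100				/* assumed: a typical config value */

	static unsigned long udelay_loops(unsigned long usecs, unsigned long lpj)
	{
		/* cast the first operand so the entire product is done in
		 * 64 bit, mirroring the (u64) cast in the fixed line above */
		return (unsigned long)(((uint64_t)usecs * 4295 * HZ * lpj) >> 32);
	}

	int main(void)
	{
		unsigned long lpj = 50000;	/* assumed loops_per_jiffy */

		/* exact result: 1000 * HZ * lpj / 1000000 = 5000 loops */
		printf("loops for 1000us: %lu\n", udelay_loops(1000, lpj));
		return 0;
	}
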
@@ -365,7 +365,7 @@
  * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
  *
  * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of ptregs.
+ * to be saved again on kernel mode stack, as part of pt_regs.
  *-------------------------------------------------------------*/
 .macro EXCPN_PROLOG_FREEUP_REG	reg
 #ifdef CONFIG_SMP
@@ -383,6 +383,28 @@
 #endif
 .endm

+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	/* Need at least 1 reg to code the early exception prologue */
+	EXCPN_PROLOG_FREEUP_REG r9
+
+	/* U/K mode at time of exception (stack not switched if already K) */
+	lr  r9, [erstatus]
+
+	/* ARC700 doesn't provide auto-stack switching */
+	SWITCH_TO_KERNEL_STK
+
+	/* save the regfile */
+	SAVE_ALL_SYS
+.endm
+
 /*--------------------------------------------------------------
  * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
  * Requires SP to be already switched to kernel mode Stack
@@ -100,6 +100,10 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 }

+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
 #include <asm-generic/io.h>

 #endif /* _ASM_ARC_IO_H */
@@ -157,13 +157,6 @@ static inline void arch_unmask_irq(unsigned int irq)
 	flag	\scratch
 .endm

-.macro IRQ_DISABLE_SAVE	scratch, save
-	lr	\scratch, [status32]
-	mov	\save, \scratch		/* Make a copy */
-	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
-	flag	\scratch
-.endm
-
 .macro IRQ_ENABLE	scratch
 	lr	\scratch, [status32]
 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
@@ -32,6 +32,8 @@
 /* Error code if probe fails */
 #define TLB_LKUP_ERR		0x80000000

+#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x00000001)
+
 /* TLB Commands */
 #define TLBWrite    0x1
 #define TLBRead     0x2
@@ -46,21 +48,18 @@
 #ifndef __ASSEMBLY__

 typedef struct {
-	unsigned long asid;	/* Pvt Addr-Space ID for mm */
-#ifdef CONFIG_ARC_TLB_DBG
-	struct task_struct *tsk;
-#endif
+	unsigned long asid;	/* 8 bit MMU PID + Generation cycle */
 } mm_context_t;

 #ifdef CONFIG_ARC_DBG_TLB_PARANOIA
-void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
 #else
 #define tlb_paranoid_check(a, b)
 #endif

 void arc_mmu_init(void);
 extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-void __init read_decode_mmu_bcr(void);
+void read_decode_mmu_bcr(void);

 #endif	/* !__ASSEMBLY__ */
@@ -34,95 +34,65 @@
  * When it reaches max 255, the allocation cycle starts afresh by flushing
  * the entire TLB and wrapping ASID back to zero.
  *
- * For book-keeping, Linux uses a couple of data-structures:
- *  -mm_struct has an @asid field to keep a note of task's ASID (needed at the
- *   time of say switch_mm( )
- *  -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
- *   given an ASID, finding the mm struct associated.
- *
- * The round-robin allocation algorithm allows for ASID stealing.
- * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
- * already assigned to another (switched-out) task. Obviously the prev owner
- * is marked with an invalid ASID to make it request for a new ASID when it
- * gets scheduled next time. However its TLB entries (with ASID "x") could
- * exist, which must be cleared before the same ASID is used by the new owner.
- * Flushing them would be plausible but costly solution. Instead we force a
- * allocation policy quirk, which ensures that a stolen ASID won't have any
- * TLB entries associates, alleviating the need to flush.
- * The quirk essentially is not allowing ASID allocated in prev cycle
- * to be used past a roll-over in the next cycle.
- * When this happens (i.e. task ASID > asid tracker), task needs to refresh
- * its ASID, aligning it to current value of tracker. If the task doesn't get
- * scheduled past a roll-over, hence its ASID is not yet realigned with
- * tracker, such ASID is anyways safely reusable because it is
- * gauranteed that TLB entries with that ASID wont exist.
+ * A new allocation cycle, post rollover, could potentially reassign an ASID
+ * to a different task. Thus the rule is to refresh the ASID in a new cycle.
+ * The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
+ * serve as cycle/generation indicator and natural 32 bit unsigned math
+ * automagically increments the generation when lower 8 bits rollover.
  */

-#define FIRST_ASID	0
-#define MAX_ASID	255	/* 8 bit PID field in PID Aux reg */
-#define NO_ASID		(MAX_ASID + 1)	/* ASID Not alloc to mmu ctxt */
-#define NUM_ASID	((MAX_ASID - FIRST_ASID) + 1)
+#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg :8 bit PID */
+#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
+
+#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
+#define MM_CTXT_NO_ASID		0UL

-/* ASID to mm struct mapping */
-extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+#define hw_pid(mm)		(mm->context.asid & MM_CTXT_ASID_MASK)

-extern int asid_cache;
+extern unsigned int asid_cache;

 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
-	struct mm_struct *prev_owner;
 	unsigned long flags;

 	local_irq_save(flags);

 	/*
-	 * Relinquish the currently owned ASID (if any).
-	 * Doing unconditionally saves a cmp-n-branch; for already unused
-	 * ASID slot, the value was/remains NULL
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 * This is done by ensuring that the generation bits in both mm->ASID
+	 * and cpu's ASID counter are exactly same.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 *	 generation, e.g. local_flush_tlb_mm() for forking parent,
+	 *	 first need to destroy the context, setting it to invalid
+	 *	 value.
 	 */
-	asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+	if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
+		goto set_hw;

-	/* move to new ASID */
-	if (++asid_cache > MAX_ASID) {	/* ASID roll-over */
-		asid_cache = FIRST_ASID;
+	/* move to new ASID and handle rollover */
+	if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {
 		flush_tlb_all();
-	}

-	/*
-	 * Is next ASID already owned by some-one else (we are stealing it).
-	 * If so, let the orig owner be aware of this, so when it runs, it
-	 * asks for a brand new ASID. This would only happen for a long-lived
-	 * task with ASID from prev allocation cycle (before ASID roll-over).
-	 *
-	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
-	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
-	 * The stealing scenario described here will only happen if that task
-	 * didn't get a chance to refresh it's ASID - implying stale entries
-	 * won't exist.
-	 */
-	prev_owner = asid_mm_map[asid_cache];
-	if (prev_owner)
-		prev_owner->context.asid = NO_ASID;
+		/*
+		 * Above check for rollover of 8 bit ASID in 32 bit container.
+		 * If the container itself wrapped around, set it to a non zero
+		 * "generation" to distinguish from no context
+		 */
+		if (!asid_cache)
+			asid_cache = MM_CTXT_FIRST_CYCLE;
+	}

 	/* Assign new ASID to tsk */
-	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;

-#ifdef CONFIG_ARC_TLB_DBG
-	pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
-		" pid:%u, assigned asid:%lu\n",
-		(unsigned int)mm, (unsigned int)prev_owner,
-		(unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
-		(mm->context.tsk)->pid, mm->context.asid);
-#endif
-
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);

 	local_irq_restore(flags);
 }
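
In the new scheme the 32-bit asid_cache keeps the 8-bit hardware PID in its low byte and treats the upper 24 bits as an allocation-cycle ("generation") number; an ordinary unsigned increment carries into the generation exactly when the PID wraps. A small userspace model of the allocation rule (deliberately simplified: no TLB flush, no IRQ locking, not the kernel code itself):

	#include <stdio.h>

	#define MM_CTXT_ASID_MASK	0x000000ffU	/* low 8 bits = h/w PID */
	#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)	/* upper 24 = generation */
	#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
	#define MM_CTXT_NO_ASID		0U

	static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

	static unsigned int alloc_asid(unsigned int mm_asid)
	{
		/* same generation as the tracker: existing ASID is still valid */
		if (!((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
			return mm_asid;

		/* bump the tracker; a carry out of the low byte starts a new
		 * generation - the point where the kernel calls flush_tlb_all() */
		if (!(++asid_cache & MM_CTXT_ASID_MASK) && !asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;	/* full 32 bit wraparound */

		return asid_cache;
	}

	int main(void)
	{
		unsigned int a = alloc_asid(MM_CTXT_NO_ASID);	/* fresh mm: new ASID */
		unsigned int b = alloc_asid(a);			/* same cycle: kept */

		printf("asid=0x%x (hw pid %u), reused=0x%x\n",
		       a, a & MM_CTXT_ASID_MASK, b);
		return 0;
	}
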
@@ -134,10 +104,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 static inline int
 init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
-	mm->context.asid = NO_ASID;
-#ifdef CONFIG_ARC_TLB_DBG
-	mm->context.tsk = tsk;
-#endif
+	mm->context.asid = MM_CTXT_NO_ASID;
 	return 0;
 }
@@ -152,40 +119,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif

-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 *  -task never had an ASID (fresh after fork)
-	 *  -it's ASID was stolen - past an ASID roll-over.
-	 *  -There's a third obscure scenario (if this task is running for the
-	 *   first time afer an ASID rollover), where despite having a valid
-	 *   ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below: NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
+	get_new_mmu_context(next);
 }

+/*
+ * Called at the time of execve() to get a new ASID
+ * Note the subtlety here: get_new_mmu_context() behaves differently here
+ * vs. in switch_mm(). Here it always returns a new ASID, because mm has
+ * an unallocated "initial" value, while in latter, it moves to a new ASID,
+ * only if it was unallocated
+ */
+#define activate_mm(prev, next)		switch_mm(prev, next, NULL)
+
 static inline void destroy_context(struct mm_struct *mm)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	asid_mm_map[mm->context.asid] = NULL;
-	mm->context.asid = NO_ASID;
-	local_irq_restore(flags);
+	mm->context.asid = MM_CTXT_NO_ASID;
 }

 /* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
@@ -197,17 +145,6 @@ static inline void destroy_context(struct mm_struct *mm)
  */
 #define deactivate_mm(tsk, mm)   do { } while (0)

-static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
-{
-#ifndef CONFIG_SMP
-	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
-#endif
-
-	/* Unconditionally get a new ASID */
-	get_new_mmu_context(next);
-}
-
 #define enter_lazy_tlb(mm, tsk)

 #endif	/* __ASM_ARC_MMU_CONTEXT_H */
@@ -57,43 +57,31 @@
 #define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<3)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<4)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<8)	/* Page has kernel perm (H) */
-#define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
+#define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)	/* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

-#else
+#else	/* MMU v3 onwards */

-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<1)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<2)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<6)	/* Page has kernel perm (H) */
-#define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
+#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)	/* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
 					   usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)	/* page cache/ swap (S) */

 #define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif

-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
 			_PAGE_GLOBAL | _PAGE_PRESENT)

 #ifdef CONFIG_ARC_CACHE_PAGES
@@ -109,10 +97,6 @@
  */
 #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)

-#define _PAGE_READ	(_PAGE_U_READ | _PAGE_K_READ)
-#define _PAGE_WRITE	(_PAGE_U_WRITE | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE	(_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
@@ -126,8 +110,8 @@
 #define PAGE_SHARED	PAGE_U_W_R

-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
 #define PAGE_KERNEL	__pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
@@ -136,10 +120,9 @@
 #define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

 /* Masks for actual TLB "PD"s */
-#define PTE_BITS_IN_PD0	(_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE | \
-			 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
-			 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)

 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
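
The net effect of the fold: a PTE carries a single RWX set (user semantics) plus _PAGE_GLOBAL, instead of parallel K_* and U_* flags, so kernel/vmalloc mappings become ordinary global pages with the same RWX bits. A standalone sketch of how the folded bits compose, using the MMU v3 bit positions from the hunk above (illustrative values only, not the kernel headers):

	#include <stdio.h>

	#define _PAGE_EXECUTE	(1 << 1)
	#define _PAGE_WRITE	(1 << 2)
	#define _PAGE_READ	(1 << 3)
	#define _PAGE_GLOBAL	(1 << 8)
	#define _PAGE_PRESENT	(1 << 9)

	#define PTE_BITS_RWX	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

	/* vmalloc permissions: one RWX set + global, replacing 3 K_* + 3 U_* bits */
	#define _K_PAGE_PERMS	(PTE_BITS_RWX | _PAGE_GLOBAL | _PAGE_PRESENT)

	int main(void)
	{
		unsigned int kpte = _K_PAGE_PERMS;
		unsigned int upte = kpte & ~_PAGE_GLOBAL;	/* user page: not global */

		printf("kernel=0x%x user=0x%x rwx=0x%x\n",
		       kpte, upte, upte & PTE_BITS_RWX);
		return 0;
	}
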
@@ -20,27 +20,17 @@ struct pt_regs {
 	/* Real registers */
 	long bta;	/* bta_l1, bta_l2, erbta */

-	long lp_start;
-	long lp_end;
-	long lp_count;
+	long lp_start, lp_end, lp_count;

 	long status32;	/* status32_l1, status32_l2, erstatus */
 	long ret;	/* ilink1, ilink2 or eret */
 	long blink;
 	long fp;
 	long r26;	/* gp */
-	long r12;
-	long r11;
-	long r10;
-	long r9;
-	long r8;
-	long r7;
-	long r6;
-	long r5;
-	long r4;
-	long r3;
-	long r2;
-	long r1;
-	long r0;
+
+	long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+
 	long sp;	/* user/kernel sp depending on where we came from */
 	long orig_r0;
@@ -70,19 +60,7 @@ struct pt_regs {
 /* Callee saved registers - need to be saved only when you are scheduled out */

 struct callee_regs {
-	long r25;
-	long r24;
-	long r23;
-	long r22;
-	long r21;
-	long r20;
-	long r19;
-	long r18;
-	long r17;
-	long r16;
-	long r15;
-	long r14;
-	long r13;
+	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
 };

 #define instruction_pointer(regs)	((regs)->ret)
@@ -20,9 +20,9 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_LOCKED	{ __ARCH_SPIN_LOCK_LOCKED__ }

 /*
- * Unlocked: 0x01_00_00_00
- * Read lock(s): 0x00_FF_00_00 to say 0x01
- * Write lock: 0x0, but only possible if prior value "unlocked" 0x0100_0000
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
  */
 typedef struct {
 	volatile unsigned int counter;
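
The corrected comment describes the whole rwlock state in one counter: 0x0100_0000 when free, each reader decrements it (so up to 2^24 - 1 concurrent readers), and a writer takes it straight to 0, which it may only do from the fully-unlocked value. A non-atomic userspace model of that encoding (illustration only; the real primitives use atomic, IRQ-safe sequences):

	#include <stdbool.h>
	#include <stdio.h>

	#define RW_UNLOCKED	0x01000000	/* same value as the ARC header */

	typedef struct { volatile unsigned int counter; } rwlock_model_t;

	static bool read_trylock_model(rwlock_model_t *rw)
	{
		/* readers allowed whenever no writer holds it: counter > 0 */
		if (rw->counter > 0) {
			rw->counter--;	/* each reader decrements */
			return true;
		}
		return false;		/* 0 means write-locked */
	}

	static bool write_trylock_model(rwlock_model_t *rw)
	{
		/* writer needs the lock fully unlocked: exactly 0x0100_0000 */
		if (rw->counter == RW_UNLOCKED) {
			rw->counter = 0;
			return true;
		}
		return false;
	}

	int main(void)
	{
		rwlock_model_t rw = { RW_UNLOCKED };

		printf("reader: %d, writer while read-held: %d\n",
		       read_trylock_model(&rw), write_trylock_model(&rw));
		return 0;
	}
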
@@ -267,12 +267,7 @@ ARC_EXIT handle_interrupt_level1
 ARC_ENTRY instr_service

-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	lr  r0, [efa]
 	mov r1, sp
@@ -289,15 +284,13 @@ ARC_EXIT instr_service
 ARC_ENTRY mem_service

-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	lr  r0, [efa]
 	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN r9
+
 	bl  do_memory_error
 	b   ret_from_exception
 ARC_EXIT mem_service
@@ -308,11 +301,7 @@ ARC_EXIT mem_service
 ARC_ENTRY EV_MachineCheck

-	EXCPN_PROLOG_FREEUP_REG r9
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	lr  r2, [ecr]
 	lr  r0, [efa]
@@ -342,13 +331,7 @@ ARC_EXIT EV_MachineCheck
 ARC_ENTRY EV_TLBProtV

-	EXCPN_PROLOG_FREEUP_REG r9
-
-	;Which mode (user/kernel) was the system in when Exception occured
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	;---------(3) Save some more regs-----------------
 	; vineetg: Mar 6th: Random Seg Fault issue #1
@@ -406,12 +389,7 @@ ARC_EXIT EV_TLBProtV
 ; ---------------------------------------------
 ARC_ENTRY EV_PrivilegeV

-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	lr  r0, [efa]
 	mov r1, sp
@@ -427,14 +405,13 @@ ARC_EXIT EV_PrivilegeV
 ; ---------------------------------------------
 ARC_ENTRY EV_Extension

-	EXCPN_PROLOG_FREEUP_REG r9
-
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	lr  r0, [efa]
 	mov r1, sp
+
+	FAKE_RET_FROM_EXCPN r9
+
 	bl  do_extension_fault
 	b   ret_from_exception
 ARC_EXIT EV_Extension
@@ -526,14 +503,7 @@ trap_with_param:
 ARC_ENTRY EV_Trap

-	; Need at least 1 reg to code the early exception prolog
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	;Which mode (user/kernel) was the system in when intr occured
-	lr  r9, [erstatus]
-
-	SWITCH_TO_KERNEL_STK
-	SAVE_ALL_SYS
+	EXCEPTION_PROLOGUE

 	;------- (4) What caused the Trap --------------
 	lr     r12, [ecr]
@@ -642,6 +612,9 @@ resume_kernel_mode:
 #ifdef CONFIG_PREEMPT

+	; This is a must for preempt_schedule_irq()
+	IRQ_DISABLE	r9
+
 	; Can't preempt if preemption disabled
 	GET_CURR_THR_INFO_FROM_SP   r10
 	ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
@@ -651,8 +624,6 @@ resume_kernel_mode:
 	ld  r9, [r10, THREAD_INFO_FLAGS]
 	bbit0  r9, TIF_NEED_RESCHED, restore_regs

-	IRQ_DISABLE	r9
-