Commit 6ebbf390 authored by j_mayer

Replace the is_user variable with mmu_idx in the softmmu core,
  allowing support of more than 2 MMU access modes.
Add a backward-compatibility is_user variable in target code where needed.
Implement a per-target cpu_mmu_index function, avoiding duplicated code
  and #ifdef TARGET_xxx in the softmmu core functions.
Implement per-target MMU mode definitions; as an example, add a PowerPC
  hypervisor mode definition and Alpha executive and kernel mode definitions.
Optimize the PowerPC case, precomputing mmu_idx when the MSR register changes
  and using the same definition in the code translation.
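To make the new contract concrete, here is a minimal sketch of what a target
now provides, modeled on the Alpha example cited above; the exact values and
the cpu.h placement are illustrative, not a verbatim excerpt of the patch:

    /* per-target cpu.h: declare the MMU modes and how to pick one */
    #define NB_MMU_MODES 4              /* Alpha has 4 running levels */
    #define MMU_MODE0_SUFFIX _kernel
    #define MMU_MODE1_SUFFIX _executive
    #define MMU_MODE2_SUFFIX _supervisor
    #define MMU_MODE3_SUFFIX _user
    #define MMU_USER_IDX 3              /* index used for user-mode accesses */

    /* map the current CPU state to an MMU mode index; the softmmu core
       now indexes tlb_table[mmu_idx][...] with this instead of the old
       boolean is_user */
    static inline int cpu_mmu_index (CPUState *env)
    {
        return (env->ps >> 3) & 3;      /* current mode from the PS register */
    }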


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@3384 c046a42c-6fe2-441c-8c8c-71466251a162
parent d0f48074
@@ -112,15 +112,6 @@ typedef struct CPUTLBEntry {
     target_phys_addr_t addend;
 } CPUTLBEntry;
+/* Alpha has 4 different running levels */
+#if defined(TARGET_ALPHA)
+#define NB_MMU_MODES 4
+#elif defined(TARGET_PPC64H) /* PowerPC 64 with hypervisor mode support */
+#define NB_MMU_MODES 3
+#else
+#define NB_MMU_MODES 2
+#endif
 #define CPU_COMMON \
     struct TranslationBlock *current_tb; /* currently executing TB */ \
     /* soft mmu support */ \
......
@@ -884,8 +884,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_x86_handle_mmu_fault(env, address, is_write,
-                                   ((env->hflags & HF_CPL_MASK) == 3), 0);
+    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -934,7 +933,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_arm_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -970,7 +969,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1007,7 +1006,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, msr_pr, 0);
+    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1056,7 +1055,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         return 1;
     }
     /* see if it is an MMU fault */
-    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1096,7 +1095,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_mips_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1146,7 +1145,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1191,7 +1190,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
@@ -1235,7 +1234,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
     }
     /* see if it is an MMU fault */
-    ret = cpu_cris_handle_mmu_fault(env, address, is_write, 1, 0);
+    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
     if (ret < 0)
         return 0; /* not an MMU fault */
     if (ret == 0)
......
@@ -117,14 +117,14 @@ void tlb_flush_page(CPUState *env, target_ulong addr);
 void tlb_flush(CPUState *env, int flush_global);
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                       target_phys_addr_t paddr, int prot,
-                      int is_user, int is_softmmu);
+                      int mmu_idx, int is_softmmu);
 static inline int tlb_set_page(CPUState *env, target_ulong vaddr,
                                target_phys_addr_t paddr, int prot,
-                               int is_user, int is_softmmu)
+                               int mmu_idx, int is_softmmu)
 {
     if (prot & PAGE_READ)
         prot |= PAGE_EXEC;
-    return tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
+    return tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
 }
 #define CODE_GEN_MAX_SIZE 65536
@@ -562,10 +562,10 @@ extern int tb_invalidated_flag;
 #if !defined(CONFIG_USER_ONLY)
-void tlb_fill(target_ulong addr, int is_write, int is_user,
+void tlb_fill(target_ulong addr, int is_write, int mmu_idx,
               void *retaddr);
-#define ACCESS_TYPE 3
+#define ACCESS_TYPE (NB_MMU_MODES + 1)
 #define MEMSUFFIX _code
 #define env cpu_single_env
@@ -598,35 +598,15 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
    is the offset relative to phys_ram_base */
 static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
 {
-    int is_user, index, pd;
+    int mmu_idx, index, pd;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-#if defined(TARGET_I386)
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
-#elif defined (TARGET_PPC)
-    is_user = msr_pr;
-#elif defined (TARGET_MIPS)
-    is_user = ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM);
-#elif defined (TARGET_SPARC)
-    is_user = (env->psrs == 0);
-#elif defined (TARGET_ARM)
-    is_user = ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR);
-#elif defined (TARGET_SH4)
-    is_user = ((env->sr & SR_MD) == 0);
-#elif defined (TARGET_ALPHA)
-    is_user = ((env->ps >> 3) & 3);
-#elif defined (TARGET_M68K)
-    is_user = ((env->sr & SR_S) == 0);
-#elif defined (TARGET_CRIS)
-    is_user = (0);
-#else
-#error unimplemented CPU
-#endif
-    if (__builtin_expect(env->tlb_table[is_user][index].addr_code !=
+    mmu_idx = cpu_mmu_index(env);
+    if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_code !=
                          (addr & TARGET_PAGE_MASK), 0)) {
         ldub_code(addr);
     }
-    pd = env->tlb_table[is_user][index].addr_code & ~TARGET_PAGE_MASK;
+    pd = env->tlb_table[mmu_idx][index].addr_code & ~TARGET_PAGE_MASK;
     if (pd > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
 #ifdef TARGET_SPARC
         do_unassigned_access(addr, 0, 1, 0);
@@ -634,7 +614,7 @@ static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
         cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
 #endif
     }
-    return addr + env->tlb_table[is_user][index].addend - (unsigned long)phys_ram_base;
+    return addr + env->tlb_table[mmu_idx][index].addend - (unsigned long)phys_ram_base;
 }
 #endif
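The PowerPC optimization mentioned in the log can be pictured as below; the
helper name and the exact MSR decoding are illustrative assumptions, only the
idea of caching the index on MSR writes comes from this commit:

    /* hypothetical sketch: refresh a cached index on every MSR write */
    static inline void update_mmu_idx (CPUState *env)
    {
        /* with TARGET_PPC64H, a third, hypervisor index would be picked here */
        env->mmu_idx = msr_pr ? MMU_USER_IDX : 0;
    }

    static inline int cpu_mmu_index (CPUState *env)
    {
        return env->mmu_idx;    /* hot paths and the translator read one int */
    }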
......
@@ -1608,7 +1608,7 @@ static inline void tlb_set_dirty(CPUState *env,
    conflicting with the host address space). */
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                       target_phys_addr_t paddr, int prot,
-                      int is_user, int is_softmmu)
+                      int mmu_idx, int is_softmmu)
 {
     PhysPageDesc *p;
     unsigned long pd;
@@ -1626,8 +1626,8 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
         pd = p->phys_offset;
     }
 #if defined(DEBUG_TLB)
-    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
-           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
+    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
+           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
 #endif
     ret = 0;
@@ -1664,7 +1664,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     addend -= vaddr;
-    te = &env->tlb_table[is_user][index];
+    te = &env->tlb_table[mmu_idx][index];
     te->addend = addend;
     if (prot & PAGE_READ) {
         te->addr_read = address;
@@ -1790,7 +1790,7 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                       target_phys_addr_t paddr, int prot,
-                      int is_user, int is_softmmu)
+                      int mmu_idx, int is_softmmu)
 {
     return 0;
 }
......
@@ -811,12 +811,14 @@ static int get_page_bits (CPUState *env)
 static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp,
                     uint64_t ptebase, int page_bits, uint64_t level,
-                    int is_user, int rw)
+                    int mmu_idx, int rw)
 {
     uint64_t pteaddr, pte, pfn;
     uint8_t gh;
-    int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar;
+    int ure, uwe, kre, kwe, foE, foR, foW, v, ret, ar, is_user;
+    /* XXX: TOFIX */
+    is_user = mmu_idx == MMU_USER_IDX;
     pteaddr = (ptebase << page_bits) + (8 * level);
     pte = ldq_raw(pteaddr);
     /* Decode all interesting PTE fields */
@@ -871,7 +873,7 @@ static int get_pte (uint64_t *pfnp, int *zbitsp, int *protp,
 static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
                            uint64_t ptebase, int page_bits,
-                           uint64_t vaddr, int is_user, int rw)
+                           uint64_t vaddr, int mmu_idx, int rw)
 {
     uint64_t pfn, page_mask, lvl_mask, level1, level2, level3;
     int lvl_bits, ret;
@@ -909,7 +911,7 @@ static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
         break;
     }
     /* Level 3 PTE */
-    ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, is_user, rw);
+    ret = get_pte(&pfn, zbitsp, prot, pfn, page_bits, level3, mmu_idx, rw);
     if (ret & 0x1) {
         /* Translation not valid */
         ret = 1;
@@ -943,7 +945,7 @@ static int paddr_from_pte (uint64_t *paddr, int *zbitsp, int *prot,
 static int virtual_to_physical (CPUState *env, uint64_t *physp,
                                 int *zbitsp, int *protp,
-                                uint64_t virtual, int is_user, int rw)
+                                uint64_t virtual, int mmu_idx, int rw)
 {
     uint64_t sva, ptebase;
     int seg, page_bits, ret;
@@ -961,16 +963,16 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,
     case 0:
         /* seg1: 3 levels of PTE */
         ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
-                             virtual, is_user, rw);
+                             virtual, mmu_idx, rw);
         break;
     case 1:
         /* seg1: 2 levels of PTE */
         ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
-                             virtual, is_user, rw);
+                             virtual, mmu_idx, rw);
         break;
     case 2:
         /* kernel segment */
-        if (is_user) {
+        if (mmu_idx != 0) {
             ret = 2;
         } else {
             *physp = virtual;
@@ -979,7 +981,7 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,
     case 3:
         /* seg1: TB mapped */
         ret = paddr_from_pte(physp, zbitsp, protp, ptebase, page_bits,
-                             virtual, is_user, rw);
+                             virtual, mmu_idx, rw);
         break;
     default:
         ret = 1;
@@ -991,7 +993,7 @@ static int virtual_to_physical (CPUState *env, uint64_t *physp,
 /* XXX: code provision */
 int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
-                              int is_user, int is_softmmu)
+                              int mmu_idx, int is_softmmu)
 {
     uint64_t physical, page_size, end;
     int prot, zbits, ret;
@@ -1000,7 +1002,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
         ret = 2;
     } else {
         ret = virtual_to_physical(env, &physical, &zbits, &prot,
-                                  address, is_user, rw);
+                                  address, mmu_idx, rw);
     }
     switch (ret) {
     case 0:
@@ -1009,7 +1011,7 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
         address &= ~(page_size - 1);
         for (end = physical + page_size; physical < end; physical += 0x1000) {
             ret = tlb_set_page(env, address, physical, prot,
-                               is_user, is_softmmu);
+                               mmu_idx, is_softmmu);
             address += 0x1000;
         }
         break;
......
 /* Common softmmu definitions and inline routines. */
-#define ldul_user ldl_user
-#define ldul_kernel ldl_kernel
+/* XXX: find something cleaner.
+ * Furthermore, this is false for 64 bits targets
+ */
+#define ldul_user ldl_user
+#define ldul_kernel ldl_kernel
+#define ldul_hypv ldl_hypv
+#define ldul_executive ldl_executive
+#define ldul_supervisor ldl_supervisor
 #define ACCESS_TYPE 0
-#define MEMSUFFIX _kernel
+#define MEMSUFFIX MMU_MODE0_SUFFIX
 #define DATA_SIZE 1
 #include "softmmu_header.h"
@@ -20,7 +26,7 @@
 #undef MEMSUFFIX
 #define ACCESS_TYPE 1
-#define MEMSUFFIX _user
+#define MEMSUFFIX MMU_MODE1_SUFFIX
 #define DATA_SIZE 1
 #include "softmmu_header.h"
@@ -35,8 +41,50 @@
 #undef ACCESS_TYPE
 #undef MEMSUFFIX
-/* these access are slower, they must be as rare as possible */
+#if (NB_MMU_MODES >= 3)
 #define ACCESS_TYPE 2
+#define MEMSUFFIX MMU_MODE2_SUFFIX
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#if (NB_MMU_MODES >= 4)
+#define ACCESS_TYPE 3
+#define MEMSUFFIX MMU_MODE3_SUFFIX
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+#if (NB_MMU_MODES > 4)
+#error "NB_MMU_MODES > 4 is not supported for now"
+#endif /* (NB_MMU_MODES > 4) */
+#endif /* (NB_MMU_MODES == 4) */
+#endif /* (NB_MMU_MODES >= 3) */
+/* these accesses are slower, they must be as rare as possible */
+#define ACCESS_TYPE (NB_MMU_MODES)
 #define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "softmmu_header.h"
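For orientation: with NB_MMU_MODES == 2 and a target defining MMU_MODE0_SUFFIX
as _kernel and MMU_MODE1_SUFFIX as _user, the repeated inclusions of
softmmu_header.h above expand to one accessor family per mode and per size. A
sketch of the resulting names for 4-byte loads, following the ACCESS_TYPE rules
in softmmu_header.h below:

    ldl_kernel(ptr)  /* ACCESS_TYPE 0: fixed TLB set 0 */
    ldl_user(ptr)    /* ACCESS_TYPE 1: fixed TLB set 1 */
    ldl_data(ptr)    /* ACCESS_TYPE NB_MMU_MODES: set chosen by cpu_mmu_index(env) */
    ldl_code(ptr)    /* ACCESS_TYPE NB_MMU_MODES + 1: code fetch via the _cmmu path */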
......
@@ -39,66 +39,19 @@
 #error unsupported data size
 #endif
-#if ACCESS_TYPE == 0
+#if ACCESS_TYPE < (NB_MMU_MODES)
-#define CPU_MEM_INDEX 0
+#define CPU_MMU_INDEX ACCESS_TYPE
 #define MMUSUFFIX _mmu
-#elif ACCESS_TYPE == 1
+#elif ACCESS_TYPE == (NB_MMU_MODES)
-#define CPU_MEM_INDEX 1
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
 #define MMUSUFFIX _mmu
-#elif ACCESS_TYPE == 2
-#ifdef TARGET_I386
-#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
-#elif defined (TARGET_PPC)
-#define CPU_MEM_INDEX (msr_pr)
-#elif defined (TARGET_MIPS)
-#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
-#elif defined (TARGET_SPARC)
-#define CPU_MEM_INDEX ((env->psrs) == 0)
-#elif defined (TARGET_ARM)
-#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
-#elif defined (TARGET_SH4)
-#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
-#elif defined (TARGET_ALPHA)
-#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
-#elif defined (TARGET_M68K)
-#define CPU_MEM_INDEX ((env->sr & SR_S) == 0)
-#elif defined (TARGET_CRIS)
-/* CRIS FIXME: I guess we want to validate supervisor mode acceses here. */
-#define CPU_MEM_INDEX (0)
-#else
-#error unsupported CPU
-#endif
-#define MMUSUFFIX _mmu
+#elif ACCESS_TYPE == (NB_MMU_MODES + 1)
-#elif ACCESS_TYPE == 3
-#ifdef TARGET_I386
-#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
-#elif defined (TARGET_PPC)
-#define CPU_MEM_INDEX (msr_pr)
-#elif defined (TARGET_MIPS)
-#define CPU_MEM_INDEX ((env->hflags & MIPS_HFLAG_MODE) == MIPS_HFLAG_UM)
-#elif defined (TARGET_SPARC)
-#define CPU_MEM_INDEX ((env->psrs) == 0)
-#elif defined (TARGET_ARM)
-#define CPU_MEM_INDEX ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)
-#elif defined (TARGET_SH4)
-#define CPU_MEM_INDEX ((env->sr & SR_MD) == 0)
-#elif defined (TARGET_ALPHA)
-#define CPU_MEM_INDEX ((env->ps >> 3) & 3)
-#elif defined (TARGET_M68K)
-#define CPU_MEM_INDEX ((env->sr & SR_S) == 0)
-#elif defined (TARGET_CRIS)
-/* CRIS FIXME: I guess we want to validate supervisor mode acceses here. */
-#define CPU_MEM_INDEX (0)
-#else
-#error unsupported CPU
-#endif
+#define CPU_MMU_INDEX (cpu_mmu_index(env))
 #define MMUSUFFIX _cmmu
 #else
@@ -111,18 +64,18 @@
 #define RES_TYPE int
 #endif
-#if ACCESS_TYPE == 3
+#if ACCESS_TYPE == (NB_MMU_MODES + 1)
 #define ADDR_READ addr_code
 #else
 #define ADDR_READ addr_read
 #endif
 DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
-                                                         int is_user);
-void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int is_user);
+                                                         int mmu_idx);
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr, DATA_TYPE v, int mmu_idx);
 #if (DATA_SIZE <= 4) && (TARGET_LONG_BITS == 32) && defined(__i386__) && \
-    (ACCESS_TYPE <= 1) && defined(ASM_SOFTMMU)
+    (ACCESS_TYPE < NB_MMU_MODES) && defined(ASM_SOFTMMU)
 #define CPU_TLB_ENTRY_BITS 4
@@ -161,8 +114,8 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
-                  "i" (CPU_MEM_INDEX),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
+                  "i" (CPU_MMU_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
     return res;
@@ -208,8 +161,8 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_read)),
-                  "i" (CPU_MEM_INDEX),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_read)),
+                  "i" (CPU_MMU_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__ld, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
     return res;
@@ -260,8 +213,8 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
                   "i" ((CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                   "i" (TARGET_PAGE_MASK | (DATA_SIZE - 1)),
-                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MEM_INDEX][0].addr_write)),
-                  "i" (CPU_MEM_INDEX),
+                  "m" (*(uint32_t *)offsetof(CPUState, tlb_table[CPU_MMU_INDEX][0].addr_write)),
+                  "i" (CPU_MMU_INDEX),
                   "m" (*(uint8_t *)&glue(glue(__st, SUFFIX), MMUSUFFIX))
                   : "%eax", "%ecx", "%edx", "memory", "cc");
 }
@@ -276,16 +229,16 @@ static inline RES_TYPE glue(glue(ld, USUFFIX), MEMSUFFIX)(target_ulong ptr)
     RES_TYPE res;
     target_ulong addr;
     unsigned long physaddr;
-    int is_user;
+    int mmu_idx;
     addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
+    mmu_idx = CPU_MMU_INDEX;
+    if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
+        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
     } else {
-        physaddr = addr + env->tlb_table[is_user][index].addend;
+        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
@@ -297,23 +250,23 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(target_ulong ptr)
     int res, index;
     target_ulong addr;
     unsigned long physaddr;
-    int is_user;
+    int mmu_idx;
    addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_table[is_user][index].ADDR_READ !=
+    mmu_idx = CPU_MMU_INDEX;
+    if (__builtin_expect(env->tlb_table[mmu_idx][index].ADDR_READ !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
+        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, mmu_idx);
     } else {
-        physaddr = addr + env->tlb_table[is_user][index].addend;
+        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
 }
 #endif
-#if ACCESS_TYPE != 3
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
 /* generic store macro */
@@ -322,25 +275,25 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(target_ulong ptr, RES_TYPE
     int index;
     target_ulong addr;
     unsigned long physaddr;
-    int is_user;
+    int mmu_idx;
     addr = ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    is_user = CPU_MEM_INDEX;
-    if (__builtin_expect(env->tlb_table[is_user][index].addr_write !=
+    mmu_idx = CPU_MMU_INDEX;
+    if (__builtin_expect(env->tlb_table[mmu_idx][index].addr_write !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
+        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, mmu_idx);
     } else {
-        physaddr = addr + env->tlb_table[is_user][index].addend;
+        physaddr = addr + env->tlb_table[mmu_idx][index].addend;
         glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
     }
 }
-#endif /* ACCESS_TYPE != 3 */
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
 #endif /* !asm */
-#if ACCESS_TYPE != 3
+#if ACCESS_TYPE != (NB_MMU_MODES + 1)
 #if DATA_SIZE == 8
 static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
@@ -386,7 +339,7 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
 }
 #endif /* DATA_SIZE == 4 */
-#endif /* ACCESS_TYPE != 3 */
+#endif /* ACCESS_TYPE != (NB_MMU_MODES + 1) */
 #undef RES_TYPE
 #undef DATA_TYPE
@@ -394,6 +347,6 @@ static inline void glue(stfl, MEMSUFFIX)(target_ulong ptr, float32 v)
 #undef SUFFIX
 #undef USUFFIX
 #undef DATA_SIZE
-#undef CPU_MEM_INDEX
+#undef CPU_MMU_INDEX
 #undef MMUSUFFIX
 #undef ADDR_READ
@@ -48,7 +48,7 @@
 #endif
 static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
-                                                        int is_user,
+                                                        int mmu_idx,