Commit c8f94df5 authored by Richard Henderson

tcg: Introduce zero and sign-extended versions of load helpers


Reviewed-by: Aurelien Jarno <aurelien@aurel32.net>
Signed-off-by: Richard Henderson <rth@twiddle.net>
parent e58eb534
......@@ -29,23 +29,39 @@
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define DATA_TYPE uint64_t
#define SDATA_TYPE int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define DATA_TYPE uint32_t
#define SDATA_TYPE int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define DATA_TYPE uint16_t
#define SDATA_TYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define DATA_TYPE uint8_t
#define SDATA_TYPE int8_t
#else
#error unsupported data size
#endif
#define DATA_TYPE glue(u, SDATA_TYPE)
/* For the benefit of TCG generated code, we want to avoid the complication
of ABI-specific return type promotion and always return a value extended
to the register size of the host. This is tcg_target_long, except in the
case of a 32-bit host and 64-bit data, and for that we always have
uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
......@@ -77,10 +93,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
WORD_TYPE
glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
......@@ -126,9 +142,9 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
addr2 = addr1 + DATA_SIZE;
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)
res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
......@@ -147,19 +163,33 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
return glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
/* Note that ldl_raw is defined with type "int". */
return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
GETRA_EXT());
}
#ifndef SOFTMMU_CODE_ACCESS
/* Provide signed versions of the load routines as well. We can of course
avoid this for 64-bit data, or for 32-bit data on 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE
glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr, mmu_idx, retaddr);
}
#endif
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
DATA_TYPE val,
......@@ -267,3 +297,7 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
......@@ -1025,9 +1025,9 @@ static void tcg_out_jmp(TCGContext *s, uintptr_t dest)
* int mmu_idx, uintptr_t ra)
*/
static const void * const qemu_ld_helpers[4] = {
helper_ret_ldb_mmu,
helper_ret_ldw_mmu,
helper_ret_ldl_mmu,
helper_ret_ldub_mmu,
helper_ret_lduw_mmu,
helper_ret_ldul_mmu,
helper_ret_ldq_mmu,
};
......
......@@ -754,15 +754,24 @@ void tcg_out_tb_finalize(TCGContext *s);
* Memory helpers that will be used by TCG generated code.
*/
#ifdef CONFIG_SOFTMMU
uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
/* Value zero-extended to tcg register size. */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_lduw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldul_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
/* Value sign-extended to tcg register size. */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldsw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
tcg_target_ulong helper_ret_ldsl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
......
Markdown is supported
Attach a file by dragging & dropping or selecting one.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment