Commit 545825d4 authored by Aurelien Jarno

Merge branch 'tcg-next' of git://github.com/rth7680/qemu

* 'tcg-next' of git://github.com/rth7680/qemu: (29 commits)
  tcg-i386: Make use of zero-extended memory helper routines
  tcg: Introduce zero and sign-extended versions of load helpers
  exec: Split softmmu_defs.h
  target: Include softmmu_exec.h where forgotten
  exec: Rename USUFFIX to LSUFFIX
  tcg-i386: Don't perform GETPC adjustment in TCG code
  exec: Reorganize the GETRA/GETPC macros
  configure: Allow x32 as a host
  tcg-i386: Adjust tcg_out_tlb_load for x32
  tcg-i386: Use intptr_t appropriately
  tcg: Fix jit debug for x32
  tcg: Use appropriate types in tcg_reg_alloc_call
  tcg: Change tcg_out_ld/st offset to intptr_t
  tcg: Change tcg_gen_exit_tb argument to uintptr_t
  tcg: Use uintptr_t in TCGHelperInfo
  tcg: Change relocation offsets to intptr_t
  tcg: Change memory offsets to intptr_t
  tcg: Change frame pointer offsets to intptr_t
  tcg: Define TCG_ptr properly
  tcg: Define TCG_TYPE_PTR properly
  ...
parents 32f3bd6d 6fb58745
......@@ -362,7 +362,11 @@ if test ! -z "$cpu" ; then
elif check_define __i386__ ; then
cpu="i386"
elif check_define __x86_64__ ; then
cpu="x86_64"
if check_define __ILP32__ ; then
cpu="x32"
else
cpu="x86_64"
fi
elif check_define __sparc__ ; then
if check_define __arch64__ ; then
cpu="sparc64"
......@@ -399,7 +403,7 @@ ARCH=
# Normalise host CPU name and set ARCH.
# Note that this case should only have supported host CPUs, not guests.
case "$cpu" in
ia64|ppc|ppc64|s390|s390x|sparc64)
ia64|ppc|ppc64|s390|s390x|sparc64|x32)
cpu="$cpu"
;;
i386|i486|i586|i686|i86pc|BePC)
......@@ -550,7 +554,7 @@ Haiku)
kvm="yes"
vhost_net="yes"
vhost_scsi="yes"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" -o "$cpu" = "x32" ] ; then
audio_possible_drivers="$audio_possible_drivers fmod"
fi
QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers -I$(pwd)/linux-headers $QEMU_INCLUDES"
......@@ -977,6 +981,11 @@ case "$cpu" in
LDFLAGS="-m64 $LDFLAGS"
cc_i386='$(CC) -m32'
;;
x32)
CPU_CFLAGS="-mx32"
LDFLAGS="-mx32 $LDFLAGS"
cc_i386='$(CC) -m32'
;;
# No special flags required for other host CPUs
esac
......@@ -1251,7 +1260,7 @@ fi
if test "$pie" = ""; then
case "$cpu-$targetos" in
i386-Linux|x86_64-Linux|i386-OpenBSD|x86_64-OpenBSD)
i386-Linux|x86_64-Linux|x32-Linux|i386-OpenBSD|x86_64-OpenBSD)
;;
*)
pie="no"
......@@ -3506,7 +3515,7 @@ fi
if test "$pie" = "no" ; then
textseg_addr=
case "$cpu" in
arm | hppa | i386 | m68k | ppc | ppc64 | s390* | sparc | sparc64 | x86_64)
arm | hppa | i386 | m68k | ppc | ppc64 | s390* | sparc | sparc64 | x86_64 | x32)
textseg_addr=0x60000000
;;
mips)
......@@ -3681,7 +3690,7 @@ echo "libs_softmmu=$libs_softmmu" >> $config_host_mak
echo "ARCH=$ARCH" >> $config_host_mak
case "$cpu" in
arm|i386|x86_64|ppc|aarch64)
arm|i386|x86_64|x32|ppc|aarch64)
# The TCG interpreter currently does not support ld/st optimization.
if test "$tcg_interpreter" = "no" ; then
echo "CONFIG_QEMU_LDST_OPTIMIZATION=y" >> $config_host_mak
......@@ -4116,7 +4125,7 @@ elif test "$ARCH" = "sparc64" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/sparc $QEMU_INCLUDES"
elif test "$ARCH" = "s390x" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/s390 $QEMU_INCLUDES"
elif test "$ARCH" = "x86_64" ; then
elif test "$ARCH" = "x86_64" -o "$ARCH" = "x32" ; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/i386 $QEMU_INCLUDES"
else
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/\$(ARCH) $QEMU_INCLUDES"
......@@ -4178,7 +4187,7 @@ fi
if test "$linux" = "yes" ; then
mkdir -p linux-headers
case "$cpu" in
i386|x86_64)
i386|x86_64|x32)
linux_arch=x86
;;
ppcemb|ppc|ppc64)
......@@ -4444,7 +4453,7 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
echo "CONFIG_HPPA_DIS=y" >> $config_target_mak
echo "CONFIG_HPPA_DIS=y" >> config-all-disas.mak
;;
i386|x86_64)
i386|x86_64|x32)
echo "CONFIG_I386_DIS=y" >> $config_target_mak
echo "CONFIG_I386_DIS=y" >> config-all-disas.mak
;;
......
......@@ -53,7 +53,7 @@ void cpu_resume_from_signal(CPUArchState *env, void *puc)
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
CPUArchState *env = cpu->env_ptr;
tcg_target_ulong next_tb = tcg_qemu_tb_exec(env, tb_ptr);
uintptr_t next_tb = tcg_qemu_tb_exec(env, tb_ptr);
if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
/* We didn't start executing this TB (eg because the instruction
* counter hit zero); we must restore the guest PC to the address
......@@ -209,7 +209,7 @@ int cpu_exec(CPUArchState *env)
int ret, interrupt_request;
TranslationBlock *tb;
uint8_t *tc_ptr;
tcg_target_ulong next_tb;
uintptr_t next_tb;
if (cpu->halted) {
if (!cpu_has_work(cpu)) {
......
......@@ -295,47 +295,42 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
}
}
/* The return address may point to the start of the next instruction.
Subtracting one gets us the call instruction itself. */
/* GETRA is the true target of the return instruction that we'll execute,
defined here to keep the follow-up macros simple. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#elif defined(__s390__) && !defined(__s390x__)
# define GETPC() \
(((uintptr_t)__builtin_return_address(0) & 0x7fffffffUL) - 1)
#elif defined(__arm__)
/* Thumb return addresses have the low bit set, so we need to subtract two.
This is still safe in ARM mode because instructions are 4 bytes. */
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 2)
# define GETRA() tci_tb_ptr
#else
# define GETRA() \
((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif
/* The true return address will often point to a host insn that is part of
the next translated guest insn. Adjust the address backward to point to
the middle of the call insn. Subtracting one would do the job except for
several compressed mode architectures (arm, mips) which set the low bit
to indicate the compressed mode; subtracting two works around that. It
is also the case that there are no host isas that contain a call insn
smaller than 4 bytes, so we don't worry about special-casing this. */
#if defined(CONFIG_TCG_INTERPRETER)
# define GETPC_ADJ 0
#else
# define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
# define GETPC_ADJ 2
#endif
#define GETPC() (GETRA() - GETPC_ADJ)
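Taken together: GETRA() is the raw (true) return address, and GETPC() backs it up by GETPC_ADJ so the result lands inside the call insn rather than at the start of the following one. A minimal sketch of the pattern, with a hypothetical demo_helper standing in for a real MMU helper:

#include <stdint.h>
#include <stdio.h>

#define GETRA() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#define GETPC_ADJ 2              /* as in the non-interpreter case above */
#define GETPC()   (GETRA() - GETPC_ADJ)

static void __attribute__((noinline)) demo_helper(void)
{
    /* ra is the insn after the call; pc points back into the call insn */
    uintptr_t ra = GETRA();
    uintptr_t pc = GETPC();
    printf("return address %#lx, adjusted pc %#lx\n",
           (unsigned long)ra, (unsigned long)pc);
}

int main(void)
{
    demo_helper();
    return 0;
}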
/* The LDST optimization splits code generation into fast and slow paths.
In some implementations, we pass the "logical" return address manually;
in others, we must infer the logical return from the true return. */
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* qemu_ld/st optimization splits code generation into fast and slow paths;
thus it needs special handling for an MMU helper, which is called from the
slow path, to get the fast path's pc without any additional argument.
It uses a tricky solution that embeds the fast path pc into the slow path.
Code flow in slow path:
(1) pre-process
(2) call MMU helper
(3) jump to (5)
(4) fast path information (implementation specific)
(5) post-process (e.g. stack adjust)
(6) jump to the code following the fast path
*/
# if defined(__i386__) || defined(__x86_64__)
# define GETPC_EXT() GETPC()
# elif defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() ((uintptr_t) ((*(int32_t *)(GETRA() - 4)) - 1))
# if defined (_ARCH_PPC) && !defined (_ARCH_PPC64)
# define GETRA_LDST(RA) (*(int32_t *)((RA) - 4))
# elif defined(__arm__)
/* We define two insns between the return address and the branch back to
straight-line code. Find and decode that branch insn. */
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
# define GETRA_LDST(RA) tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 8; /* skip the two insns */
......@@ -343,33 +338,32 @@ static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
b = (b << 8) >> (8 - 2); /* extract the displacement */
ra += 8; /* branches are relative to pc+8 */
ra += b; /* apply the displacement */
ra -= 4; /* return a pointer into the current opcode,
not the start of the next opcode */
return ra;
}
# elif defined(__aarch64__)
# define GETRA() ((uintptr_t)__builtin_return_address(0))
# define GETPC_LDST() tcg_getpc_ldst(GETRA())
static inline uintptr_t tcg_getpc_ldst(uintptr_t ra)
# define GETRA_LDST(RA) tcg_getra_ldst(RA)
static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
{
int32_t b;
ra += 4; /* skip one instruction */
b = *(int32_t *)ra; /* load the branch insn */
b = (b << 6) >> (6 - 2); /* extract the displacement */
ra += b; /* apply the displacement */
ra -= 4; /* return a pointer into the current opcode,
not the start of the next opcode */
return ra;
}
# else
# error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
# endif
#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
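Both tcg_getra_ldst() variants recover the fast-path address by decoding the branch insn that follows the call. A worked example of just the displacement arithmetic, on illustrative fixed encodings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* ARM: 0xEA000002 is an unconditional branch with imm24 = 2.
       Shifting left 8 drops the condition/opcode bits; the arithmetic
       right shift by 6 sign-extends imm24 and multiplies it by 4. */
    int32_t a = (int32_t)((uint32_t)0xEA000002 << 8) >> (8 - 2);
    printf("arm displacement: %d\n", a);      /* prints 8 */

    /* AArch64: 0x14000003 is "b #12" with imm26 = 3.  Shifting left 6
       drops the opcode; the right shift by 4 sign-extends imm26 and
       multiplies it by 4. */
    int32_t b = (int32_t)((uint32_t)0x14000003 << 6) >> (6 - 2);
    printf("aarch64 displacement: %d\n", b);  /* prints 12 */
    return 0;
}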
/* ??? Delete these once they are no longer used. */
bool is_tcg_gen_code(uintptr_t pc_ptr);
# ifndef GETPC_EXT
# define GETPC_EXT() (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
# endif
#ifdef GETRA_LDST
# define GETRA_EXT() tcg_getra_ext(GETRA())
static inline uintptr_t tcg_getra_ext(uintptr_t ra)
{
return is_tcg_gen_code(ra) ? GETRA_LDST(ra) : ra;
}
#else
# define GETPC_EXT() GETPC()
# define GETRA_EXT() GETRA()
#endif
#if !defined(CONFIG_USER_ONLY)
......@@ -383,7 +377,10 @@ bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
void tlb_fill(CPUArchState *env1, target_ulong addr, int is_write, int mmu_idx,
uintptr_t retaddr);
#include "exec/softmmu_defs.h"
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code
......
......@@ -39,12 +39,12 @@ static inline void gen_tb_start(void)
static void gen_tb_end(TranslationBlock *tb, int num_insns)
{
gen_set_label(exitreq_label);
tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_REQUESTED);
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_REQUESTED);
if (use_icount) {
*icount_arg = num_insns;
gen_set_label(icount_label);
tcg_gen_exit_tb((tcg_target_long)tb + TB_EXIT_ICOUNT_EXPIRED);
tcg_gen_exit_tb((uintptr_t)tb + TB_EXIT_ICOUNT_EXPIRED);
}
}
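The cast to uintptr_t matters because the value passed to tcg_gen_exit_tb() is not a plain pointer: TranslationBlock is word-aligned, so the low two bits of its address are free to carry the exit reason, which cpu_tb_exec() masks back out with TB_EXIT_MASK. A sketch of the tagging scheme (TB_EXIT_* values assumed to match exec-all.h):

#include <stdint.h>
#include <stdio.h>

#define TB_EXIT_MASK      3   /* assumed values */
#define TB_EXIT_REQUESTED 3

typedef struct TranslationBlock { int pc; } TranslationBlock; /* stand-in */

int main(void)
{
    static TranslationBlock tb;
    uintptr_t next_tb = (uintptr_t)&tb + TB_EXIT_REQUESTED;   /* tag */

    TranslationBlock *ptr =
        (TranslationBlock *)(next_tb & ~(uintptr_t)TB_EXIT_MASK);
    unsigned reason = next_tb & TB_EXIT_MASK;                 /* untag */

    printf("tb %p, exit reason %u\n", (void *)ptr, reason);
    return 0;
}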
......
/*
* Software MMU support
*
* Declare helpers used by TCG for qemu_ld/st ops.
*
* Used by softmmu_exec.h, TCG targets and exec-all.h.
*
*/
#ifndef SOFTMMU_DEFS_H
#define SOFTMMU_DEFS_H
uint8_t helper_ret_ldb_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint16_t helper_ret_ldw_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint32_t helper_ret_ldl_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
uint64_t helper_ret_ldq_mmu(CPUArchState *env, target_ulong addr,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
int mmu_idx, uintptr_t retaddr);
void helper_ret_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
int mmu_idx, uintptr_t retaddr);
uint8_t helper_ldb_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr, int mmu_idx);
void helper_stb_mmu(CPUArchState *env, target_ulong addr,
uint8_t val, int mmu_idx);
void helper_stw_mmu(CPUArchState *env, target_ulong addr,
uint16_t val, int mmu_idx);
void helper_stl_mmu(CPUArchState *env, target_ulong addr,
uint32_t val, int mmu_idx);
void helper_stq_mmu(CPUArchState *env, target_ulong addr,
uint64_t val, int mmu_idx);
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
#endif /* SOFTMMU_DEFS_H */
......@@ -19,7 +19,8 @@
#define ldul_executive ldl_executive
#define ldul_supervisor ldl_supervisor
#include "exec/softmmu_defs.h"
/* The memory helpers for tcg-generated code need tcg_target_long etc. */
#include "tcg.h"
#define ACCESS_TYPE 0
#define MEMSUFFIX MMU_MODE0_SUFFIX
......
......@@ -28,24 +28,40 @@
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#define LSUFFIX q
#define SDATA_TYPE int64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#define LSUFFIX l
#define SDATA_TYPE int32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#define LSUFFIX uw
#define SDATA_TYPE int16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#define LSUFFIX ub
#define SDATA_TYPE int8_t
#else
#error unsupported data size
#endif
#define DATA_TYPE glue(u, SDATA_TYPE)
/* For the benefit of TCG generated code, we want to avoid the complication
of ABI-specific return type promotion and always return a value extended
to the register size of the host. This is tcg_target_long, except in the
case of a 32-bit host and 64-bit data, and for that we always have
uint64_t. Don't bother with this widened value for SOFTMMU_CODE_ACCESS. */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE DATA_TYPE
# define USUFFIX SUFFIX
#else
# define WORD_TYPE tcg_target_ulong
# define USUFFIX glue(u, SUFFIX)
# define SSUFFIX glue(s, SUFFIX)
#endif
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
......@@ -77,15 +93,18 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
#ifdef SOFTMMU_CODE_ACCESS
static
#endif
DATA_TYPE
glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
WORD_TYPE
glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
......@@ -121,10 +140,12 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
addr1 = addr & ~(DATA_SIZE - 1);
addr2 = addr1 + DATA_SIZE;
res1 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr1,
mmu_idx, retaddr);
res2 = glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr2,
mmu_idx, retaddr);
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
res1 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
res2 = glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
......@@ -142,19 +163,33 @@ glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#endif
haddr = addr + env->tlb_table[mmu_idx][index].addend;
return glue(glue(ld, USUFFIX), _raw)((uint8_t *)haddr);
/* Note that ldl_raw is defined with type "int". */
return (DATA_TYPE) glue(glue(ld, LSUFFIX), _raw)((uint8_t *)haddr);
}
DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
int mmu_idx)
{
return glue(glue(helper_ret_ld, SUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
GETPC_EXT());
return glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(env, addr, mmu_idx,
GETRA_EXT());
}
#ifndef SOFTMMU_CODE_ACCESS
/* Provide signed versions of the load routines as well. We can of course
avoid this for 64-bit data, or for 32-bit data on a 32-bit host. */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE
glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong addr, int mmu_idx,
uintptr_t retaddr)
{
return (SDATA_TYPE) glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
(env, addr, mmu_idx, retaddr);
}
#endif
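For a concrete view of the names this produces: with DATA_SIZE == 2, the template now emits helper_ret_lduw_mmu (zero-extended to register width) plus the thin helper_ret_ldsw_mmu wrapper above. A self-contained sketch of the token pasting and the sign-extension step, with deliberately simplified signatures (the real helpers take env, addr, mmu_idx and retaddr):

#include <stdint.h>
#include <stdio.h>

#define xglue(x, y) x ## y   /* as in qemu's compiler.h */
#define glue(x, y)  xglue(x, y)

#define SUFFIX    w
#define USUFFIX   glue(u, SUFFIX)   /* -> uw */
#define SSUFFIX   glue(s, SUFFIX)   /* -> sw */
#define MMUSUFFIX _mmu

/* Zero-extending load: the name expands to helper_ret_lduw_mmu. */
static uintptr_t glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(uint16_t mem)
{
    return mem;   /* zero-extends to the host register width */
}

/* Signed wrapper: expands to helper_ret_ldsw_mmu, mirroring the template. */
static uintptr_t glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)(uint16_t mem)
{
    return (int16_t)glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)(mem);
}

int main(void)
{
    printf("%#lx %#lx\n",
           (unsigned long)helper_ret_lduw_mmu(0x8000),   /* 0x8000 */
           (unsigned long)helper_ret_ldsw_mmu(0x8000));  /* sign-extended */
    return 0;
}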
static inline void glue(io_write, SUFFIX)(CPUArchState *env,
hwaddr physaddr,
DATA_TYPE val,
......@@ -182,6 +217,9 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
uintptr_t haddr;
/* Adjust the given return address. */
retaddr -= GETPC_ADJ;
/* If the TLB entry is for a different page, reload and try again. */
if ((addr & TARGET_PAGE_MASK)
!= (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
......@@ -223,8 +261,10 @@ glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
#else
uint8_t val8 = val >> (i * 8);
#endif
/* Note the adjustment at the beginning of the function.
Undo that for the recursion. */
glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
mmu_idx, retaddr);
mmu_idx, retaddr + GETPC_ADJ);
}
return;
}
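Same bookkeeping as the load path: the helper subtracts GETPC_ADJ once on entry, so the byte-wise recursion for an unaligned store must pass retaddr + GETPC_ADJ to keep the effective address stable at every depth. A toy illustration of that invariant:

#include <stdint.h>
#include <stdio.h>

#define GETPC_ADJ 2

static void helper(uintptr_t retaddr, int depth)
{
    retaddr -= GETPC_ADJ;   /* entry adjustment, as in the template */
    printf("depth %d sees %#lx\n", depth, (unsigned long)retaddr);
    if (depth == 0) {
        /* undo our adjustment so the callee's own subtraction
           yields the same effective address */
        helper(retaddr + GETPC_ADJ, depth + 1);
    }
}

int main(void)
{
    helper(0x1000, 0);   /* both depths print 0xffe */
    return 0;
}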
......@@ -245,7 +285,7 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
DATA_TYPE val, int mmu_idx)
{
glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)(env, addr, val, mmu_idx,
GETPC_EXT());
GETRA_EXT());
}
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
......@@ -254,6 +294,10 @@ glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
......@@ -177,7 +177,7 @@ static void qtest_send_prefix(CharDriverState *chr)
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[S +" FMT_timeval "] ",
tv.tv_sec, (long) tv.tv_usec);
(long) tv.tv_sec, (long) tv.tv_usec);
}
static void GCC_FMT_ATTR(2, 3) qtest_send(CharDriverState *chr,
......@@ -225,7 +225,7 @@ static void qtest_process_command(CharDriverState *chr, gchar **words)
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[R +" FMT_timeval "]",
tv.tv_sec, (long) tv.tv_usec);
(long) tv.tv_sec, (long) tv.tv_usec);
for (i = 0; words[i]; i++) {
fprintf(qtest_log_fp, " %s", words[i]);
}
......@@ -485,7 +485,7 @@ static void qtest_event(void *opaque, int event)
qtest_opened = true;
if (qtest_log_fp) {
fprintf(qtest_log_fp, "[I " FMT_timeval "] OPENED\n",
start_time.tv_sec, (long) start_time.tv_usec);
(long) start_time.tv_sec, (long) start_time.tv_usec);
}
break;
case CHR_EVENT_CLOSED:
......@@ -494,7 +494,7 @@ static void qtest_event(void *opaque, int event)
qemu_timeval tv;
qtest_get_time(&tv);
fprintf(qtest_log_fp, "[I +" FMT_timeval "] CLOSED\n",
tv.tv_sec, (long) tv.tv_usec);
(long) tv.tv_sec, (long) tv.tv_usec);
}
break;
default:
......
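The qtest hunks above are x32 fallout too: FMT_timeval formats with "%ld", but time_t may be wider than long (it is 64-bit on x32), so tv_sec needs the same explicit cast that tv_usec already had. A standalone illustration, with FMT_timeval assumed to match qtest.c:

#include <stdio.h>
#include <sys/time.h>

#define FMT_timeval "%ld.%06ld"   /* assumed definition */

int main(void)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    /* Passing a 64-bit tv_sec for a "%ld" conversion on an ILP32 host
       is undefined; the casts make the arguments match the format. */
    printf("[I +" FMT_timeval "]\n", (long)tv.tv_sec, (long)tv.tv_usec);
    return 0;
}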
......@@ -415,7 +415,7 @@ static ExitStatus gen_bdirect(DisasContext *ctx, int ra, int32_t disp)
} else if (use_goto_tb(ctx, dest)) {
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, dest);
tcg_gen_exit_tb((tcg_target_long)ctx->tb);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
} else {
tcg_gen_movi_i64(cpu_pc, dest);
......@@ -434,12 +434,12 @@ static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, ctx->pc);
tcg_gen_exit_tb((tcg_target_long)ctx->tb);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
gen_set_label(lab_true);
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(cpu_pc, dest);
tcg_gen_exit_tb((tcg_target_long)ctx->tb + 1);
tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
return EXIT_GOTO_TB;
} else {
......@@ -1629,7 +1629,7 @@ static ExitStatus gen_call_pal(DisasContext *ctx, int palcode)
we change the PAL base register. */
if (!ctx->singlestep_enabled && !(ctx->tb->cflags & CF_LAST_IO)) {
tcg_gen_goto_tb(0);
tcg_gen_exit_tb((tcg_target_long)ctx->tb);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
return EXIT_GOTO_TB;
}
......
......@@ -3356,7 +3356,7 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
gen_set_pc_im(dest);
tcg_gen_exit_tb((tcg_target_long)tb + n);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_set_pc_im(dest);
tcg_gen_exit_tb(0);
......
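Each of these gen_goto_tb() conversions follows the same pattern: chain directly (goto_tb plus a tagged exit) only when source and destination share a guest page, so a direct jump can never outlive a change in that page's translation. A sketch of the page test, assuming 4 KiB target pages:

#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12   /* assumed page size */
#define TARGET_PAGE_MASK (~(uint32_t)((1u << TARGET_PAGE_BITS) - 1))

static int same_page(uint32_t pc, uint32_t dest)
{
    return (pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
}

int main(void)
{
    printf("%d %d\n",
           same_page(0x1000, 0x1ffc),   /* 1: same page, can chain */
           same_page(0x1ffc, 0x2000));  /* 0: crosses, full lookup */
    return 0;
}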
......@@ -558,7 +558,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb((tcg_target_long)tb + n);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(env_pc, dest);
tcg_gen_exit_tb(0);
......
......@@ -2413,7 +2413,7 @@ static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
/* jump to same page: we can use a direct jump */
tcg_gen_goto_tb(tb_num);
gen_jmp_im(eip);
tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
tcg_gen_exit_tb((uintptr_t)tb + tb_num);
} else {
/* jump to another page: currently not optimized */
gen_jmp_im(eip);
......
......@@ -6,6 +6,8 @@
#include "hw/lm32/lm32_pic.h"
#include "hw/char/lm32_juart.h"
#include "exec/softmmu_exec.h"
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu
#define SHIFT 0
......
......@@ -129,7 +129,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
likely(!dc->singlestep_enabled)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_pc, dest);
tcg_gen_exit_tb((tcg_target_long)tb + n);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
tcg_gen_movi_tl(cpu_pc, dest);
if (dc->singlestep_enabled) {
......
......@@ -869,7 +869,7 @@ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest)
(s->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_i32(QREG_PC, dest);
tcg_gen_exit_tb((tcg_target_long)tb + n);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {
gen_jmp_im(s, dest);
tcg_gen_exit_tb(0);
......
......@@ -138,7 +138,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
tcg_gen_goto_tb(n);
tcg_gen_movi_tl(cpu_SR[SR_PC], dest);
tcg_gen_exit_tb((tcg_target_long)tb + n);
tcg_gen_exit_tb((uintptr_t)tb + n);
} else {