Commit fdbb84d1 authored by Yeongkyoon Lee, committed by Blue Swirl

tcg: Add extended GETPC mechanism for MMU helpers with ldst optimization

Add GETPC_EXT, which is used by MMU helpers to selectively calculate the code
address of a guest memory access when called from qemu_ld/st optimized code
or from a C function. Currently, it supports only i386 and x86-64 hosts.
Signed-off-by: Yeongkyoon Lee <yeongkyoon.lee@samsung.com>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
parent 32761257
@@ -310,6 +310,42 @@ extern uintptr_t tci_tb_ptr;
 # define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)
 #endif
 
+#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
+/* qemu_ld/st optimization split code generation to fast and slow path, thus,
+   it needs special handling for an MMU helper which is called from the slow
+   path, to get the fast path's pc without any additional argument.
+   It uses a tricky solution which embeds the fast path pc into the slow path.
+
+   Code flow in slow path:
+   (1) pre-process
+   (2) call MMU helper
+   (3) jump to (5)
+   (4) fast path information (implementation specific)
+   (5) post-process (e.g. stack adjust)
+   (6) jump to corresponding code of the next of fast path
+ */
+# if defined(__i386__) || defined(__x86_64__)
+/* To avoid broken disassembling, long jmp is used for embedding fast path pc,
+   so that the destination is the next code of fast path, though this jmp is
+   never executed.
+
+   call MMU helper
+   jmp POST_PROC (2byte)    <- GETRA()
+   jmp NEXT_CODE (5byte)
+   POST_PROCESS ...         <- GETRA() + 7
+ */
+# define GETRA() ((uintptr_t)__builtin_return_address(0))
+# define GETPC_LDST() ((uintptr_t)(GETRA() + 7 + \
+                                   *(int32_t *)((void *)GETRA() + 3) - 1))
+# else
+#  error "CONFIG_QEMU_LDST_OPTIMIZATION needs GETPC_LDST() implementation!"
+# endif
+bool is_tcg_gen_code(uintptr_t pc_ptr);
+# define GETPC_EXT()  (is_tcg_gen_code(GETRA()) ? GETPC_LDST() : GETPC())
+#else
+# define GETPC_EXT() GETPC()
+#endif
+
 #if !defined(CONFIG_USER_ONLY)
 struct MemoryRegion *iotlb_to_region(hwaddr index);
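The offsets in GETPC_LDST() fall straight out of the x86 encoding described in the comment above: GETRA() points at the 2-byte short jmp (EB imm8) that follows the call, the 5-byte near jmp (E9 rel32) starts at GETRA() + 2 with its rel32 operand at GETRA() + 3, and that jmp's target is (GETRA() + 2 + 5) + rel32 = GETRA() + 7 + rel32. The trailing -1 keeps the result inside the translated block, matching GETPC(). The following minimal, self-contained sketch (a synthetic byte buffer and hypothetical function names, not QEMU code; assumes a little-endian host, which holds wherever the real macro runs) performs the same decode:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Decode the pc exactly as GETPC_LDST() does.  'ra' plays the role of
 * GETRA(): it points at the 2-byte short jmp that follows the call.
 * The 5-byte near jmp starts at ra + 2, its rel32 operand at ra + 3,
 * and its target is (ra + 2 + 5) + rel32 = ra + 7 + rel32.  The final
 * -1 makes the result fall inside the fast path's code, as GETPC() does. */
static uintptr_t decode_ldst_pc(const uint8_t *ra)
{
    int32_t rel32;
    memcpy(&rel32, ra + 3, sizeof(rel32));  /* native (little-endian) read */
    return (uintptr_t)ra + 7 + rel32 - 1;
}

int main(void)
{
    /* Synthetic slow-path tail: jmp short +9, then jmp near +16. */
    const uint8_t slow_path[] = {
        0xEB, 0x09,                   /* jmp POST_PROC  <- GETRA() */
        0xE9, 0x10, 0x00, 0x00, 0x00, /* jmp NEXT_CODE (rel32 = 16) */
    };
    uintptr_t pc = decode_ldst_pc(slow_path);
    printf("pc - ra = %td\n", (ptrdiff_t)(pc - (uintptr_t)slow_path));
    return 0;
}

Compiled and run, this prints pc - ra = 22, i.e. 7 + 16 - 1 bytes past the short jmp, which is exactly what GETPC_LDST() would compute for a rel32 of 16.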
@@ -1390,6 +1390,17 @@ void tb_link_page(TranslationBlock *tb,
     mmap_unlock();
 }
 
+#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
+/* check whether the given addr is in TCG generated code buffer or not */
+bool is_tcg_gen_code(uintptr_t tc_ptr)
+{
+    /* This can be called during code generation, code_gen_buffer_max_size
+       is used instead of code_gen_ptr for upper boundary checking */
+    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
+            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
+}
+#endif
+
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
    tb[1].tc_ptr. Return NULL if not found */
 TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
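As the comment notes, the upper bound is the buffer's maximum size rather than the current write pointer, because the helper may run while a translation block is still being generated. A standalone sketch of the same half-open range check (the globals here are local stand-ins for QEMU's code_gen_buffer and code_gen_buffer_max_size, declared so the snippet compiles on its own):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for QEMU's globals, for illustration only. */
static uint8_t code_gen_buffer[64 * 1024];
static const size_t code_gen_buffer_max_size = sizeof(code_gen_buffer);

/* Half-open range check as in the patch: using the full buffer size as
 * the upper bound keeps the test valid even mid-generation, before the
 * write pointer has advanced past freshly emitted code. */
static bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    return tc_ptr >= (uintptr_t)code_gen_buffer &&
           tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size);
}

int main(void)
{
    uintptr_t inside  = (uintptr_t)code_gen_buffer + 128;
    uintptr_t outside = (uintptr_t)&inside;  /* a stack address */
    printf("inside: %d, outside: %d\n",
           is_tcg_gen_code(inside), is_tcg_gen_code(outside));
    return 0;
}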
@@ -111,13 +111,13 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GETPC_EXT();
             ioaddr = env->iotlb[mmu_idx][index];
             res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
 #endif
@@ -128,7 +128,7 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GETPC_EXT();
                 do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
             }
 #endif
@@ -138,7 +138,7 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
@@ -257,12 +257,12 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             /* IO access */
             if ((addr & (DATA_SIZE - 1)) != 0)
                 goto do_unaligned_access;
-            retaddr = GETPC();
+            retaddr = GETPC_EXT();
             ioaddr = env->iotlb[mmu_idx][index];
             glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
         } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = GETPC();
+            retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
 #endif
@@ -273,7 +273,7 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
             uintptr_t addend;
 #ifdef ALIGNED_ONLY
             if ((addr & (DATA_SIZE - 1)) != 0) {
-                retaddr = GETPC();
+                retaddr = GETPC_EXT();
                 do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
             }
 #endif
@@ -283,7 +283,7 @@ void glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = GETPC();
+        retaddr = GETPC_EXT();
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0)
             do_unaligned_access(env, addr, 1, mmu_idx, retaddr);
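Every GETPC() call site in the load and store helpers above becomes GETPC_EXT(), so the helpers keep working whether they are reached from ordinary C code or from the optimized qemu_ld/st slow path. The retaddr they compute is threaded into io_read/io_write and tlb_fill, so a faulting access can be mapped back to the guest instruction that caused it. For reference, a minimal sketch of the underlying GETPC() idiom itself (GCC/clang builtins; helper_like is a hypothetical name, not a QEMU helper):

#include <stdint.h>
#include <stdio.h>

/* The GETPC() idiom the helpers rely on: capture the caller's return
 * address and subtract 1, so the value points inside the call site's
 * code region rather than at the first byte after the call
 * (tb_find_pc() searches half-open TB ranges). */
#define GETPC() ((uintptr_t)__builtin_return_address(0) - 1)

__attribute__((noinline)) static void helper_like(void)
{
    uintptr_t retaddr = GETPC();
    printf("called from near %p\n", (void *)retaddr);
}

int main(void)
{
    helper_like();  /* prints an address just before the return point of this call */
    return 0;
}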