qemu / Commits / b8076a74

Commit b8076a74
authored Apr 07, 2005 by bellard

    ia64 host support (David Mosberger)

    git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1360 c046a42c-6fe2-441c-8c8c-71466251a162

parent 7a674b13
Changes: 13 files changed, 738 additions (+738) and 64 deletions (-64)
Makefile.target        +6    -0
cpu-exec.c             +43   -0
disas.c                +6    -3
dyngen-exec.h          +8    -6
dyngen.c               +189  -17
dyngen.h               +220  -0
exec-all.h             +11   -0
exec.c                 +1    -1
ia64.ld                +211  -0
linux-user/mmap.c      +4    -2
linux-user/signal.c    +26   -31
qemu-img.c             +4    -4
vl.c                   +9    -0
Makefile.target

@@ -184,7 +184,9 @@ LDFLAGS+=-Wl,-T,$(SRC_PATH)/alpha.ld
endif
ifeq ($(ARCH),ia64)
CFLAGS+=-mno-sdata
OP_CFLAGS=$(CFLAGS)
LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld
endif
ifeq ($(ARCH),arm)
...
@@ -382,6 +384,10 @@ vl.o: CFLAGS+=-p
VL_LDFLAGS+=-p
endif
ifeq ($(ARCH),ia64)
VL_LDFLAGS+=-Wl,-G0 -Wl,-T,$(SRC_PATH)/ia64.ld
endif

$(QEMU_SYSTEM): $(VL_OBJS) libqemu.a
	$(CC) $(VL_LDFLAGS) -o $@ $^ $(LIBS) $(SDL_LIBS) $(COCOA_LIBS) $(VL_LIBS)
...
cpu-exec.c

@@ -573,6 +573,15 @@ int cpu_exec(CPUState *env1)
                         );
                }
            }
#elif defined(__ia64)
            struct fptr {
                    void *ip;
                    void *gp;
            } fp;

            fp.ip = tc_ptr;
            fp.gp = code_gen_buffer + 2*(1<<20);
            (*(void (*)(void)) &fp)();
#else
            gen_func();
#endif
...
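Note: on ia64 an indirect call goes through a function descriptor, a pair of code address (ip) and global pointer (gp), which is why the hunk above builds a struct fptr on the stack and calls through its address instead of branching to tc_ptr directly. The gp value, code_gen_buffer + 2*(1<<20), presumably parks gp in the middle of a roughly 4 MB window so that 22-bit gp-relative addressing (about +/-2 MB) can reach the buffer's generated data; the sketch below only illustrates that arithmetic and is an assumption, not something the commit states.

#include <stdio.h>

int main(void)
{
    unsigned long code_gen_buffer = 0;              /* pretend buffer base */
    unsigned long gp = code_gen_buffer + 2 * (1 << 20);
    long reach = 1L << 21;                          /* signed 22-bit immediate */

    printf("gp-relative window: [%ld, %ld) bytes from the buffer start\n",
           (long) (gp - reach) - (long) code_gen_buffer,
           (long) (gp + reach) - (long) code_gen_buffer);
    return 0;
}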
@@ -1118,6 +1127,40 @@ int cpu_signal_handler(int host_signum, struct siginfo *info,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
# define si_flags _sifields._sigfault._si_pad0
#endif

int cpu_signal_handler(int host_signum, struct siginfo *info, void *puc)
{
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_flags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long) info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed
...
disas.c

@@ -143,7 +143,8 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
#elif defined(TARGET_PPC)
    print_insn = print_insn_ppc;
#else
-   fprintf(out, "Asm output not supported on this arch\n");
+   fprintf(out, "0x" TARGET_FMT_lx
+           ": Asm output not supported on this arch\n", code);
    return;
#endif
...
@@ -202,7 +203,8 @@ void disas(FILE *out, void *code, unsigned long size)
#elif defined(__arm__)
    print_insn = print_insn_arm;
#else
-   fprintf(out, "Asm output not supported on this arch\n");
+   fprintf(out, "0x%lx: Asm output not supported on this arch\n",
+           (long) code);
    return;
#endif
    for (pc = (unsigned long)code; pc < (unsigned long)code + size; pc += count) {
...
@@ -311,7 +313,8 @@ void monitor_disas(target_ulong pc, int nb_insn, int is_physical, int flags)
#elif defined(TARGET_PPC)
    print_insn = print_insn_ppc;
#else
-   term_printf("Asm output not supported on this arch\n");
+   term_printf("0x" TARGET_FMT_lx
+               ": Asm output not supported on this arch\n", pc);
    return;
#endif
...
dyngen-exec.h

@@ -29,7 +29,7 @@ typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
/* XXX may be done for all 64 bits targets ? */
-#if defined (__x86_64__)
+#if defined (__x86_64__) || defined(__ia64)
typedef unsigned long uint64_t;
#else
typedef unsigned long long uint64_t;
...
@@ -38,7 +38,7 @@ typedef unsigned long long uint64_t;
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
-#if defined (__x86_64__)
+#if defined (__x86_64__) || defined(__ia64)
typedef signed long int64_t;
#else
typedef signed long long int64_t;
...
@@ -148,10 +148,10 @@ extern int printf(const char *, ...);
#define AREG4 "%d5"
#endif
#ifdef __ia64__
-#define AREG0 "r27"
-#define AREG1 "r24"
-#define AREG2 "r25"
-#define AREG3 "r26"
+#define AREG0 "r7"
+#define AREG1 "r4"
+#define AREG2 "r5"
+#define AREG3 "r6"
#endif
/* force GCC to generate only one epilog at the end of the function */
...
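Note: the AREGn strings name the host registers that the generated micro-ops pin CPU state to via GCC global register variables; the hunk above moves ia64 from r24-r27 to r4-r7, which the ia64 ABI treats as preserved (callee-saved) registers. The fragment below is only a rough sketch of the consuming idiom, with hypothetical variable names, not a quote of the tree:

/* How AREG0..AREG3 are typically consumed (sketch): pinning hot
   pointers/temporaries to fixed, callee-saved host registers. */
struct CPUState;                               /* opaque here */
#ifdef __ia64__
register struct CPUState *env asm("r7");       /* AREG0 */
register unsigned long T0 asm("r4");           /* AREG1 */
register unsigned long T1 asm("r5");           /* AREG2 */
register unsigned long T2 asm("r6");           /* AREG3 */
#endif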
@@ -224,6 +224,8 @@ extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
#endif
#ifdef __ia64__
#define EXIT_TB() asm volatile ("br.ret.sptk.many b0;;")
+#define GOTO_LABEL_PARAM(n) asm volatile ("br.sptk.many " \
+                                          ASM_NAME(__op_gen_label) #n)
#endif
#ifdef __sparc__
#define EXIT_TB() asm volatile ("jmpl %i0 + 8, %g0\n" \
...
dyngen.c

@@ -1203,6 +1203,48 @@ void get_reloc_expr(char *name, int name_size, const char *sym_name)
    }
}

#ifdef HOST_IA64

#define PLT_ENTRY_SIZE 16  /* 1 bundle containing "brl" */

struct plt_entry {
    struct plt_entry *next;
    const char *name;
    unsigned long addend;
} *plt_list;

static int
get_plt_index (const char *name, unsigned long addend)
{
    struct plt_entry *plt, *prev = NULL;
    int index = 0;

    /* see if we already have an entry for this target: */
    for (plt = plt_list; plt; ++index, prev = plt, plt = plt->next)
        if (strcmp(plt->name, name) == 0 && plt->addend == addend)
            return index;

    /* nope; create a new PLT entry: */
    plt = malloc(sizeof(*plt));
    if (!plt) {
        perror("malloc");
        exit(1);
    }
    memset(plt, 0, sizeof(*plt));
    plt->name = strdup(name);
    plt->addend = addend;

    /* append to plt-list: */
    if (prev)
        prev->next = plt;
    else
        plt_list = plt;
    return index;
}

#endif

#ifdef HOST_ARM

int arm_emit_ldr_info(const char *name, unsigned long start_offset,
...
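Note: a quick sanity check of the contract above: get_plt_index() hands out a stable 0-based index per distinct (name, addend) target, so every relocation against the same helper shares one PLT slot. The driver below is hypothetical (it assumes the struct plt_entry and get_plt_index definitions above are in scope, and the helper names are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* ... struct plt_entry and get_plt_index() as defined above ... */

int main(void)
{
    printf("%d\n", get_plt_index("op_helper_a", 0));   /* 0: first target          */
    printf("%d\n", get_plt_index("op_helper_b", 0));   /* 1: new target            */
    printf("%d\n", get_plt_index("op_helper_a", 0));   /* 0: reused slot           */
    printf("%d\n", get_plt_index("op_helper_a", 8));   /* 2: same name, new addend */
    return 0;
}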
@@ -1392,7 +1434,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
            /* 08 00 84 00 */
            if (get32((uint32_t *)p) != 0x00840008)
                error("br.ret.sptk.many b0;; expected at the end of %s", name);
-           copy_size = p - p_start;
+           copy_size = p_end - p_start;
        }
#elif defined(HOST_SPARC)
    {
...
@@ -1529,7 +1571,11 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
        }
        fprintf(outfile, ";\n");
    }
#if defined(HOST_IA64)
    fprintf(outfile, " extern char %s;\n", name);
#else
    fprintf(outfile, " extern void %s();\n", name);
#endif
    for(i = 0, rel = relocs; i < nb_relocs; i++, rel++) {
        host_ulong offset = get_rel_offset(rel);
...
@@ -1550,9 +1596,18 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
                continue;
            }
#endif
-#ifdef __APPLE__
+#if defined(__APPLE__)
        /* set __attribute((unused)) on darwin because we wan't to avoid warning when we don't use the symbol */
        fprintf(outfile, "extern char %s __attribute__((unused));\n", sym_name);
#elif defined(HOST_IA64)
        if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
                /*
                 * PCREL21 br.call targets generally
                 * are out of range and need to go
                 * through an "import stub".
                 */
                fprintf(outfile, " extern char %s;\n", sym_name);
#else
        fprintf(outfile, "extern char %s;\n", sym_name);
#endif
...
@@ -1964,25 +2019,78 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
    }
#elif defined(HOST_IA64)
    {
        unsigned long sym_idx;
        long code_offset;
        char name[256];
        int type;
-       int addend;
+       long addend;

        for(i = 0, rel = relocs; i < nb_relocs; i++, rel++) {
-           if (rel->r_offset >= start_offset &&
-               rel->r_offset < start_offset + copy_size) {
-               sym_name = strtab + symtab[ELF64_R_SYM(rel->r_info)].st_name;
-               get_reloc_expr(name, sizeof(name), sym_name);
-               type = ELF64_R_TYPE(rel->r_info);
-               addend = rel->r_addend;
-               switch(type) {
-               case R_IA64_LTOFF22:
-                   error("must implemnt R_IA64_LTOFF22 relocation");
-               case R_IA64_PCREL21B:
-                   error("must implemnt R_IA64_PCREL21B relocation");
-               default:
-                   error("unsupported ia64 relocation (%d)", type);
-               }
-           }
            sym_idx = ELF64_R_SYM(rel->r_info);
            if (rel->r_offset < start_offset
                || rel->r_offset >= start_offset + copy_size)
                continue;
            sym_name = (strtab + symtab[sym_idx].st_name);
            if (strstart(sym_name, "__op_jmp", &p)) {
                int n;
                n = strtol(p, NULL, 10);
                /* __op_jmp relocations are done at
                   runtime to do translated block
                   chaining: the offset of the instruction
                   needs to be stored */
                fprintf(outfile, " jmp_offsets[%d] ="
                        "%ld + (gen_code_ptr - gen_code_buf);\n",
                        n, rel->r_offset - start_offset);
                continue;
            }

            get_reloc_expr(name, sizeof(name), sym_name);
            type = ELF64_R_TYPE(rel->r_info);
            addend = rel->r_addend;
            code_offset = rel->r_offset - start_offset;
            switch (type) {
            case R_IA64_IMM64:
                fprintf(outfile, " ia64_imm64(gen_code_ptr + %ld, "
                        "%s + %ld);\n", code_offset, name, addend);
                break;
            case R_IA64_LTOFF22X:
            case R_IA64_LTOFF22:
                fprintf(outfile, " IA64_LTOFF(gen_code_ptr + %ld,"
                        " %s + %ld, %d);\n",
                        code_offset, name, addend,
                        (type == R_IA64_LTOFF22X));
                break;
            case R_IA64_LDXMOV:
                fprintf(outfile, " ia64_ldxmov(gen_code_ptr + %ld,"
                        " %s + %ld);\n", code_offset, name, addend);
                break;
            case R_IA64_PCREL21B:
                if (strstart(sym_name, "__op_gen_label", NULL)) {
                    fprintf(outfile, " ia64_imm21b(gen_code_ptr + %ld,"
                            " (long) (%s + %ld -\n\t\t"
                            "((long) gen_code_ptr + %ld)) >> 4);\n",
                            code_offset, name, addend,
                            code_offset & ~0xfUL);
                } else {
                    fprintf(outfile, " IA64_PLT(gen_code_ptr + %ld, "
                            "%d);\t/* %s + %ld */\n",
                            code_offset,
                            get_plt_index(sym_name, addend),
                            sym_name, addend);
                }
                break;
            default:
                error("unsupported ia64 relocation (0x%x)", type);
            }
        }
        fprintf(outfile, " ia64_nop_b(gen_code_ptr + %d);\n",
                copy_size - 16 + 2);
    }
#elif defined(HOST_SPARC)
    {
...
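Note: for orientation, the fprintf calls above write lines like the following into the generated file that dyngen produces; the output below is purely illustrative (helper name, PLT index, offsets, and copy_size = 48 are invented). The trailing ia64_nop_b() call patches slot 2 of the op's last bundle, apparently so the copied op falls through into the next op instead of returning:

    /* hypothetical output of gen_code() for one micro-op: */
    IA64_PLT(gen_code_ptr + 16, 3);    /* __op_helper_foo + 0 */
    ia64_nop_b(gen_code_ptr + 34);     /* 34 = 48 - 16 + 2 */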
@@ -2236,6 +2344,63 @@ fprintf(outfile,
" LDREntry *arm_ldr_ptr = arm_ldr_table;\n"
" uint32_t *arm_data_ptr = arm_data_table;\n");
#endif
#ifdef HOST_IA64
    {
        long addend, not_first = 0;
        unsigned long sym_idx;
        int index, max_index;
        const char *sym_name;
        EXE_RELOC *rel;

        max_index = -1;
        for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) {
            sym_idx = ELF64_R_SYM(rel->r_info);
            sym_name = (strtab + symtab[sym_idx].st_name);
            if (strstart(sym_name, "__op_gen_label", NULL))
                continue;
            if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
                continue;

            addend = rel->r_addend;
            index = get_plt_index(sym_name, addend);
            if (index <= max_index)
                continue;
            max_index = index;
            fprintf(outfile, " extern void %s(void);\n", sym_name);
        }

        fprintf(outfile,
                " struct ia64_fixup *plt_fixes = NULL, "
                "*ltoff_fixes = NULL;\n"
                " static long plt_target[] = {\n\t");

        max_index = -1;
        for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) {
            sym_idx = ELF64_R_SYM(rel->r_info);
            sym_name = (strtab + symtab[sym_idx].st_name);
            if (strstart(sym_name, "__op_gen_label", NULL))
                continue;
            if (ELF64_R_TYPE(rel->r_info) != R_IA64_PCREL21B)
                continue;

            addend = rel->r_addend;
            index = get_plt_index(sym_name, addend);
            if (index <= max_index)
                continue;
            max_index = index;

            if (not_first)
                fprintf(outfile, ",\n\t");
            not_first = 1;
            if (addend)
                fprintf(outfile, "(long) &%s + %ld", sym_name, addend);
            else
                fprintf(outfile, "(long) &%s", sym_name);
        }
        fprintf(outfile, "\n };\n"
                " unsigned int plt_offset[%u] = { 0 };\n", max_index + 1);
    }
#endif

fprintf(outfile,
        "\n"
...
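Note: the two loops above collect every distinct PCREL21B call target and then emit a PLT target table into the generated file. The shape of that emitted code looks roughly like the following (illustrative only; the helper names and count are hypothetical):

    /* illustrative output of the loops above: */
    extern void op_helper_a(void);
    extern void op_helper_b(void);
    struct ia64_fixup *plt_fixes = NULL, *ltoff_fixes = NULL;
    static long plt_target[] = {
        (long) &op_helper_a,
        (long) &op_helper_b
    };
    unsigned int plt_offset[2] = { 0 };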
@@ -2298,6 +2463,13 @@ fprintf(outfile,
" }\n"
" the_end:\n"
);
#ifdef HOST_IA64
    fprintf(outfile,
            " ia64_apply_fixes(&gen_code_ptr, ltoff_fixes, "
            "(uint64_t) code_gen_buffer + 2*(1<<20), plt_fixes,\n\t\t\t"
            "sizeof(plt_target)/sizeof(plt_target[0]),\n\t\t\t"
            "plt_target, plt_offset);\n");
#endif

/* generate some code patching */
#ifdef HOST_ARM
...
dyngen.h

@@ -43,6 +43,11 @@ static inline void flush_icache_range(unsigned long start, unsigned long stop)
#ifdef __ia64__
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start));
        start += 32;
    }
    asm volatile (";;sync.i;;srlz.i;;");
}
#endif
...
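Note: flush_icache_range() is the hook the code generator calls after writing a translated block, so the ia64 version flushes the range one 32-byte line at a time with fc and then serializes with sync.i / srlz.i. A hedged sketch of the call pattern (hypothetical wrapper, assuming the definition above is in scope):

#include <string.h>

/* Hypothetical wrapper: install freshly generated code, then make it
   visible to instruction fetch before jumping to it. */
static void install_code(unsigned char *dst, const unsigned char *src,
                         unsigned long size)
{
    memcpy(dst, src, size);
    flush_icache_range((unsigned long) dst, (unsigned long) dst + size);
}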
@@ -204,3 +209,218 @@ static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr,
}

#endif /* __arm__ */

#ifdef __ia64

/* Patch instruction with "val" where "mask" has 1 bits. */
static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
{
    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
# define insn_mask ((1UL << 41) - 1)
    unsigned long shift;

    b0 = b[0]; b1 = b[1];
    shift = 5 + 41 * (insn_addr % 16);  /* 5 template, 3 x 41-bit insns */
    if (shift >= 64) {
        m1 = mask << (shift - 64);
        v1 = val << (shift - 64);
    } else {
        m0 = mask << shift; m1 = mask >> (64 - shift);
        v0 = val << shift;  v1 = val >> (64 - shift);
        b[0] = (b0 & ~m0) | (v0 & m0);
    }
    b[1] = (b1 & ~m1) | (v1 & m1);
}
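/* Note (not part of the commit): ia64_patch() expects the slot number
   (0-2) of the target instruction to be encoded in the low bits of
   insn_addr, and turns it into a bit offset past the 5-bit bundle
   template. The standalone sketch below only reproduces that mapping. */
#include <stdio.h>

int main(void)
{
    /* Same arithmetic as ia64_patch(): each 128-bit bundle holds a
       5-bit template followed by three 41-bit instruction slots. */
    for (unsigned long slot = 0; slot < 3; ++slot) {
        unsigned long shift = 5 + 41 * slot;
        printf("slot %lu occupies bundle bits %lu..%lu\n",
               slot, shift, shift + 40);
    }
    return 0;
}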
static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
{
    ia64_patch(insn_addr,
               0x011ffffe000UL,
               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
}

static inline void ia64_imm64 (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    ia64_patch(insn_addr + 2,
               0x01fffefe000UL,
               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x000000000000ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x000000000000007fUL) << 13) /* bit  0 -> 13 */)
               );
    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}

static inline void ia64_imm60b (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch_imm60(insn_addr + 2, val);
}

static inline void ia64_imm22 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */
               ));
}

/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has
   the effect of turning "addl rX=imm22,rY" into "addl
   rX=imm22,r0". */
static inline void ia64_imm22_r0 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */
               ));
}

static inline void ia64_imm21b (void *insn, uint64_t val)
{
    if (val + (1 << 20) >= (1 << 21))
        fprintf(stderr, "%s: value %li out of IMM21b range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                | ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */
               ));
}

static inline void ia64_nop_b (void *insn)
{
    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
}

static inline void ia64_ldxmov(void *insn, uint64_t val)
{
    if (val + (1 << 21) < (1 << 22))
        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
}

static inline int ia64_patch_ltoff(void *insn, uint64_t val, int relaxable)
{
    if (relaxable && (val + (1 << 21) < (1 << 22))) {
        ia64_imm22_r0(insn, val);
        return 0;
    }
    return 1;
}

struct ia64_fixup {
    struct ia64_fixup *next;
    void *addr;                 /* address that needs to be patched */
    long value;
};
#define IA64_PLT(insn, plt_index) \
do { \
struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \
fixup->next = plt_fixes; \
plt_fixes = fixup; \
fixup->addr = (insn); \
fixup->value = (plt_index); \
plt_offset[(plt_index)] = 1; \
} while (0)
#define IA64_LTOFF(insn, val, relaxable) \
do { \
if (ia64_patch_ltoff(insn, val, relaxable)) { \
struct ia64_fixup *fixup = alloca(sizeof(*fixup)); \
fixup->next = ltoff_fixes; \
ltoff_fixes = fixup; \
fixup->addr = (insn); \
fixup->value = (val); \
} \
} while (0)
static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
                                     struct ia64_fixup *ltoff_fixes,
                                     uint64_t gp,
                                     struct ia64_fixup *plt_fixes,
                                     int num_plts,
                                     unsigned long *plt_target,
                                     unsigned int *plt_offset)
{
    static const uint8_t plt_bundle[] = {
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,

        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
    };
    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start, *vp;
    struct ia64_fixup *fixup;
    unsigned int offset = 0;
    struct fdesc {
        long ip;
        long gp;
    } *fdesc;
    int i;

    if (plt_fixes) {
        plt_start = gen_code_ptr;

        for (i = 0; i < num_plts; ++i) {
            if (plt_offset[i]) {
                plt_offset[i] = offset;
                offset += sizeof(plt_bundle);

                fdesc = (struct fdesc *) plt_target[i];
                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
                ia64_imm60b(gen_code_ptr + 0x12,
                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
                gen_code_ptr += sizeof(plt_bundle);
            }
        }

        for (fixup = plt_fixes; fixup; fixup = fixup->next)
            ia64_imm21b(fixup->addr,
                        ((long) plt_start + plt_offset[fixup->value]
                         - ((long) fixup->addr & ~0xf)) >> 4);
    }

    got_start = gen_code_ptr;

    /* First, create the GOT: */
    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
        /* first check if we already have this value in the GOT: */
        for (vp = got_start; vp < gen_code_ptr; ++vp)
            if (*(uint64_t *) vp == fixup->value)
                break;