Commit 182735ef authored by Andreas Färber

cpu: Make first_cpu and next_cpu CPUState

Move next_cpu from CPU_COMMON to CPUState.
Move first_cpu variable to qom/cpu.h.

gdbstub needs to use CPUState::env_ptr for now.
cpu_copy() no longer needs to save and restore next_cpu.
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
[AF: Rebased, simplified cpu_copy()]
Signed-off-by: Andreas Färber <afaerber@suse.de>
parent 9b056fcc
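
For orientation, here is a minimal, self-contained sketch of the pattern this commit converts callers to. It is not part of the patch; CPUState, CPUArchState, dump_cpus() and dummy_reg below are simplified stand-ins, not the real QEMU definitions. The CPU list is now linked through CPUState, and per-target register state is reached through CPUState::env_ptr only where it is still needed, as gdbstub does.

/* Illustrative sketch only: simplified stand-ins for QEMU's CPUState and
 * CPUArchState; dump_cpus() and dummy_reg are hypothetical. */
#include <stdio.h>

typedef struct CPUArchState {
    int dummy_reg;                  /* stand-in for per-target registers */
} CPUArchState;

typedef struct CPUState CPUState;
struct CPUState {
    int cpu_index;
    void *env_ptr;                  /* points to the per-target CPUArchState */
    CPUState *next_cpu;             /* after this commit, the link lives here */
};

static CPUState *first_cpu;         /* after this commit, a CPUState pointer */

/* New-style walk: iterate CPUState directly and dereference env_ptr only
 * where per-target state is actually needed (as gdbstub still does). */
static void dump_cpus(void)
{
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CPUArchState *env = cpu->env_ptr;
        printf("CPU #%d: dummy_reg=%d\n", cpu->cpu_index, env->dummy_reg);
    }
}

int main(void)
{
    /* Build a two-CPU list by hand, mirroring cpu_exec_init()'s append. */
    CPUArchState env0 = { 10 }, env1 = { 11 };
    CPUState cpu1 = { 1, &env1, NULL };
    CPUState cpu0 = { 0, &env0, &cpu1 };

    first_cpu = &cpu0;
    dump_cpus();
    return 0;
}

Compiled with any C compiler, the sketch prints one line per CPU; the real code in the diff below applies the same iteration shape throughout cpus.c, dump, exec and gdbstub.
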
@@ -60,7 +60,7 @@
#endif /* CONFIG_LINUX */
static CPUArchState *next_cpu;
static CPUState *next_cpu;
static bool cpu_thread_is_idle(CPUState *cpu)
{
@@ -79,10 +79,10 @@ static bool cpu_thread_is_idle(CPUState *cpu)
static bool all_cpu_threads_idle(void)
{
CPUArchState *env;
CPUState *cpu;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
if (!cpu_thread_is_idle(ENV_GET_CPU(env))) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
if (!cpu_thread_is_idle(cpu)) {
return false;
}
}
@@ -388,15 +388,13 @@ void configure_icount(const char *option)
void hw_error(const char *fmt, ...)
{
va_list ap;
CPUArchState *env;
CPUState *cpu;
va_start(ap, fmt);
fprintf(stderr, "qemu: hardware error: ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
for (env = first_cpu; env != NULL; env = env->next_cpu) {
cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU);
}
@@ -406,28 +404,28 @@ void hw_error(const char *fmt, ...)
void cpu_synchronize_all_states(void)
{
CPUArchState *env;
CPUState *cpu;
for (env = first_cpu; env; env = env->next_cpu) {
cpu_synchronize_state(ENV_GET_CPU(env));
for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
cpu_synchronize_state(cpu);
}
}
void cpu_synchronize_all_post_reset(void)
{
CPUArchState *cpu;
CPUState *cpu;
for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
cpu_synchronize_post_reset(ENV_GET_CPU(cpu));
cpu_synchronize_post_reset(cpu);
}
}
void cpu_synchronize_all_post_init(void)
{
CPUArchState *cpu;
CPUState *cpu;
for (cpu = first_cpu; cpu; cpu = cpu->next_cpu) {
cpu_synchronize_post_init(ENV_GET_CPU(cpu));
cpu_synchronize_post_init(cpu);
}
}
@@ -698,7 +696,7 @@ static void qemu_wait_io_event_common(CPUState *cpu)
static void qemu_tcg_wait_io_event(void)
{
CPUArchState *env;
CPUState *cpu;
while (all_cpu_threads_idle()) {
/* Start accounting real time to the virtual clock if the CPUs
@@ -711,8 +709,8 @@ static void qemu_tcg_wait_io_event(void)
qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
}
for (env = first_cpu; env != NULL; env = env->next_cpu) {
qemu_wait_io_event_common(ENV_GET_CPU(env));
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
qemu_wait_io_event_common(cpu);
}
}
@@ -814,7 +812,6 @@ static void tcg_signal_cpu_creation(CPUState *cpu, void *data)
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
CPUArchState *env;
qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
@@ -824,12 +821,12 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_cond_signal(&qemu_cpu_cond);
/* wait for initial kick-off after machine start */
while (ENV_GET_CPU(first_cpu)->stopped) {
while (first_cpu->stopped) {
qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
/* process any pending work */
for (env = first_cpu; env != NULL; env = env->next_cpu) {
qemu_wait_io_event_common(ENV_GET_CPU(env));
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
qemu_wait_io_event_common(cpu);
}
}
@@ -923,7 +920,7 @@ void qemu_mutex_lock_iothread(void)
} else {
iothread_requesting_mutex = true;
if (qemu_mutex_trylock(&qemu_global_mutex)) {
qemu_cpu_kick_thread(ENV_GET_CPU(first_cpu));
qemu_cpu_kick_thread(first_cpu);
qemu_mutex_lock(&qemu_global_mutex);
}
iothread_requesting_mutex = false;
@@ -938,14 +935,13 @@ void qemu_mutex_unlock_iothread(void)
static int all_vcpus_paused(void)
{
CPUArchState *penv = first_cpu;
CPUState *cpu = first_cpu;
while (penv) {
CPUState *pcpu = ENV_GET_CPU(penv);
if (!pcpu->stopped) {
while (cpu) {
if (!cpu->stopped) {
return 0;
}
penv = penv->next_cpu;
cpu = cpu->next_cpu;
}
return 1;
@@ -953,25 +949,23 @@ static int all_vcpus_paused(void)
void pause_all_vcpus(void)
{
CPUArchState *penv = first_cpu;
CPUState *cpu = first_cpu;
qemu_clock_enable(vm_clock, false);
while (penv) {
CPUState *pcpu = ENV_GET_CPU(penv);
pcpu->stop = true;
qemu_cpu_kick(pcpu);
penv = penv->next_cpu;
while (cpu) {
cpu->stop = true;
qemu_cpu_kick(cpu);
cpu = cpu->next_cpu;
}
if (qemu_in_vcpu_thread()) {
cpu_stop_current();
if (!kvm_enabled()) {
penv = first_cpu;
while (penv) {
CPUState *pcpu = ENV_GET_CPU(penv);
pcpu->stop = false;
pcpu->stopped = true;
penv = penv->next_cpu;
cpu = first_cpu;
while (cpu) {
cpu->stop = false;
cpu->stopped = true;
cpu = cpu->next_cpu;
}
return;
}
@@ -979,10 +973,10 @@ void pause_all_vcpus(void)
while (!all_vcpus_paused()) {
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
penv = first_cpu;
while (penv) {
qemu_cpu_kick(ENV_GET_CPU(penv));
penv = penv->next_cpu;
cpu = first_cpu;
while (cpu) {
qemu_cpu_kick(cpu);
cpu = cpu->next_cpu;
}
}
}
@@ -996,13 +990,12 @@ void cpu_resume(CPUState *cpu)
void resume_all_vcpus(void)
{
CPUArchState *penv = first_cpu;
CPUState *cpu = first_cpu;
qemu_clock_enable(vm_clock, true);
while (penv) {
CPUState *pcpu = ENV_GET_CPU(penv);
cpu_resume(pcpu);
penv = penv->next_cpu;
while (cpu) {
cpu_resume(cpu);
cpu = cpu->next_cpu;
}
}
@@ -1151,8 +1144,8 @@ static void tcg_exec_all(void)
next_cpu = first_cpu;
}
for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
CPUArchState *env = next_cpu;
CPUState *cpu = ENV_GET_CPU(env);
CPUState *cpu = next_cpu;
CPUArchState *env = cpu->env_ptr;
qemu_clock_enable(vm_clock,
(env->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1172,12 +1165,10 @@ static void tcg_exec_all(void)
void set_numa_modes(void)
{
CPUArchState *env;
CPUState *cpu;
int i;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
for (i = 0; i < nb_numa_nodes; i++) {
if (test_bit(cpu->cpu_index, node_cpumask[i])) {
cpu->numa_node = i;
@@ -1197,18 +1188,30 @@ void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
CpuInfoList *qmp_query_cpus(Error **errp)
{
CpuInfoList *head = NULL, *cur_item = NULL;
CPUArchState *env;
CPUState *cpu;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
CPUState *cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
CpuInfoList *info;
#if defined(TARGET_I386)
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
MIPSCPU *mips_cpu = MIPS_CPU(cpu);
CPUMIPSState *env = &mips_cpu->env;
#endif
cpu_synchronize_state(cpu);
info = g_malloc0(sizeof(*info));
info->value = g_malloc0(sizeof(*info->value));
info->value->CPU = cpu->cpu_index;
info->value->current = (env == first_cpu);
info->value->current = (cpu == first_cpu);
info->value->halted = cpu->halted;
info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
@@ -1316,11 +1319,14 @@ exit:
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
CPUArchState *env;
CPUState *cs;
for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
if (!env->apic_state) {
cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_NMI);
cpu_interrupt(cs, CPU_INTERRUPT_NMI);
} else {
apic_deliver_nmi(env->apic_state);
}
......
@@ -186,11 +186,13 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
CPUState *cpu;
CPUArchState *env;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
int mmu_idx;
env = cpu->env_ptr;
for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
unsigned int i;
......
@@ -275,13 +275,11 @@ static inline int cpu_index(CPUState *cpu)
static int write_elf64_notes(DumpState *s)
{
CPUArchState *env;
CPUState *cpu;
int ret;
int id;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
id = cpu_index(cpu);
ret = cpu_write_elf64_note(fd_write_vmcore, cpu, id, s);
if (ret < 0) {
@@ -290,7 +288,7 @@ static int write_elf64_notes(DumpState *s)
}
}
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
ret = cpu_write_elf64_qemunote(fd_write_vmcore, cpu, s);
if (ret < 0) {
dump_error(s, "dump: failed to write CPU status.\n");
@@ -327,13 +325,11 @@ static int write_elf32_note(DumpState *s)
static int write_elf32_notes(DumpState *s)
{
CPUArchState *env;
CPUState *cpu;
int ret;
int id;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
id = cpu_index(cpu);
ret = cpu_write_elf32_note(fd_write_vmcore, cpu, id, s);
if (ret < 0) {
@@ -342,7 +338,7 @@ static int write_elf32_notes(DumpState *s)
}
}
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
ret = cpu_write_elf32_qemunote(fd_write_vmcore, cpu, s);
if (ret < 0) {
dump_error(s, "dump: failed to write CPU status.\n");
@@ -705,7 +701,7 @@ static ram_addr_t get_start_block(DumpState *s)
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
int64_t begin, int64_t length, Error **errp)
{
CPUArchState *env;
CPUState *cpu;
int nr_cpus;
Error *err = NULL;
int ret;
@@ -738,7 +734,7 @@ static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
*/
cpu_synchronize_all_states();
nr_cpus = 0;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
nr_cpus++;
}
......
@@ -69,7 +69,7 @@ static MemoryRegion io_mem_unassigned;
#endif
CPUArchState *first_cpu;
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
@@ -351,27 +351,26 @@ const VMStateDescription vmstate_cpu_common = {
CPUState *qemu_get_cpu(int index)
{
CPUArchState *env = first_cpu;
CPUState *cpu = NULL;
CPUState *cpu = first_cpu;
while (env) {
cpu = ENV_GET_CPU(env);
while (cpu) {
if (cpu->cpu_index == index) {
break;
}
env = env->next_cpu;
cpu = cpu->next_cpu;
}
return env ? cpu : NULL;
return cpu;
}
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
CPUArchState *env = first_cpu;
CPUState *cpu;
while (env) {
func(ENV_GET_CPU(env), data);
env = env->next_cpu;
cpu = first_cpu;
while (cpu) {
func(cpu, data);
cpu = cpu->next_cpu;
}
}
@@ -379,17 +378,17 @@ void cpu_exec_init(CPUArchState *env)
{
CPUState *cpu = ENV_GET_CPU(env);
CPUClass *cc = CPU_GET_CLASS(cpu);
CPUArchState **penv;
CPUState **pcpu;
int cpu_index;
#if defined(CONFIG_USER_ONLY)
cpu_list_lock();
#endif
env->next_cpu = NULL;
penv = &first_cpu;
cpu->next_cpu = NULL;
pcpu = &first_cpu;
cpu_index = 0;
while (*penv != NULL) {
penv = &(*penv)->next_cpu;
while (*pcpu != NULL) {
pcpu = &(*pcpu)->next_cpu;
cpu_index++;
}
cpu->cpu_index = cpu_index;
@@ -399,7 +398,7 @@ void cpu_exec_init(CPUArchState *env)
#ifndef CONFIG_USER_ONLY
cpu->thread_id = qemu_get_thread_id();
#endif
*penv = env;
*pcpu = cpu;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
#endif
@@ -638,7 +637,6 @@ void cpu_abort(CPUArchState *env, const char *fmt, ...)
CPUArchState *cpu_copy(CPUArchState *env)
{
CPUArchState *new_env = cpu_init(env->cpu_model_str);
CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
CPUWatchpoint *wp;
@@ -646,9 +644,6 @@ CPUArchState *cpu_copy(CPUArchState *env)
memcpy(new_env, env, sizeof(CPUArchState));
/* Preserve chaining. */
new_env->next_cpu = next_cpu;
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
@@ -1757,12 +1752,14 @@ static void core_commit(MemoryListener *listener)
static void tcg_commit(MemoryListener *listener)
{
CPUArchState *env;
CPUState *cpu;
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
/* XXX: slow ! */
for(env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
CPUArchState *env = cpu->env_ptr;
tlb_flush(env, 1);
}
}
......
@@ -1839,6 +1839,7 @@ static const char *get_feature_xml(const char *p, const char **newp)
/* Generate the XML description for this CPU. */
if (!target_xml[0]) {
GDBRegisterState *r;
CPUArchState *env = first_cpu->env_ptr;
snprintf(target_xml, sizeof(target_xml),
"<?xml version=\"1.0\"?>"
@@ -1847,7 +1848,7 @@ static const char *get_feature_xml(const char *p, const char **newp)
"<xi:include href=\"%s\"/>",
GDB_CORE_XML);
for (r = first_cpu->gdb_regs; r; r = r->next) {
for (r = env->gdb_regs; r; r = r->next) {
pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
pstrcat(target_xml, sizeof(target_xml), r->xml);
pstrcat(target_xml, sizeof(target_xml), "\"/>");
@@ -1949,6 +1950,7 @@ static const int xlat_gdb_type[] = {
static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
{
CPUState *cpu;
CPUArchState *env;
int err = 0;
@@ -1958,7 +1960,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
env = cpu->env_ptr;
err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
if (err)
break;
@@ -1968,7 +1971,8 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
env = cpu->env_ptr;
err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
NULL);
if (err)
@@ -1983,6 +1987,7 @@ static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
{
CPUState *cpu;
CPUArchState *env;
int err = 0;
@@ -1992,7 +1997,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
switch (type) {
case GDB_BREAKPOINT_SW:
case GDB_BREAKPOINT_HW:
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
env = cpu->env_ptr;
err = cpu_breakpoint_remove(env, addr, BP_GDB);
if (err)
break;
@@ -2002,7 +2008,8 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
case GDB_WATCHPOINT_WRITE:
case GDB_WATCHPOINT_READ:
case GDB_WATCHPOINT_ACCESS:
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
env = cpu->env_ptr;
err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
if (err)
break;
@@ -2016,6 +2023,7 @@ static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
static void gdb_breakpoint_remove_all(void)
{
CPUState *cpu;
CPUArchState *env;
if (kvm_enabled()) {
@@ -2023,7 +2031,8 @@ static void gdb_breakpoint_remove_all(void)
return;
}
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
env = cpu->env_ptr;
cpu_breakpoint_remove_all(env, BP_GDB);
#ifndef CONFIG_USER_ONLY
cpu_watchpoint_remove_all(env, BP_GDB);
@@ -2071,13 +2080,11 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
static CPUArchState *find_cpu(uint32_t thread_id)
{
CPUArchState *env;
CPUState *cpu;
for (env = first_cpu; env != NULL; env = env->next_cpu) {
cpu = ENV_GET_CPU(env);
for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
if (cpu_index(cpu) == thread_id) {
return env;
return cpu->env_ptr;
}
}
@@ -2394,7 +2401,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
put_packet(s, "QC1");
break;
} else if (strcmp(p,"fThreadInfo") == 0) {
s->query_cpu = first_cpu;
s->query_cpu = first_cpu->env_ptr;
goto report_cpuinfo;
} else if (strcmp(p,"sThreadInfo") == 0) {
report_cpuinfo:
@@ -2402,7 +2409,7 @@ static int gdb_handle_packet(GDBState *s, const char *line_buf)
snprintf(buf, sizeof(buf), "m%x",
cpu_index(ENV_GET_CPU(s->query_cpu)));
put_packet(s, buf);
s->query_cpu = s->query_cpu->next_cpu;
s->query_cpu = ENV_GET_CPU(s->query_cpu)->next_cpu->env_ptr;
} else
put_packet(s, "l");
break;
@@ -2869,8 +2876,8 @@ static void gdb_accept(void)
socket_set_nodelay(fd);
s = g_malloc0(sizeof(GDBState));
s->c_cpu = first_cpu;
s->g_cpu = first_cpu;
s->c_cpu = first_cpu->env_ptr;
s->g_cpu = first_cpu->env_ptr;
s->fd = fd;
gdb_has_xml = 0;
@@ -3054,8 +3061,8 @@ int gdbserver_start(const char *device)
mon_chr = s->mon_chr;
memset(s, 0, sizeof(GDBState));
}
s->c_cpu = first_cpu;
s->g_cpu = first_cpu;
s->c_cpu = first_cpu->env_ptr;
s->g_cpu = first_cpu->env_ptr;
s->chr = chr;
s->state = chr ? RS_IDLE : RS_INACTIVE;
s->mon_chr = mon_chr;
......
@@ -333,7 +333,7 @@ static void do_cpu_reset(void *opaque)
env->regs[15] = info->entry & 0xfffffffe;
env->thumb = info->entry & 1;
} else {
if (env == first_cpu) {
if (CPU(cpu) == first_cpu) {
env->regs[15] = info->loader_start;
if (!info->dtb_filename) {
if (old_param) {
@@ -351,7 +351,7 @@ static void do_cpu_reset(void *opaque)
void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
{
CPUARMState *env = &cpu->env;
CPUState *cs = CPU(cpu);
int kernel_size;
int initrd_size;
int n;
@@ -476,9 +476,9 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)