/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

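/* Sleep if the guest virtual clock has run more than VM_CLOCK_ADVANCE
 * ahead of the host clock; a positive diff_clk means the guest is
 * ahead.  On POSIX hosts, time not slept because nanosleep() was
 * interrupted stays in diff_clk for the next call.
 */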
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk -= (sleep_delay.tv_sec - rem_delay.tv_sec) * 1000000000LL;
            sc->diff_clk -= sleep_delay.tv_nsec - rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

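/* Warn the user, at most MAX_NB_PRINTS times and at most once every
 * MAX_DELAY_PRINT_RATE ns, when the guest runs late (negative
 * diff_clk).  The threshold tracks the delay so that a message is only
 * printed when the lateness changes appreciably.
 */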
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

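/* Take an initial reading of the guest/host clock difference and of
 * the instruction counter, and record the extremes seen so far in the
 * externally defined max_delay and max_advance globals.
 */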
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) -
                   sc->realtime_clock +
                   cpu_get_clock_offset();
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

void cpu_loop_exit(CPUState *cpu)
{
    cpu->current_tb = NULL;
    siglongjmp(cpu->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
#if defined(CONFIG_SOFTMMU)
void cpu_resume_from_signal(CPUState *cpu, void *puc)
{
    /* XXX: restore cpu registers saved in host registers */

    cpu->exception_index = -1;
    siglongjmp(cpu->jmp_env, 1);
}
#endif

/* Execute a TB, and fix up the CPU state afterwards if necessary */
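/* The value returned by the generated code is the address of the TB
 * that was executing, with the reason for the exit encoded in its low
 * bits:
 *
 *     tb   = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
 *     exit = next_tb & TB_EXIT_MASK;   (TB_EXIT_IDX0, TB_EXIT_IDX1,
 *                                       TB_EXIT_REQUESTED or
 *                                       TB_EXIT_ICOUNT_EXPIRED)
 */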
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t next_tb;

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#elif defined(TARGET_M68K)
        /* ??? Should not modify env state for dumping.  */
        cpu_m68k_flush_flags(env, env->cc_op);
        env->cc_op = CC_OP_FLAGS;
        env->sr = (env->sr & 0xffe0) | env->cc_dest | (env->cc_x << 4);
        log_cpu_state(cpu, 0);
#else
        log_cpu_state(cpu, 0);
#endif
    }
#endif /* DEBUG_DISAS */

    next_tb = tcg_qemu_tb_exec(env, tb_ptr);
    trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK),
                       next_tb & TB_EXIT_MASK);

    if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, tb->pc);
        }
    }
    if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably a pending
         * interrupt).  We've now stopped, so clear the flag.
         */
        cpu->tcg_exit_req = 0;
    }
    return next_tb;
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                             TranslationBlock *orig_tb)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    cpu->current_tb = tb;
    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb->tc_ptr);
    cpu->current_tb = NULL;
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

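/* Look the TB up in the physically indexed hash table, generating new
 * code if no match is found.  A hit is moved to the front of its hash
 * chain, and the result is also stored in the per-CPU virtual-PC jump
 * cache that tb_find_fast() consults first. */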
static TranslationBlock *tb_find_slow(CPUArchState *env,
                                      target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1;
    target_ulong virt_page2;

    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tcg_ctx.tb_ctx.tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb) {
            goto not_found;
        }
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                tb_page_addr_t phys_page2;

                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2) {
                    goto found;
                }
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(cpu, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tcg_ctx.tb_ctx.tb_phys_hash[h];
        tcg_ctx.tb_ctx.tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(env, pc, cs_base, flags);
    }
    return tb;
}

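/* If no watchpoint is flagged as hit, clear any stale
 * BP_WATCHPOINT_HIT flags before handing the debug exception to the
 * per-target handler. */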
static void cpu_handle_debug_exception(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

/* main execution loop */

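/* Set from other threads to request that the CPU leave its execution
 * loop; see the memory-barrier comment in cpu_exec() below. */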
volatile sig_atomic_t exit_request;

int cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
    X86CPU *x86_cpu = X86_CPU(cpu);
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;
    SyncClocks sc;

    /* This must be volatile so it is not trashed by longjmp() */
    volatile bool have_tb_lock = false;

    if (cpu->halted) {
        if (!cpu_has_work(cpu)) {
            return EXCP_HALTED;
        }

        cpu->halted = 0;
    }

    current_cpu = cpu;

    /* As long as current_cpu is null, up to the assignment just above,
     * requests by other threads to exit the execution loop are expected to
     * be issued using the exit_request global. We must make sure that our
     * evaluation of the global value is performed past the current_cpu
     * value transition point, which requires a memory barrier as well as
     * an instruction scheduling constraint on modern architectures.  */
    smp_mb();

    if (unlikely(exit_request)) {
        cpu->exit_request = 1;
    }

    cc->cpu_exec_enter(cpu);
    cpu->exception_index = -1;

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
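    /* Anything below that calls cpu_loop_exit() siglongjmp()s back to
     * this sigsetjmp(), so pending exceptions are re-examined at the
     * top of the outer loop. */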
    for(;;) {
        if (sigsetjmp(cpu->jmp_env, 0) == 0) {
            /* if an exception is pending, we execute it here */
            if (cpu->exception_index >= 0) {
                if (cpu->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = cpu->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    cc->do_interrupt(cpu);
#endif
                    ret = cpu->exception_index;
                    break;
#else
                    cc->do_interrupt(cpu);
                    cpu->exception_index = -1;
#endif
                }
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = cpu->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        cpu->exception_index = EXCP_DEBUG;
                        cpu_loop_exit(cpu);
                    }
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        cpu->halted = 1;
                        cpu->exception_index = EXCP_HLT;
                        cpu_loop_exit(cpu);
                    }
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0);
                        do_cpu_init(x86_cpu);
                        cpu->exception_index = EXCP_HALTED;
                        cpu_loop_exit(cpu);
                    }
#else
                    if (interrupt_request & CPU_INTERRUPT_RESET) {
                        cpu_reset(cpu);
                    }
#endif
                    /* The target hook cpu_exec_interrupt has three exit
                       conditions: false when the interrupt isn't processed,
                       true when it is and we should restart on a new TB,
                       or a direct longjmp out through cpu_loop_exit.  */
                    if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                        next_tb = 0;
                    }
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (cpu->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(cpu->exit_request)) {
                    cpu->exit_request = 0;
                    cpu->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit(cpu);
                }
                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = true;
                tb = tb_find_fast(env);
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                }
                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
                }
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
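                /* The low bits of next_tb identify which of the
                 * previous TB's two jump slots brought us here; that
                 * is the slot tb_add_jump() patches to point at the
                 * new TB. */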
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                next_tb & TB_EXIT_MASK, tb);
                }
                have_tb_lock = false;
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                cpu->current_tb = tb;
                barrier();
                if (likely(!cpu->exit_request)) {
                    trace_exec_tb(tb, tb->pc);
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
                    next_tb = cpu_tb_exec(cpu, tc_ptr);
                    switch (next_tb & TB_EXIT_MASK) {
                    case TB_EXIT_REQUESTED:
                        /* Something asked us to stop executing
                         * chained TBs; just continue round the main
                         * loop. Whatever requested the exit will also
                         * have set something else (e.g. exit_request or
                         * interrupt_request) which we will handle
                         * next time around the loop.
                         */
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        next_tb = 0;
                        break;
                    case TB_EXIT_ICOUNT_EXPIRED:
                    {
                        /* Instruction counter expired.  */
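                        /* icount_decr.u16.low is only 16 bits wide,
                         * so budgets above 0xffff are parked in
                         * icount_extra and fed to the decrementer in
                         * chunks of at most 0xffff instructions. */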
                        int insns_left;
                        tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
                        insns_left = cpu->icount_decr.u32;
                        if (cpu->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            cpu->icount_extra += insns_left;
                            if (cpu->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = cpu->icount_extra;
                            }
                            cpu->icount_extra -= insns_left;
                            cpu->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(env, insns_left, tb);
                                align_clocks(&sc, cpu);
                            }
                            cpu->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit(cpu);
                        }
                        break;
                    }
                    default:
                        break;
                    }
                }
                cpu->current_tb = NULL;
                /* Try to align the host and virtual clocks
                   if the guest is in advance */
                align_clocks(&sc, cpu);
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        } else {
            /* Reload env after longjmp - the compiler may have smashed all
             * local variables as longjmp is marked 'noreturn'. */
            cpu = current_cpu;
            env = cpu->env_ptr;
            cc = CPU_GET_CLASS(cpu);
#ifdef TARGET_I386
            x86_cpu = X86_CPU(cpu);
#endif
            if (have_tb_lock) {
                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
                have_tb_lock = false;
            }
        }
    } /* for(;;) */

    cc->cpu_exec_exit(cpu);

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;
    return ret;
}