/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

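/* Note (added commentary): code_write_count in PageDesc below counts
   invalidating writes to a page that still holds translated code; once it
   reaches the threshold below, a per-page code_bitmap is built so that
   self-modifying-code detection can check individual writes instead of
   invalidating the whole page. */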
#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
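
/* Worked example (illustrative figures, added): on a 64-bit host with
   4 KiB target pages (TARGET_PAGE_BITS = 12) and
   L1_MAP_ADDR_SPACE_BITS = 64:

       V_L1_BITS_REM = (64 - 12) % 10 = 2     (< 4, so L1 is widened)
       V_L1_BITS     = 2 + 10 = 12
       V_L1_SIZE     = 1 << 12 = 4096 l1_map entries
       V_L1_SHIFT    = 64 - 12 - 12 = 40

   so a page index is consumed as one 12-bit L1 slot followed by four
   10-bit lower-level slots (see page_find_alloc below). */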

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
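
/* Note (added commentary): with ptr only 15 bits wide and the all-ones
   value 0x7fff reserved as PHYS_MAP_NODE_NIL below, at most 32767
   intermediate nodes or section indexes can be addressed. */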

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
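
/* Usage sketch (illustrative, added): callers index by page number, e.g.

       PageDesc *p = page_find(address >> TARGET_PAGE_BITS);

   and get NULL back instead of allocating missing intermediate levels. */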

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
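
/* Usage sketch (illustrative, added): once a range has been registered
   with

       phys_page_set(start >> TARGET_PAGE_BITS,
                     size >> TARGET_PAGE_BITS, section_index);

   any page inside it resolves via

       MemoryRegionSection *s = phys_page_find(addr >> TARGET_PAGE_BITS);

   while unregistered pages fall back to phys_section_unassigned. */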

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

target_phys_addr_t section_addr(MemoryRegionSection *section,
                                target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}
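
/* Worked example (illustrative, added): for a section whose
   offset_within_address_space is 0xe0000000 and whose
   offset_within_region is 0x1000, section_addr(section, 0xe0002000)
   returns 0x3000, the offset inside the backing MemoryRegion. */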

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
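
/* Usage sketch (illustrative, added): passing 0 keeps the default sizing
   computed in code_gen_alloc(), e.g.

       tcg_exec_init(0);

   called once before any guest code is translated. */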

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
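
/* Caller pattern sketch (illustrative, added): a NULL return from
   tb_alloc() means the code buffer is full; callers are expected to
   flush and retry:

       tb = tb_alloc(pc);
       if (!tb) {
           tb_flush(env);
           tb = tb_alloc(pc);
       }
*/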

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
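
/* Note (added commentary): in the list walkers below, the low two bits of
   each pointer are a tag, not part of the address: they record which of a
   TB's (at most two) pages the link belongs to, or 2 for the jmp_first
   sentinel, which is why entries are masked with ~3 before being
   dereferenced. */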

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));