exec.c 70.7 KB
Newer Older
bellard's avatar
bellard committed
1
/*
2
 *  Virtual page mapping
3
 *
bellard's avatar
bellard committed
4 5 6 7 8 9 10 11 12 13 14 15 16
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard's avatar
bellard committed
18
 */
bellard's avatar
bellard committed
19
#include "config.h"
bellard's avatar
bellard committed
20 21 22
#ifdef _WIN32
#include <windows.h>
#else
bellard's avatar
bellard committed
23
#include <sys/types.h>
bellard's avatar
bellard committed
24 25
#include <sys/mman.h>
#endif
bellard's avatar
bellard committed
26

27
#include "qemu-common.h"
bellard's avatar
bellard committed
28
#include "cpu.h"
bellard's avatar
bellard committed
29
#include "tcg.h"
30
#include "hw/hw.h"
31
#include "hw/qdev.h"
32
#include "qemu/osdep.h"
33
#include "sysemu/kvm.h"
Paolo Bonzini's avatar
Paolo Bonzini committed
34
#include "hw/xen/xen.h"
35 36
#include "qemu/timer.h"
#include "qemu/config-file.h"
37
#include "exec/memory.h"
38
#include "sysemu/dma.h"
39
#include "exec/address-spaces.h"
40 41
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
Jun Nakajima's avatar
Jun Nakajima committed
42
#else /* !CONFIG_USER_ONLY */
43
#include "sysemu/xen-mapcache.h"
44
#include "trace.h"
45
#endif
46
#include "exec/cpu-all.h"
bellard's avatar
bellard committed
47

48
#include "exec/cputlb.h"
49
#include "translate-all.h"
50

51
#include "exec/memory-internal.h"
52

53
//#define DEBUG_SUBPAGE
ths's avatar
ths committed
54

55
#if !defined(CONFIG_USER_ONLY)
56
int phys_ram_fd;
57
static int in_migration;
58

59
RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
Avi Kivity's avatar
Avi Kivity committed
60 61

static MemoryRegion *system_memory;
62
static MemoryRegion *system_io;
Avi Kivity's avatar
Avi Kivity committed
63

64 65
AddressSpace address_space_io;
AddressSpace address_space_memory;
66

67
MemoryRegion io_mem_rom, io_mem_notdirty;
68
static MemoryRegion io_mem_unassigned;
69

70
#endif
71

72
CPUArchState *first_cpu;
bellard's avatar
bellard committed
73 74
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
75
DEFINE_TLS(CPUArchState *,cpu_single_env);
pbrook's avatar
pbrook committed
76
/* 0 = Do not count executed instructions.
ths's avatar
ths committed
77
   1 = Precise instruction counting.
pbrook's avatar
pbrook committed
78
   2 = Adaptive rate instruction counting.  */
79
int use_icount;
bellard's avatar
bellard committed
80

81
#if !defined(CONFIG_USER_ONLY)
82

83 84 85 86 87 88 89 90 91 92 93 94 95 96
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
     /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
97
    AddressSpace *as;
98 99
};

100 101 102
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
103
    AddressSpace *as;
104 105 106 107
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

108 109 110
static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
111 112 113
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;
114

115 116 117 118
/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

119
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
120

121
static void io_mem_init(void);
Avi Kivity's avatar
Avi Kivity committed
122
static void memory_map_init(void);
Blue Swirl's avatar
Blue Swirl committed
123
static void *qemu_safe_ram_ptr(ram_addr_t addr);
124

125
static MemoryRegion io_mem_watch;
126
#endif
bellard's avatar
bellard committed
127

128
#if !defined(CONFIG_USER_ONLY)
129

130
static void phys_map_node_reserve(unsigned nodes)
131
{
132
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
133 134
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
135 136
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
137 138 139
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
140 141 142 143 144 145 146 147 148 149
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
150
    for (i = 0; i < L2_SIZE; ++i) {
151
        phys_map_nodes[ret][i].is_leaf = 0;
152
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
153
    }
154
    return ret;
155 156
}

157 158
/* Recursively populate one level of the physical-page radix tree under
 * @lp, mapping pages [*index, *index + *nb) to phys_sections entry @leaf.
 * @index and @nb are advanced in place as pages are consumed, so sibling
 * iterations and the caller continue where this call left off.
 */
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    /* number of pages covered by one entry at this level */
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        /* First touch of this subtree: allocate a node.  At the bottom
         * level, pre-fill every slot as an "unassigned" leaf. */
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    /* Entry within this node where the range starts. */
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            /* Range covers this entire entry: make it a leaf here,
             * skipping the lower levels ("superpage" in the tree). */
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            /* Partial coverage: descend one level. */
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

Avi Kivity's avatar
Avi Kivity committed
192
/* Map @nb pages starting at page index @index to section number @leaf
 * in @d's physical page table. */
static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

202
/* Look up page index @index in @d's radix tree and return the matching
 * MemoryRegionSection; unmapped pages resolve to the unassigned section. */
static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry entry = d->phys_map;
    PhysPageEntry *node;
    int level;

    for (level = P_L2_LEVELS - 1; level >= 0 && !entry.is_leaf; level--) {
        if (entry.ptr == PHYS_MAP_NODE_NIL) {
            /* Hole in the tree: nothing was ever mapped here. */
            return &phys_sections[phys_section_unassigned];
        }
        node = phys_map_nodes[entry.ptr];
        entry = node[(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &phys_sections[entry.ptr];
}

Blue Swirl's avatar
Blue Swirl committed
218 219
/* A region is "unassigned" when it is none of the special internal I/O
 * regions (rom/notdirty/watch) and not a ROM device. */
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    if (mr == &io_mem_rom || mr == &io_mem_notdirty || mr == &io_mem_watch) {
        return false;
    }
    return !mr->rom_device;
}
223

224
static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
225 226
                                                        hwaddr addr,
                                                        bool resolve_subpage)
227
{
228 229 230 231 232 233 234 235 236
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
237 238
}

239 240 241
/* Resolve @addr within @as to a section, storing the offset into the
 * section's MemoryRegion in *@xlat and clamping *@plen so the access
 * does not run past the end of the region. */
static MemoryRegionSection *
address_space_translate_internal(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section =
        address_space_lookup_region(as, addr, resolve_subpage);
    Int128 remaining;

    /* Offset of @addr within the section... */
    addr -= section->offset_within_address_space;

    /* ...and within the underlying memory region. */
    *xlat = addr + section->offset_within_region;

    /* Clamp *plen to the space left in the region. */
    remaining = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(remaining, int128_make64(*plen)));
    return section;
}
257

258 259 260
/* Translate @addr in @as for an access of length *@plen, walking through
 * any intervening IOMMUs, and return the terminal MemoryRegion together
 * with the offset into it (*@xlat).  *@plen is reduced to the length the
 * translation permits.  @is_write selects the IOMMU permission bit to
 * check; a permission failure yields io_mem_unassigned. */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            /* Reached a plain region: translation complete. */
            break;
        }

        /* Map @addr through this IOMMU, keeping the offset bits the
         * entry does not translate, and clamp the length to the span
         * covered by the IOMMU entry. */
        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            /* Access denied by the IOMMU entry. */
            mr = &io_mem_unassigned;
            break;
        }

        /* Continue the walk in the IOMMU's target address space. */
        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

/* Like address_space_translate(), but for filling TLB entries: returns
 * the raw section without subpage resolution and insists that no IOMMU
 * sits in the path (IOMMU translations cannot be cached in the TLB). */
MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section =
        address_space_translate_internal(as, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
302
#endif
bellard's avatar
bellard committed
303

304
/* One-time global initialization of the memory subsystem (no-op in
 * user-mode builds, which have no modelled physical memory). */
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
312

313
#if !defined(CONFIG_USER_ONLY)
314 315

/* Post-load hook for the "cpu_common" vmstate: scrub an obsolete
 * interrupt bit and flush the TLB, whose contents are not migrated.
 * Always succeeds. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}
bellard's avatar
bellard committed
326

327
/* Migration description for the architecture-independent CPUState
 * fields: the halted flag and the pending interrupt-request mask. */
const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
339

340
#endif
bellard's avatar
bellard committed
341

342
CPUState *qemu_get_cpu(int index)
bellard's avatar
bellard committed
343
{
344
    CPUArchState *env = first_cpu;
345
    CPUState *cpu = NULL;
bellard's avatar
bellard committed
346

347
    while (env) {
348 349
        cpu = ENV_GET_CPU(env);
        if (cpu->cpu_index == index) {
350
            break;
351
        }
352
        env = env->next_cpu;
bellard's avatar
bellard committed
353
    }
354

355
    return env ? cpu : NULL;
bellard's avatar
bellard committed
356 357
}

358 359 360 361 362 363 364 365 366 367
/* Invoke @func(cpu, @data) once for every CPU, in list order. */
void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        func(ENV_GET_CPU(env), data);
    }
}

368
/* Register a freshly created CPU: append it to the global CPU list,
 * assign the next free cpu_index, initialize its debug lists, and hook
 * up migration state (vmstate and, where present, legacy savevm). */
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* Walk to the end of the singly linked CPU list; cpu_index ends up
     * as the count of CPUs already registered. */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    /* Legacy cpu_save/cpu_load and a per-class VMSD are mutually
     * exclusive mechanisms. */
    assert(cc->vmsd == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

407
#if defined(TARGET_HAS_ICE)
408
#if defined(CONFIG_USER_ONLY)
409
/* User mode: guest virtual addresses map directly, so invalidate the
 * translated code covering @pc without any address translation. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
414 415
/* System mode: translate the guest-virtual @pc to a physical address
 * first (keeping the offset within the page), then invalidate the
 * translated code at that physical address. */
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
bellard's avatar
bellard committed
419
#endif
420
#endif /* TARGET_HAS_ICE */
bellard's avatar
bellard committed
421

422
#if defined(CONFIG_USER_ONLY)
423
/* User-mode build: watchpoints are unsupported, so there is nothing to
 * remove. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

428
/* User-mode build: watchpoints are unsupported. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
434
/* Add a watchpoint.  */
435
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
436
                          int flags, CPUWatchpoint **watchpoint)
437
{
438
    target_ulong len_mask = ~(len - 1);
439
    CPUWatchpoint *wp;
440

441
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
442 443
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
444 445 446 447
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
448
    wp = g_malloc(sizeof(*wp));
449 450

    wp->vaddr = addr;
451
    wp->len_mask = len_mask;
452 453
    wp->flags = flags;

454
    /* keep all GDB-injected watchpoints in front */
455
    if (flags & BP_GDB)
Blue Swirl's avatar
Blue Swirl committed
456
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
457
    else
Blue Swirl's avatar
Blue Swirl committed
458
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
459 460

    tlb_flush_page(env, addr);
461 462 463 464

    if (watchpoint)
        *watchpoint = wp;
    return 0;
465 466
}

467
/* Remove a specific watchpoint.  Matches on address, length, and flags
 * (ignoring the transient BP_WATCHPOINT_HIT bit).  Returns 0 if found
 * and removed, -ENOENT otherwise. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

484
/* Remove a specific watchpoint by reference.  Unlinks @watchpoint,
 * flushes its page from the TLB so accesses stop trapping, and frees it. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
495
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
496
{
497
    CPUWatchpoint *wp, *next;
498

Blue Swirl's avatar
Blue Swirl committed
499
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
500 501
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
502
    }
503
}
504
#endif
505

506
/* Add a breakpoint.  */
507
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
508
                          CPUBreakpoint **breakpoint)
bellard's avatar
bellard committed
509
{
510
#if defined(TARGET_HAS_ICE)
511
    CPUBreakpoint *bp;
512

513
    bp = g_malloc(sizeof(*bp));
bellard's avatar
bellard committed
514

515 516 517
    bp->pc = pc;
    bp->flags = flags;

518
    /* keep all GDB-injected breakpoints in front */
519
    if (flags & BP_GDB)
Blue Swirl's avatar
Blue Swirl committed
520
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
521
    else
Blue Swirl's avatar
Blue Swirl committed
522
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
523

bellard's avatar
bellard committed
524
    breakpoint_invalidate(env, pc);
525 526 527

    if (breakpoint)
        *breakpoint = bp;
bellard's avatar
bellard committed
528 529
    return 0;
#else
530
    return -ENOSYS;
bellard's avatar
bellard committed
531 532 533
#endif
}

534
/* Remove a specific breakpoint.  Matches on @pc and @flags exactly.
 * Returns 0 on success, -ENOENT if no match, -ENOSYS without ICE. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

552
/* Remove a specific breakpoint by reference.  Unlinks @breakpoint,
 * forces retranslation of the affected code, and frees it. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
565
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
566 567
{
#if defined(TARGET_HAS_ICE)
568
    CPUBreakpoint *bp, *next;
569

Blue Swirl's avatar
Blue Swirl committed
570
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
571 572
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
573
    }
bellard's avatar
bellard committed
574 575 576
#endif
}

577 578
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
579
void cpu_single_step(CPUArchState *env, int enabled)
580
{
581
#if defined(TARGET_HAS_ICE)
582 583
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
584 585 586
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
Stuart Brady's avatar
Stuart Brady committed
587
            /* must flush all the translated code to avoid inconsistencies */
588 589 590
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
591 592 593 594
    }
#endif
}

595
/* Report a fatal emulation error (printf-style @fmt) to stderr and the
 * QEMU log, dump the CPU state to both, and abort().  Does not return. */
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* A va_list may only be traversed once; keep a copy for the log. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT handler so abort() below actually
         * terminates even if the guest installed its own handler. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

628
/* Create a new CPU of the same model with a copy of @env's register
 * state, preserving the new CPU's position in the global CPU list and
 * cloning all of the source's break- and watchpoints.
 *
 * Returns the new CPUArchState.
 *
 * Bug fix: the previous code called QTAILQ_INIT() on the *source* CPU's
 * break/watchpoint lists and then iterated those freshly emptied lists —
 * so nothing was ever cloned AND the source CPU lost its debug lists.
 * We now reinitialize the *copy's* list heads (after memcpy they still
 * alias the source's entries) and iterate the source's intact lists. */
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining. */
    new_env->next_cpu = next_cpu;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

660
#if !defined(CONFIG_USER_ONLY)
Juan Quintela's avatar
Juan Quintela committed
661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678
/* Re-arm dirty tracking in every CPU TLB for the guest RAM range
 * [start, end); @length is the byte length of that range.  The range
 * must lie entirely within one RAM block. */
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);

}

pbrook's avatar
pbrook committed
679
/* Note: start and end must be within the same ram block.  */
680
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard's avatar
bellard committed
681
                                     int dirty_flags)
682
{
Juan Quintela's avatar
Juan Quintela committed
683
    uintptr_t length;
684 685 686 687 688 689 690

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
691
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellard's avatar
bellard committed
692

Juan Quintela's avatar
Juan Quintela committed
693 694
    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
pbrook's avatar
pbrook committed
695
    }
696 697
}

Blue Swirl's avatar
Blue Swirl committed
698
static int cpu_physical_memory_set_dirty_tracking(int enable)
699
{
Michael S. Tsirkin's avatar
Michael S. Tsirkin committed
700
    int ret = 0;
701
    in_migration = enable;
Michael S. Tsirkin's avatar
Michael S. Tsirkin committed
702
    return ret;
703 704
}

705
/* Compute the iotlb value for a TLB entry mapping guest-virtual @vaddr
 * (physical @paddr) to @section at offset @xlat.  For RAM the value is
 * the page-aligned ram_addr ORed with a dirty-tracking section number;
 * for I/O it is the section's index plus the offset within it.  Pages
 * carrying an applicable watchpoint are redirected to the watch section
 * and flagged TLB_MMIO via *@address so accesses trap. */
hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        /* Writable RAM goes through the notdirty section so writes are
         * tracked; read-only RAM goes through the rom section. */
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* I/O: encode the section index plus the offset within it. */
        iotlb = section - phys_sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
744 745
#endif /* defined(CONFIG_USER_ONLY) */

746
#if !defined(CONFIG_USER_ONLY)
747

748
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
749
                             uint16_t section);
750
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
751

752 753
/* Append a copy of @section to the global phys_sections table, taking a
 * reference on its memory region, and return the new section number. */
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(phys_sections_nb < TARGET_PAGE_SIZE);

    if (phys_sections_nb == phys_sections_nb_alloc) {
        /* Grow geometrically, starting at 16 entries. */
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    memory_region_ref(section->mr);
    return phys_sections_nb++;
}

770 771
/* Release a section's hold on @mr: drop the reference phys_section_add()
 * took, and if the region is a subpage container, destroy and free the
 * subpage as well. */
static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}