/*
 *  Virtual page mapping
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"

#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

typedef PhysPageEntry Node[L2_SIZE];

struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    Node *nodes;
    MemoryRegionSection *sections;
    AddressSpace *as;
};
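
/* The phys_map entry above is the root of a radix tree over physical page
 * numbers.  A non-leaf entry's ptr selects a Node (an array of L2_SIZE
 * entries) in the nodes table; each level of a walk consumes L2_BITS bits
 * of the page index, over P_L2_LEVELS levels, until a leaf entry is
 * reached whose ptr indexes the sections table.  A sketch of the lookup,
 * as implemented by phys_page_find() below:
 *
 *     for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
 *         lp = nodes[lp.ptr][(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
 *     }
 *     section = &sections[lp.ptr];
 */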

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
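
/* A subpage carves a single target page into regions smaller than
 * TARGET_PAGE_SIZE: sub_section[] records one section number per byte
 * offset within the page, so two devices sharing a page each resolve to
 * their own MemoryRegionSection.
 */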

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

typedef struct PhysPageMap {
    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

static PhysPageMap *prev_map;
static PhysPageMap next_map;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2, 16);
        next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
                                      next_map.nodes_nb + nodes);
        next_map.nodes = g_renew(Node, next_map.nodes,
                                 next_map.nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = next_map.nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != next_map.nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        next_map.nodes[ret][i].is_leaf = 0;
        next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}
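
/* phys_page_set_level() below fills the tree recursively: at each level it
 * either marks a whole aligned "step" of pages (one entry's worth,
 * (hwaddr)1 << (level * L2_BITS) pages) as the given leaf, or recurses one
 * level down when the range only partially covers an entry.
 */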

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = next_map.nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = next_map.nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
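
/* phys_page_find() below is the read side: it walks one level per
 * iteration from the root and bails out to the PHYS_SECTION_UNASSIGNED
 * section on a NIL pointer, so a lookup never fails - unmapped addresses
 * simply resolve to the unassigned section.
 */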

static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    int i;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }
    return &sections[lp.ptr];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr >> TARGET_PAGE_BITS,
                             d->nodes, d->sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}
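
/* address_space_translate_internal() below turns an address-space offset
 * into a (section, offset-within-MemoryRegion) pair: *xlat receives the
 * offset into the region, and *plen is clipped so the span never runs past
 * the end of the section's MemoryRegion.
 */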

static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}
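
/* address_space_translate() below resolves an access through any IOMMUs on
 * the path: each iteration translates the address with the region's
 * iommu_ops and continues the walk in the IOMMU's target address space,
 * until a terminal (non-IOMMU) MemoryRegion is reached.  An access lacking
 * the required permission terminates at io_mem_unassigned.
 */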
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    hwaddr len = *plen;

    for (;;) {
        section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        len = MIN(len, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    *plen = len;
    *xlat = addr;
    return mr;
}

MemoryRegionSection *
address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
                                  hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    qemu_mutex_init(&ram_list.mutex);
    memory_map_init();
    io_mem_init();
#endif
}
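
/* vmstate_cpu_common below describes the architecture-independent CPU
 * state (the halted flag and pending interrupt_request bits) for
 * savevm/migration; cpu_common_post_load() fixes up state loaded from
 * older streams.
 */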

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu->env_ptr, 1);

    return 0;
}

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(cpu, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
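
/* Watchpoint lengths are restricted to powers of two no larger than
 * TARGET_PAGE_SIZE, aligned to their size; the len_mask computed below
 * encodes this, e.g. len = 4 gives len_mask = ~3, and a nonzero
 * (addr & ~len_mask) rejects an unaligned address.
 */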
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
    }

    tlb_flush_page(env, addr);

    if (watchpoint) {
        *watchpoint = wp;
    }
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(env, wp);
        }
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
    }

    breakpoint_invalidate(ENV_GET_CPU(env), pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(ENV_GET_CPU(env), breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(env, bp);
        }
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
#endif
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    CPUState *cpu = ENV_GET_CPU(env);
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    /* Reset non arch specific state */
    cpu_reset(ENV_GET_CPU(new_env));

    /* Copy arch specific state into the new CPU */
    memcpy(new_env, env, sizeof(CPUArchState));

    /* Clone all break/watchpoints.
       The memcpy above left new_env's list heads aliasing env's entries,
       so reinitialize new_env's lists and clone from env's intact ones.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}
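
/* Each page of guest RAM carries per-client dirty bits (VGA, code,
 * migration).  Clearing them below also pushes the TCG TLB entries for the
 * range back to "notdirty", so the next guest write takes the slow path
 * and sets the bits again.
 */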

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0) {
        return;
    }
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
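
/* memory_region_section_get_iotlb() below encodes a TLB entry's target in
 * a single hwaddr: for RAM it is the page-aligned ram_addr ORed with a
 * special section number (PHYS_SECTION_NOTDIRTY or PHYS_SECTION_ROM) in
 * the low bits; for I/O it is the section number itself plus the offset
 * within the page.  This only works because section numbers stay below
 * TARGET_PAGE_SIZE (see the assert in phys_section_add()).
 */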

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - address_space_memory.dispatch->sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);
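
/* phys_section_add() below appends a MemoryRegionSection to the table that
 * the iotlb encoding indexes into.  Because section numbers share an
 * hwaddr with page-aligned pointers, at most TARGET_PAGE_SIZE sections
 * (e.g. 4096 with 4 KiB target pages) can exist per map.
 */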

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(next_map.sections_nb < TARGET_PAGE_SIZE);

    if (next_map.sections_nb == next_map.sections_nb_alloc) {
        next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
                                         16);
        next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
                                    next_map.sections_nb_alloc);
    }
    next_map.sections[next_map.sections_nb] = *section;
    memory_region_ref(section->mr);
    return next_map.sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
    g_free(map);
}
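
/* register_subpage() below installs a section that covers less than a full
 * target page: it finds or creates the subpage container for that page and
 * records the section number over the affected byte range, leaving the
 * rest of the page pointing at whatever was there before.
 */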

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
                                                   next_map.nodes, next_map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.mr = &subpage->iomem;