/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */

static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

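/* Returns true if the 'size' bytes starting at 'p' are all zero */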
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE */
    PageCache *cache;
} XBZRLE = {
    .encoded_buf = NULL,
    .current_buf = NULL,
    .cache = NULL,
};
/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

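/* Resize the XBZRLE page cache.  Returns the new cache size in bytes,
 * or -1 if the requested size is smaller than one target page.  If the
 * cache has not been allocated yet, only the rounded-down power-of-two
 * size is returned.
 */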
int64_t xbzrle_cache_resize(int64_t new_size)
{
    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    if (XBZRLE.cache != NULL) {
        return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
            TARGET_PAGE_SIZE;
    }
    return pow2floor(new_size);
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

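/* Write the header for one RAM page: the in-block offset OR'd with the
 * continue flag and the save flag, followed by the block's idstr unless
 * this page continues the previously sent block.  Returns the number of
 * header bytes written to the stream.
 */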
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;
    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}

#define ENCODING_FLAG_XBZRLE 0x1

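/* Try to send a page encoded with XBZRLE against the copy in the cache.
 * Returns the number of bytes written, 0 if the page was unmodified, or
 * -1 if it could not be sent compressed (cache miss or encoding
 * overflow) and must be sent as a normal page instead.
 */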
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
                return -1;
            }
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* Update the cache so the next encode runs against the data we sent */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

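/* Find the next dirty page in 'mr' at or after 'start', clear its bit in
 * the migration bitmap and return its offset within the memory region.
 * During the bulk stage every page is still dirty, so the bitmap scan is
 * skipped and pages are walked sequentially.
 */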
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }
    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

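/* Mark the page containing 'addr' as dirty for migration.  Returns the
 * previous state of the bit and bumps migration_dirty_pages if the page
 * was previously clean.
 */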
static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

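/* Move the dirty bits for [start, start + length) from the ram_list
 * dirty memory bitmap into the migration bitmap, updating
 * migration_dirty_pages.  Works a word at a time when 'start' is aligned
 * to a bitmap word, one page at a time otherwise.
 */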
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens >N times (for now N==4)
               we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
               (num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
               (dirty_rate_high_cnt++ > 4)) {
                    trace_migration_throttle();
                    mig_throttle_on = true;
                    dirty_rate_high_cnt = 0;
             }
             bytes_xfer_prev = bytes_xfer_now;
        } else {
             mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
526
                ram_bulk_stage = false;
527 528
            }
        } else {
Michael R. Hines's avatar
Michael R. Hines committed
529
            int ret;
530
            uint8_t *p;
531
            bool send_async = true;
Juan Quintela's avatar
Juan Quintela committed
532
            int cont = (block == last_sent_block) ?
533
                RAM_SAVE_FLAG_CONTINUE : 0;
534

Avi Kivity's avatar
Avi Kivity committed
535
            p = memory_region_get_ram_ptr(mr) + offset;
536

537 538
            /* In doubt sent page as normal */
            bytes_sent = -1;
Michael R. Hines's avatar
Michael R. Hines committed
539 540 541
            ret = ram_control_save_page(f, block->offset,
                               offset, TARGET_PAGE_SIZE, &bytes_sent);

542
            current_addr = block->offset + offset;
Michael R. Hines's avatar
Michael R. Hines committed
543 544 545 546 547 548 549 550
            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
                 * page would be stale
                 */
                xbzrle_cache_zero_page(current_addr);
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    /* We must send exactly what's in the xbzrle cache
                     * even if the page wasn't xbzrle compressed, so that
                     * it's right next time.
                     */
                    p = get_cached_data(XBZRLE.cache, current_addr);

                    /* Can't send this cached data async, since the cache page
                     * might get updated before it gets to the wire
                     */
                    send_async = false;
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                if (send_async) {
                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                } else {
                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                }
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

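/* Account for pages that were written to the stream by some other
 * mechanism (the data itself is not re-sent here) and advance the file
 * position accordingly.
 */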
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

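/* Set up RAM migration: allocate the migration bitmap with every page
 * initially dirty, set up the XBZRLE cache and buffers if XBZRLE is in
 * use, start dirty logging and write the catalogue of RAM blocks (idstr
 * and length of each block) to the stream.
 */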
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    migration_bitmap = bitmap_new(ram_pages);
    bitmap_set(migration_bitmap, 0, ram_pages);
    migration_dirty_pages = ram_pages;
    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;
    if (migrate_use_xbzrle()) {
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            DPRINTF("Error creating cache\n");
            return -1;
        }

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            DPRINTF("Error allocating encoded_buf\n");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            DPRINTF("Error allocating current_buf\n");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

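/* Send dirty pages until the stream's rate limit is hit or no dirty page
 * is left.  The elapsed time is checked every 64 iterations so that a
 * single call does not stay in the loop much longer than MAX_WAIT ms.
 * Returns the number of page bytes written, 0 if nothing was dirty, or a
 * negative error code from the stream.
 */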
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every 64 iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

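/* Final migration stage: synchronize the dirty bitmap one last time and
 * flush every remaining dirty page, ignoring the bandwidth limit, then
 * tear down the migration state.
 */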
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

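/* Report how many bytes of RAM still need to be sent.  If the estimate
 * falls below max_size, the dirty bitmap is re-synced (under the
 * iothread lock) to refresh the figure.
 */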
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

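/* Decode one XBZRLE-compressed page from the stream into 'host'.
 * Returns 0 on success and -1 on a malformed header or decode error;
 * aborts if the decoded length exceeds the target page size.
 */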
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else  if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

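/* Translate a (block, offset) pair read from the stream into a host
 * pointer.  With RAM_SAVE_FLAG_CONTINUE the most recently used block is
 * reused; otherwise the block idstr is read from the stream and looked
 * up in the RAM block list.  Returns NULL if the block cannot be found.
 */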
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

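/* Incoming side of RAM migration: consume (flags, addr) records from the
 * stream until RAM_SAVE_FLAG_EOS, verifying the RAM block catalogue and
 * filling guest pages from compressed (single-byte), normal or XBZRLE
 * data.
 */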
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            if (version_id == 4) {
                /* Synchronize RAM block list */
                char id[256];
                ram_addr_t length;
                ram_addr_t total_ram_bytes = addr;

                while (total_ram_bytes) {
                    RAMBlock *block;
                    uint8_t len;

                    len = qemu_get_byte(f);
                    qemu_get_buffer(f, (uint8_t *)id, len);
                    id[len] = 0;
                    length = qemu_get_be64(f);

                    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                        if (!strncmp(id, block->idstr, sizeof(id))) {
                            if (block->length != length) {
                                fprintf(stderr,
                                        "Length mismatch: %s: " RAM_ADDR_FMT
                                        " in != " RAM_ADDR_FMT "\n", id, length,
                                        block->length);
                                ret = -EINVAL;
                                goto done;
                            }
                            break;
                        }
                    }

                    if (!block) {
                        fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                                "accept migration\n", id);
                        ret = -EINVAL;
                        goto done;
                    }

                    total_ram_bytes -= length;
                }
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_PAGE) {
            void *host;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
        } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
            void *host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            if (load_xbzrle(f, addr, host) < 0) {
                ret = -EINVAL;
                goto done;
            }
        } else if (flags & RAM_SAVE_FLAG_HOOK) {
            ram_control_load_hook(f, flags);
        }
        error = qemu_file_get_error(f);
        if (error) {
            ret = error;
            goto done;
        }
    } while (!(flags & RAM_SAVE_FLAG_EOS));

done:
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}

SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cancel = ram_migration_cancel,
};

struct soundhw {
    const char *name;
    const char *descr;
    int enabled;
    int isa;
    union {
        int (*init_isa) (ISABus *bus);
        int (*init_pci) (PCIBus *bus);
    } init;
};

static struct soundhw soundhw[9];
static int soundhw_count;