/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;
static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

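/* Each page on the wire is preceded by a be64 word: the page offset within
 * its RAMBlock with these flags OR'ed into the low bits (offsets are
 * TARGET_PAGE_SIZE aligned, so the low bits are otherwise zero).
 */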
#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */

static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * Called from qmp_migrate_set_cache_size in the main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by the XBZRLE.lock mutex.
 */
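/* Note the pow2floor() rounding below: e.g. a requested size of 5 MiB yields,
 * and reports back, an effective cache size of 4 MiB.
 */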
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

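/* Emit the per-page header: a be64 word carrying the page offset plus flag
 * bits, followed by the length-prefixed block idstr unless this page
 * continues the previous block (RAM_SAVE_FLAG_CONTINUE).  Returns the
 * number of header bytes written.
 */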
static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}

#define ENCODING_FLAG_XBZRLE 0x1

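/* Returns -1 on a cache miss or encoding overflow (the caller then sends the
 * page in full), 0 if the page is unchanged since it was cached, or the
 * number of bytes written when an XBZRLE delta was sent.
 */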
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
                return -1;
            }
        }
        acct_info.xbzrle_cache_miss++;
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        memcpy(prev_cached_page, current_data, TARGET_PAGE_SIZE);
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

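/* Return the offset (within `mr') of the next dirty page at or after `start',
 * clearing its bit in the migration bitmap.  During the bulk stage every page
 * is still dirty, so the next page is taken directly instead of scanning the
 * bitmap.
 */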
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

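/* Merge one range of the dirty log into the migration bitmap.  If `start' is
 * aligned on a bitmap word, whole longs are OR'ed in at a time and the newly
 * dirtied pages are counted with ctpopl(); otherwise we fall back to testing
 * page by page.
 */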
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */

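/* Pull the dirty log from the memory API into the migration bitmap for all
 * RAM blocks, and once per second recompute the dirty-page rate; with
 * auto-converge this is also where guest throttling gets switched on.
 */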
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               check whether the amount of dirtied bytes is 50% more than the
               approximate amount of bytes that just got transferred since
               the last time we were in this routine. If that happens more
               than N times (for now N == 4) we turn on the throttle down
               logic. */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            bool send_async = true;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as normal */
            bytes_sent = -1;
            ret = ram_control_save_page(f, block->offset,
                               offset, TARGET_PAGE_SIZE, &bytes_sent);

            XBZRLE_cache_lock();

            current_addr = block->offset + offset;
            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
                 * page would be stale
                 */
                xbzrle_cache_zero_page(current_addr);
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                bytes_sent = save_xbzrle_page(f, p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    /* We must send exactly what's in the xbzrle cache
                     * even if the page wasn't xbzrle compressed, so that
                     * it's right next time.
                     */
                    p = get_cached_data(XBZRLE.cache, current_addr);

                    /* Can't send this cached data async, since the cache page
                     * might get updated before it gets to the wire
                     */
                    send_async = false;
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                if (send_async) {
                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                } else {
                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                }
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            XBZRLE_cache_unlock();
            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

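/* ram_save_setup/iterate/complete/pending and ram_load below are RAM's live
 * migration callbacks; they are collected into a SaveVMHandlers table that
 * is registered with register_savevm_live() further down in this file.
 */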
static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

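/* Report to the migration core how many bytes are still dirty.  When the
 * estimate drops below max_size we re-sync the dirty bitmap (under the
 * iothread lock) so the caller decides on fresh data whether to finish.
 */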
static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

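/* Decode one XBZRLE-compressed page into `host'.  The wire format is a
 * one-byte encoding flag followed by a be16 length and that many delta
 * bytes; returns 0 on success and -1 on a malformed header or decode error.
 */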
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

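/* Translate a (block idstr, offset) pair from the stream into a host
 * pointer.  With RAM_SAVE_FLAG_CONTINUE the page belongs to the same block
 * as the previous one, so the last looked-up block is kept in a static
 * variable and reused.
 */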
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

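/* Incoming side: walk the stream written by the save functions above.  Each
 * chunk begins with a be64 word whose low bits are the RAM_SAVE_FLAG_* bits
 * and whose remaining bits are the page address within its RAMBlock.
 */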
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            fprintf(stderr,
                                    "Length mismatch: %s: " RAM_ADDR_FMT
                                    " in != " RAM_ADDR_FMT "\n", id, length,
                                    block->length);
                            ret = -EINVAL;
                            goto done;
                        }
                        break;
                    }
                }

                if (!block) {
                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                            "accept migration\n", id);
                    ret = -EINVAL;
                    goto done;
                }

                total_ram_bytes -= length;
            }
        }

        if (flags & RAM_SAVE_FLAG_COMPRESS) {
            void *host;
            uint8_t ch;

            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                return -EINVAL;
            }

            ch = qemu_get_byte(f);