/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdint.h>
#include <stdarg.h>
#include <stdlib.h>
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include "config.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "sysemu/arch_init.h"
#include "audio/audio.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/audio/audio.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "hw/i386/smbios.h"
#include "exec/address-spaces.h"
#include "hw/audio/pcspk.h"
#include "migration/page_cache.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"

#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
int graphic_depth = 8;
#else
int graphic_width = 800;
int graphic_height = 600;
int graphic_depth = 32;
#endif


#if defined(TARGET_ALPHA)
#define QEMU_ARCH QEMU_ARCH_ALPHA
#elif defined(TARGET_ARM)
#define QEMU_ARCH QEMU_ARCH_ARM
#elif defined(TARGET_CRIS)
#define QEMU_ARCH QEMU_ARCH_CRIS
#elif defined(TARGET_I386)
#define QEMU_ARCH QEMU_ARCH_I386
#elif defined(TARGET_M68K)
#define QEMU_ARCH QEMU_ARCH_M68K
#elif defined(TARGET_LM32)
#define QEMU_ARCH QEMU_ARCH_LM32
#elif defined(TARGET_MICROBLAZE)
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
#elif defined(TARGET_MOXIE)
#define QEMU_ARCH QEMU_ARCH_MOXIE
#elif defined(TARGET_OPENRISC)
#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
#define QEMU_ARCH QEMU_ARCH_S390X
#elif defined(TARGET_SH4)
#define QEMU_ARCH QEMU_ARCH_SH4
#elif defined(TARGET_SPARC)
#define QEMU_ARCH QEMU_ARCH_SPARC
#elif defined(TARGET_XTENSA)
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
#endif

const uint32_t arch_type = QEMU_ARCH;

static bool mig_throttle_on;
static int dirty_rate_high_cnt;
static void check_guest_throttling(void);

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */

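/* A page on the wire is laid out roughly as follows (see save_block_hdr()
 * and ram_save_block() below):
 *   be64: the page's offset within its RAMBlock, OR-ed with the flags above
 *   if !RAM_SAVE_FLAG_CONTINUE: u8 idstr length, then the RAMBlock's idstr
 *   payload: one fill byte (COMPRESS), an XBZRLE-encoded delta (XBZRLE),
 *            or a full target page (PAGE)
 */
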
static struct defconfig_file {
    const char *filename;
    /* Indicates it is a user config file (disabled by -no-user-config) */
    bool userconfig;
} default_config_files[] = {
    { CONFIG_QEMU_CONFDIR "/qemu.conf",                   true },
    { CONFIG_QEMU_CONFDIR "/target-" TARGET_NAME ".conf", true },
    { NULL }, /* end of list */
};

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

int qemu_read_default_config_files(bool userconfig)
{
    int ret;
    struct defconfig_file *f;

    for (f = default_config_files; f->filename; f++) {
        if (!userconfig && f->userconfig) {
            continue;
        }
        ret = qemu_read_config_file(f->filename);
        if (ret < 0 && ret != -ENOENT) {
            return ret;
        }
    }

    return 0;
}

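/* true if the first 'size' bytes at 'p' are all zero */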
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static size_t save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                             int cont, int flag)
{
    size_t size;

    qemu_put_be64(f, offset | cont | flag);
    size = 8;

    if (!cont) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr,
                        strlen(block->idstr));
        size += 1 + strlen(block->idstr);
    }
    return size;
}

/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static unsigned long *migration_bitmap;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
}

#define ENCODING_FLAG_XBZRLE 0x1

static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, int cont, bool last_stage)
{
    int encoded_len = 0, bytes_sent = -1;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_sent += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_sent;

    return bytes_sent;
}

static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
                                                 ram_addr_t start)
{
    unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
    unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);

    unsigned long next;

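    /* During the bulk stage pages are sent strictly in order, so the next
     * dirty page is simply the one after 'start'; this avoids the
     * (comparatively expensive) bitmap search. */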
    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(migration_bitmap, size, nr);
    }

    if (next < size) {
        clear_bit(next, migration_bitmap);
        migration_dirty_pages--;
    }
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;

    ret = test_and_set_bit(nr, migration_bitmap);

    if (!ret) {
        migration_dirty_pages++;
    }
    return ret;
}

static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

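        /* Merge one word (BITS_PER_LONG pages) of dirty bits at a time:
         * bits set in src but clear in migration_bitmap are newly dirty,
         * so add their population count (ctpopl). */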
        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long new_dirty;
                new_dirty = ~migration_bitmap[k];
                migration_bitmap[k] |= src[k];
                new_dirty &= src[k];
                migration_dirty_pages += ctpopl(new_dirty);
                src[k] = 0;
            }
        }
    } else {
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_get_dirty(start + addr,
                                              TARGET_PAGE_SIZE,
                                              DIRTY_MEMORY_MIGRATION)) {
                cpu_physical_memory_reset_dirty(start + addr,
                                                TARGET_PAGE_SIZE,
                                                DIRTY_MEMORY_MIGRATION);
                migration_bitmap_set_dirty(start + addr);
            }
        }
    }
}


/* Needs iothread lock! */

static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    static int64_t start_time;
    static int64_t bytes_xfer_prev;
    static int64_t num_dirty_pages_period;
    int64_t end_time;
    int64_t bytes_xfer_now;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
    }
    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens >N times (for now
               N==4) we turn on the throttle down logic */
            bytes_xfer_now = ram_bytes_transferred();
            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ > 4)) {
                trace_migration_throttle();
                mig_throttle_on = true;
                dirty_rate_high_cnt = 0;
            }
            bytes_xfer_prev = bytes_xfer_now;
        } else {
            mig_throttle_on = false;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
}

/*
 * ram_save_block: Writes a page of memory to the stream f
 *
 * Returns:  The number of bytes written.
 *           0 means no dirty pages
 */

static int ram_save_block(QEMUFile *f, bool last_stage)
{
    RAMBlock *block = last_seen_block;
    ram_addr_t offset = last_offset;
    bool complete_round = false;
    int bytes_sent = 0;
    MemoryRegion *mr;
    ram_addr_t current_addr;

    if (!block) {
        block = QTAILQ_FIRST(&ram_list.blocks);
    }

    while (true) {
        mr = block->mr;
        offset = migration_bitmap_find_and_reset_dirty(mr, offset);
        if (complete_round && block == last_seen_block &&
            offset >= last_offset) {
            break;
        }
        if (offset >= block->length) {
            offset = 0;
            block = QTAILQ_NEXT(block, next);
            if (!block) {
                block = QTAILQ_FIRST(&ram_list.blocks);
                complete_round = true;
                ram_bulk_stage = false;
            }
        } else {
            int ret;
            uint8_t *p;
            bool send_async = true;
            int cont = (block == last_sent_block) ?
                RAM_SAVE_FLAG_CONTINUE : 0;

            p = memory_region_get_ram_ptr(mr) + offset;

            /* When in doubt, send the page as a normal page */
            bytes_sent = -1;
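            /* Give a transport hook (the RDMA path) the first chance to
             * transmit the page; ret stays RAM_SAVE_CONTROL_NOT_SUPP when
             * no hook is registered. */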
            ret = ram_control_save_page(f, block->offset,
                                        offset, TARGET_PAGE_SIZE, &bytes_sent);

            XBZRLE_cache_lock();

            current_addr = block->offset + offset;
            if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
                if (ret != RAM_SAVE_CONTROL_DELAYED) {
                    if (bytes_sent > 0) {
                        acct_info.norm_pages++;
                    } else if (bytes_sent == 0) {
                        acct_info.dup_pages++;
                    }
                }
            } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
                acct_info.dup_pages++;
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_COMPRESS);
                qemu_put_byte(f, 0);
                bytes_sent++;
                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
                 * page would be stale
                 */
                xbzrle_cache_zero_page(current_addr);
            } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
                bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
                                              offset, cont, last_stage);
                if (!last_stage) {
                    /* Can't send this cached data async, since the cache page
                     * might get updated before it gets to the wire
                     */
                    send_async = false;
                }
            }

            /* XBZRLE overflow or normal page */
            if (bytes_sent == -1) {
                bytes_sent = save_block_hdr(f, block, offset, cont,
                                            RAM_SAVE_FLAG_PAGE);
                if (send_async) {
                    qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
                } else {
                    qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                }
                bytes_sent += TARGET_PAGE_SIZE;
                acct_info.norm_pages++;
            }

            XBZRLE_cache_unlock();
            /* if page is unmodified, continue to the next */
            if (bytes_sent > 0) {
                last_sent_block = block;
                break;
            }
        }
    }
    last_seen_block = block;
    last_offset = offset;

    return bytes_sent;
}

static uint64_t bytes_transferred;

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        total += block->length;
    }

    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_end(void)
{
    if (migration_bitmap) {
        memory_global_dirty_log_stop();
        g_free(migration_bitmap);
        migration_bitmap = NULL;
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void ram_migration_cancel(void *opaque)
{
    migration_end();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    mig_throttle_on = false;
    dirty_rate_high_cnt = 0;

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap, 0, ram_bitmap_pages);

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = 0;
    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        uint64_t block_pages;

        block_pages = block->length >> TARGET_PAGE_BITS;
        migration_dirty_pages += block_pages;
    }

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_iothread();

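    /* The stream written below is: the total RAM size (flagged with
     * MEM_SIZE), then one (idstr, length) record per RAMBlock, and
     * finally an EOS marker. */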
    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->length);
    }

    qemu_mutex_unlock_ramlist();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int total_sent = 0;

    qemu_mutex_lock_ramlist();

    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, false);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        total_sent += bytes_sent;
        acct_info.iterations++;
        check_guest_throttling();
        /* we want to check in the 1st loop, just in case it was the 1st
           time and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }

    qemu_mutex_unlock_ramlist();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    bytes_transferred += total_sent;

    /*
     * Do not count these 8 bytes into total_sent, so that we can
     * return 0 if no page had been dirtied.
     */
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return total_sent;
}

static int ram_save_complete(QEMUFile *f, void *opaque)
{
    qemu_mutex_lock_ramlist();
    migration_bitmap_sync();

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int bytes_sent;

        bytes_sent = ram_save_block(f, true);
        /* no more blocks to send */
        if (bytes_sent == 0) {
            break;
        }
        bytes_transferred += bytes_sent;
    }

    ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    migration_end();

    qemu_mutex_unlock_ramlist();
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (remaining_size < max_size) {
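        /* The estimate suggests we can converge; re-sync the dirty bitmap
         * under the iothread lock so the value we return is accurate. */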
        qemu_mutex_lock_iothread();
        migration_bitmap_sync();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }
    return remaining_size;
}

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    int ret, rc = 0;
    unsigned int xh_len;
    int xh_flags;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);

    /* decode RLE */
    ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
                               TARGET_PAGE_SIZE);
    if (ret == -1) {
        fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
        rc = -1;
    } else if (ret > TARGET_PAGE_SIZE) {
        fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
                ret, TARGET_PAGE_SIZE);
        abort();
    }

    return rc;
}

static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

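    /* RAM_SAVE_FLAG_CONTINUE means this page belongs to the same RAMBlock
     * as the previous one, so the cached block pointer can be reused. */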
    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            fprintf(stderr, "Ack, bad migration stream!\n");
            return NULL;
        }

        return memory_region_get_ram_ptr(block->mr) + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
        if (!strncmp(id, block->idstr, sizeof(id))) {
            return memory_region_get_ram_ptr(block->mr) + offset;
        }
    }

    fprintf(stderr, "Can't find block %s!\n", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    ram_addr_t addr;
    int flags, ret = 0;
    int error;
    static uint64_t seq_iter;

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
            /* Synchronize RAM block list */
            char id[256];
            ram_addr_t length;
            ram_addr_t total_ram_bytes = addr;

            while (total_ram_bytes) {
                RAMBlock *block;
                uint8_t len;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                    if (!strncmp(id, block->idstr, sizeof(id))) {
                        if (block->length != length) {
                            fprintf(stderr,
                                    "Length mismatch: %s: " RAM_ADDR_FMT
                                    " in != " RAM_ADDR_FMT "\n", id, length,
                                    block->length);
                            ret = -EINVAL;
                            goto done;
                        }
                        break;
                    }
                }

                if (!block) {
                    fprintf(stderr, "Unknown ramblock \"%s\", cannot "
                            "accept migration\n", id);
                    ret = -EINVAL;
                    goto done;
                }

                total_ram_bytes -= length;