block-migration.c 16.9 KB
Newer Older
lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
17
#include "qemu-queue.h"
18
#include "qemu-timer.h"
19
#include "monitor.h"
lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
20
#include "block-migration.h"
21
#include "migration.h"
lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
22
23
#include <assert.h>

24
/* Size in bytes of one migration chunk: one dirty-bitmap granule. */
#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

/* Flags carried in the low bits of the 64-bit header word that precedes
 * every record on the wire (the high bits hold the sector number or the
 * progress percentage). */
#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

/* Upper bound on sectors examined per bdrv_is_allocated() call when
 * skipping unallocated ranges of shared-base images. */
#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

/* Debug trace macro: compiled to nothing unless DEBUG_BLK_MIGRATION
 * is defined above. */
#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

42
43
44
45
46
/* Per-block-device migration state. */
typedef struct BlkMigDevState {
    BlockDriverState *bs;       /* the device being migrated */
    int bulk_completed;         /* bulk phase done for this device */
    int shared_base;            /* skip sectors allocated in the base image */
    int64_t cur_sector;         /* next sector for the bulk phase */
    int64_t cur_dirty;          /* scan cursor for the dirty phase */
    int64_t completed_sectors;  /* progress accounting for the bulk phase */
    int64_t total_sectors;      /* device length in sectors */
    int64_t dirty;              /* NOTE(review): not referenced in this file — confirm whether it is dead */
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;

lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
54
55
56
57
58
59
60
61
/* One in-flight chunk: the data buffer, its origin, and AIO bookkeeping. */
typedef struct BlkMigBlock {
    uint8_t *buf;               /* BLOCK_SIZE bytes of chunk data */
    BlkMigDevState *bmds;       /* device this chunk belongs to */
    int64_t sector;             /* first sector of the chunk */
    struct iovec iov;           /* single-element iovec over buf */
    QEMUIOVector qiov;          /* wraps iov for bdrv_aio_readv() */
    BlockDriverAIOCB *aiocb;    /* pending async read request */
    int ret;                    /* read completion status */
    int64_t time;               /* submit timestamp, then read latency (ns) */
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

/* Global state of the outgoing block migration. */
typedef struct BlkMigState {
    int blk_enable;             /* migrate block devices at all? */
    int shared_base;            /* destination shares the base images */
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;  /* devices */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;  /* completed reads awaiting send */
    int submitted;              /* async reads currently in flight */
    int read_done;              /* reads completed but not yet sent */
    int transferred;            /* blocks sent over the stream */
    int64_t total_sector_sum;   /* sum of all device sizes, for progress */
    int prev_progress;          /* last progress percentage reported */
    int bulk_completed;         /* bulk phase done on all devices */
    long double total_time;     /* accumulated read latency (ns) */
    int reads;                  /* number of completed reads */
} BlkMigState;

static BlkMigState block_mig_state;
lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
82

83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
/* Emit one chunk on the migration stream: 64-bit sector/flags header,
 * length-prefixed device name, then the raw BLOCK_SIZE payload. */
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int name_len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name, prefixed by its length */
    name_len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, name_len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, name_len);

    /* chunk payload */
    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
/* Nonzero while at least one device is registered for block migration. */
int blk_mig_active(void)
{
    return QSIMPLEQ_EMPTY(&block_mig_state.bmds_list) ? 0 : 1;
}

/* Total bytes already copied across all migrating devices. */
uint64_t blk_mig_bytes_transferred(void)
{
    uint64_t total = 0;
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        total += bmds->completed_sectors;
    }
    return total << BDRV_SECTOR_BITS;
}

/* Bytes still to copy: total size minus what was already transferred. */
uint64_t blk_mig_bytes_remaining(void)
{
    uint64_t total = blk_mig_bytes_total();

    return total - blk_mig_bytes_transferred();
}

/* Combined size in bytes of every device taking part in the migration. */
uint64_t blk_mig_bytes_total(void)
{
    uint64_t total = 0;
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        total += bmds->total_sectors;
    }
    return total << BDRV_SECTOR_BITS;
}

131
132
133
134
135
136
137
138
139
140
141
142
/* Fold one completed read's latency into the bandwidth statistics. */
static inline void add_avg_read_time(int64_t time)
{
    block_mig_state.total_time += time;
    block_mig_state.reads++;
}

/* Estimated read bandwidth: bytes read divided by accumulated read time.
 * Requires at least one completed read (total_time != 0). */
static inline long double compute_read_bwidth(void)
{
    long double bytes_read = block_mig_state.reads * BLOCK_SIZE;

    assert(block_mig_state.total_time != 0);
    return bytes_read / block_mig_state.total_time;
}

lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
143
144
145
/* Completion callback for the async reads submitted by
 * mig_save_device_bulk()/mig_save_device_dirty(): record the read
 * latency for bandwidth estimation and queue the block so flush_blks()
 * can send it. */
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    /* blk->time held the submit timestamp; convert to elapsed time. */
    blk->time = qemu_get_clock_ns(rt_clock) - blk->time;

    add_avg_read_time(blk->time);

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    /* One fewer read in flight, one more ready to send. */
    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

160
/* Submit an async read for the next bulk chunk
 * (BDRV_SECTORS_PER_DIRTY_CHUNK sectors) of one device.
 *
 * Returns 1 when the device's bulk phase is finished (all sectors
 * submitted), 0 otherwise.  On submission failure the error is flagged
 * on @f and 0 is returned. */
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        /* Skip ranges not allocated in this image; the destination gets
         * them from the shared base image instead. */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    /* Round down to a dirty-chunk boundary. */
    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    /* Clamp the final chunk to the end of the device. */
    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    /* Submit timestamp; blk_mig_read_cb() converts it to a latency. */
    blk->time = qemu_get_clock_ns(rt_clock);

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        goto error;
    }
    block_mig_state.submitted++;

    /* Clear dirty bits for this range so the dirty phase does not resend
     * what the bulk phase already covers. */
    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

/* Enable (enable != 0) or disable dirty-block tracking on every device
 * registered for the migration. */
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

233
/* bdrv_iterate() callback: register one writable, non-empty device for
 * migration and announce it on the monitor. */
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    Monitor *mon = opaque;
    BlkMigDevState *bmds;
    int64_t sectors;

    /* Read-only devices do not take part in the migration. */
    if (bdrv_is_read_only(bs)) {
        return;
    }

    sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
    if (sectors <= 0) {
        return;
    }

    bmds = qemu_mallocz(sizeof(BlkMigDevState));
    bmds->bs = bs;
    bmds->bulk_completed = 0;
    bmds->total_sectors = sectors;
    bmds->completed_sectors = 0;
    bmds->shared_base = block_mig_state.shared_base;

    block_mig_state.total_sector_sum += sectors;

    if (bmds->shared_base) {
        monitor_printf(mon, "Start migration for %s with shared base "
                            "image\n",
                       bs->device_name);
    } else {
        monitor_printf(mon, "Start full migration for %s\n",
                       bs->device_name);
    }

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
}

/* Reset all per-migration counters, then build the device list. */
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.reads = 0;
    block_mig_state.total_time = 0;

    /* Register every eligible device via the iterator callback. */
    bdrv_iterate(init_blk_migration_it, mon);
}

Liran Schour's avatar
Liran Schour committed
281
/* Advance the bulk phase by one chunk: find the first device whose bulk
 * copy is not finished, submit its next chunk, and report aggregate
 * progress on the stream and the monitor.
 *
 * Returns 1 while any device still has bulk work, 0 once the bulk phase
 * is complete on all devices.
 *
 * Fix: the progress computation divided by total_sector_sum without
 * checking it for zero; with no migratable devices (empty bmds_list)
 * this was a division by zero.  Report 100% in that case. */
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

314
/* Restart every device's dirty-sector scan from sector 0 so the next
 * mig_save_device_dirty() pass covers the whole device again. */
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Transfer at most one dirty chunk of @bmds, scanning from
 * bmds->cur_dirty.  With is_async != 0 the read is submitted
 * asynchronously and sent later by flush_blks(); otherwise it is read
 * and sent synchronously (used in the final stage with the VM stopped).
 *
 * Returns 1 when the scan reached the end of the device, 0 when a dirty
 * chunk was handled and more may remain.  On read failure the error is
 * flagged on @f and 0 is returned. */
static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
                                 BlkMigDevState *bmds, int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bdrv_get_dirty(bmds->bs, sector)) {

            /* Clamp the final chunk to the end of the device. */
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = qemu_malloc(sizeof(BlkMigBlock));
            blk->buf = qemu_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                /* Submit timestamp for bandwidth accounting; see
                 * blk_mig_read_cb(). */
                blk->time = qemu_get_clock_ns(rt_clock);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                if (!blk->aiocb) {
                    goto error;
                }
                block_mig_state.submitted++;
            } else {
                /* Synchronous path: read, send, and free immediately. */
                if (bdrv_read(bmds->bs, sector, blk->buf,
                              nr_sectors) < 0) {
                    goto error;
                }
                blk_send(f, blk);

                qemu_free(blk->buf);
                qemu_free(blk);
            }

            /* NOTE(review): cur_dirty is not advanced past this chunk;
             * the next call rescans it, relying on the dirty bit having
             * been cleared below. */
            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        /* Chunk was clean: advance the persistent cursor and continue. */
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

/* Walk the device list and let the first device that still has dirty
 * data transfer one chunk.  Returns 1 if some device had dirty work
 * left, 0 when every device's dirty scan is finished. */
static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int found = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
            found = 1;
            break;
        }
    }

    return found;
}

/* Drain the list of completed reads onto the migration stream,
 * respecting the rate limit.  Stops early when the limit is hit or when
 * the head block completed with an error (the error is flagged on @f;
 * the failed block stays queued and is freed by blk_mig_cleanup()). */
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

432
433
434
435
436
437
438
439
440
441
442
443
/* Total bytes still marked dirty across all migrating devices. */
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t num_dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        num_dirty += bdrv_get_dirty_count(bmds->bs);
    }

    /* The dirty count is in chunks; convert to bytes. */
    return num_dirty * BLOCK_SIZE;
}

lirans@il.ibm.com's avatar
lirans@il.ibm.com committed
444
445
/* Decide whether stage 2 (the iterative phase) can end: either nothing
 * is dirty any more, or the remaining dirty data is estimated — using
 * the measured read bandwidth — to fit within the configured maximum
 * downtime. */
static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        /* NOTE(review): compute_read_bwidth() asserts total_time != 0;
         * presumably at least one read has completed by the time the
         * bulk phase is done — confirm for the empty-device edge case. */
        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage2 because we think that we can finish remaing work
               below max_downtime */

            return 1;
        }
    }

    return 0;
}

470
/* Release all migration state and stop dirty tracking.
 *
 * Fix: dirty tracking must be disabled *before* the device list is
 * freed — set_dirty_tracking() walks bmds_list, so calling it after the
 * list has been emptied (as the previous code did) iterated an empty
 * list and left dirty tracking enabled on every device. */
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    set_dirty_tracking(0);

    /* Free per-device state. */
    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds);
    }

    /* Free any blocks that were read but never sent. */
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    monitor_printf(mon, "\n");
}

491
/* Live-savevm handler for block migration.
 *
 * stage < 0: migration cancelled - free all state.
 * stage 1:   build the device list and start dirty tracking.
 * stage 2:   iterative phase - bulk copy first, then dirty chunks,
 *            all rate limited.
 * stage 3:   final phase (VM stopped) - synchronously flush the
 *            remaining dirty chunks.
 *
 * Returns nonzero when this handler is done for the current stage. */
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    /* Send whatever async reads have completed so far. */
    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    blk_mig_reset_dirty_cursor();

    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
               qemu_file_get_rate_limit(f)) {
            if (block_mig_state.bulk_completed == 0) {
                /* first finish the bulk phase */
                if (blk_mig_save_bulked_block(mon, f) == 0) {
                    /* finished saving bulk on all devices */
                    block_mig_state.bulk_completed = 1;
                }
            } else {
                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
                    /* no more dirty blocks */
                    break;
                }
            }
        }

        flush_blks(f);

        if (qemu_file_has_error(f)) {
            blk_mig_cleanup(mon);
            return 0;
        }
    }

    if (stage == 3) {
        /* we know for sure that save bulk is completed and
           all async read completed */
        assert(block_mig_state.submitted == 0);

        /* Synchronously transfer every remaining dirty chunk. */
        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

        if (qemu_file_has_error(f)) {
            return 0;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

/* Incoming side: read records from the stream and write device blocks
 * into the matching local block device until the EOS flag is seen.
 *
 * Returns 0 on success, a negative errno on error. */
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        /* Low bits carry the record flags; the rest is the sector
         * number (or the percentage, for progress records). */
        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            int ret;
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);

            qemu_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            /* Progress record: addr holds the percentage. */
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

/* Migration-parameter hook: record whether block devices should be
 * migrated and whether the destination shares the base images. */
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable = blk_enable | shared_base;
}

/* One-time setup: initialize the queues and register the live block
 * migration handlers with the savevm layer. */
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, block_set_params,
                         block_save_live, NULL, block_load, &block_mig_state);
}