Commit 9a33c0c8 authored by Peter Maydell

Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

# gpg: Signature made Mon 03 Nov 2014 11:50:53 GMT using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/block-pull-request: (53 commits)
  block: declare blockjobs and dataplane friends!
  block: let commit blockjob run in BDS AioContext
  block: let mirror blockjob run in BDS AioContext
  block: let stream blockjob run in BDS AioContext
  block: let backup blockjob run in BDS AioContext
  block: add bdrv_drain()
  blockjob: add block_job_defer_to_main_loop()
  blockdev: add note that block_job_cb() must be thread-safe
  blockdev: acquire AioContext in blockdev_mark_auto_del()
  blockdev: acquire AioContext in do_qmp_query_block_jobs_one()
  block: acquire AioContext in generic blockjob QMP commands
  iotests: Expand test 061
  block/qcow2: Simplify shared L2 handling in amend
  block/qcow2: Make get_refcount() global
  block/qcow2: Implement status CB for amend
  qemu-img: Fix insignificant memleak
  qemu-img: Add progress output for amend
  block: Add status callback to bdrv_amend_options()
  block: qemu-iotest 107 supports NFS
  iotests: Add test for qcow2's bdrv_make_empty
  ...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
parents eb5f222b b112a65c
@@ -519,6 +519,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
return;
}
bs->bl.opt_transfer_length = bs->file->bl.opt_transfer_length;
bs->bl.max_transfer_length = bs->file->bl.max_transfer_length;
bs->bl.opt_mem_alignment = bs->file->bl.opt_mem_alignment;
} else {
bs->bl.opt_mem_alignment = 512;
@@ -533,6 +534,9 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.opt_transfer_length =
MAX(bs->bl.opt_transfer_length,
bs->backing_hd->bl.opt_transfer_length);
bs->bl.max_transfer_length =
MIN_NON_ZERO(bs->bl.max_transfer_length,
bs->backing_hd->bl.max_transfer_length);
bs->bl.opt_mem_alignment =
MAX(bs->bl.opt_mem_alignment,
bs->backing_hd->bl.opt_mem_alignment);
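For readers tracing the new limit merging: MIN_NON_ZERO treats zero as "no limit", so an unset max_transfer_length on either the overlay or the backing file never erases a real limit on the other. A minimal sketch, assuming the macro's usual definition in QEMU's block_int.h:

    /* Zero means "unlimited": only non-zero operands take part in the
     * minimum, so merging an unset limit with a real one keeps the
     * real one. */
    #define MIN_NON_ZERO(a, b) \
        ((a) == 0 ? (b) : ((b) == 0 ? (a) : MIN((a), (b))))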
@@ -1900,6 +1904,34 @@ static bool bdrv_requests_pending(BlockDriverState *bs)
return false;
}
static bool bdrv_drain_one(BlockDriverState *bs)
{
bool bs_busy;
bdrv_flush_io_queue(bs);
bdrv_start_throttled_reqs(bs);
bs_busy = bdrv_requests_pending(bs);
bs_busy |= aio_poll(bdrv_get_aio_context(bs), bs_busy);
return bs_busy;
}
/*
* Wait for pending requests to complete on a single BlockDriverState subtree
*
* See the warning in bdrv_drain_all(). This function can only be called if
* you are sure nothing can generate I/O because you have op blockers
* installed.
*
* Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
* AioContext.
*/
void bdrv_drain(BlockDriverState *bs)
{
while (bdrv_drain_one(bs)) {
/* Keep iterating */
}
}
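Since bdrv_drain() requires the caller to hold the BDS AioContext (unlike bdrv_drain_all(), which acquires it per device), a minimal hypothetical caller looks like this:

    /* Hypothetical caller sketch: take the context, drain, release.
     * bdrv_drain() loops on bdrv_drain_one() until no requests remain. */
    AioContext *ctx = bdrv_get_aio_context(bs);
    aio_context_acquire(ctx);
    bdrv_drain(bs);
    aio_context_release(ctx);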
/*
* Wait for pending requests to complete across all BlockDriverStates
*
@@ -1923,16 +1955,10 @@ void bdrv_drain_all(void)
QTAILQ_FOREACH(bs, &bdrv_states, device_list) {
AioContext *aio_context = bdrv_get_aio_context(bs);
bool bs_busy;
aio_context_acquire(aio_context);
bdrv_flush_io_queue(bs);
bdrv_start_throttled_reqs(bs);
bs_busy = bdrv_requests_pending(bs);
bs_busy |= aio_poll(aio_context, bs_busy);
busy |= bdrv_drain_one(bs);
aio_context_release(aio_context);
busy |= bs_busy;
}
}
}
@@ -3540,10 +3566,10 @@ static void send_qmp_error_event(BlockDriverState *bs,
BlockErrorAction action,
bool is_read, int error)
{
BlockErrorAction ac;
IoOperationType optype;
ac = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
qapi_event_send_block_io_error(bdrv_get_device_name(bs), ac, action,
optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
qapi_event_send_block_io_error(bdrv_get_device_name(bs), optype, action,
bdrv_iostatus_is_enabled(bs),
error == ENOSPC, strerror(error),
&error_abort);
@@ -4442,6 +4468,11 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
merge = 0;
}
if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
reqs[i].nb_sectors > bs->bl.max_transfer_length) {
merge = 0;
}
if (merge) {
size_t size;
QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
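The new max_transfer_length guard keeps multiwrite_merge() from producing a combined request larger than the device accepts. A worked example with hypothetical numbers:

    /* With bs->bl.max_transfer_length = 2048 sectors:
     *   1536 + 1536 = 3072 > 2048  -> merge = 0, requests stay separate
     *    512 +  512 = 1024 <= 2048 -> still merged as before */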
@@ -5750,12 +5781,13 @@ void bdrv_add_before_write_notifier(BlockDriverState *bs,
notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
}
int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts)
int bdrv_amend_options(BlockDriverState *bs, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb)
{
if (!bs->drv->bdrv_amend_options) {
return -ENOTSUP;
}
return bs->drv->bdrv_amend_options(bs, opts);
return bs->drv->bdrv_amend_options(bs, opts, status_cb);
}
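The added status_cb threads a progress callback down to the driver's amend implementation. A hedged sketch of a caller; the (offset, total_work_size) signature is our assumption from the BlockDriverAmendStatusCB name and the qemu-img progress-output commit in this pull, not something this hunk shows:

    /* Hypothetical progress callback; parameter names assumed. */
    static void amend_status_cb(BlockDriverState *bs,
                                int64_t offset, int64_t total_work_size)
    {
        fprintf(stderr, "amend: %" PRId64 "/%" PRId64 "\n",
                offset, total_work_size);
    }

    ret = bdrv_amend_options(bs, opts, &amend_status_cb);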
/* This function will be called by the bdrv_recurse_is_first_non_filter method
@@ -5818,13 +5850,19 @@ bool bdrv_is_first_non_filter(BlockDriverState *candidate)
BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
{
BlockDriverState *to_replace_bs = bdrv_find_node(node_name);
AioContext *aio_context;
if (!to_replace_bs) {
error_setg(errp, "Node name '%s' not found", node_name);
return NULL;
}
aio_context = bdrv_get_aio_context(to_replace_bs);
aio_context_acquire(aio_context);
if (bdrv_op_is_blocked(to_replace_bs, BLOCK_OP_TYPE_REPLACE, errp)) {
return NULL;
to_replace_bs = NULL;
goto out;
}
/* We don't want arbitrary node of the BDS chain to be replaced only the top
@@ -5834,9 +5872,12 @@ BlockDriverState *check_to_replace_node(const char *node_name, Error **errp)
*/
if (!bdrv_is_first_non_filter(to_replace_bs)) {
error_setg(errp, "Only top most non filter can be replaced");
return NULL;
to_replace_bs = NULL;
goto out;
}
out:
aio_context_release(aio_context);
return to_replace_bs;
}
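The acquire/release pair added here follows the usual single-exit locking shape: every failure path funnels through the out: label, so the context is released exactly once on all paths. The general form, with hypothetical names:

    /* Generic single-exit locking pattern used above. */
    aio_context_acquire(aio_context);
    if (first_check_failed)  { result = NULL; goto out; }
    if (second_check_failed) { result = NULL; goto out; }
    out:
    aio_context_release(aio_context);
    return result;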
@@ -9,7 +9,7 @@ block-obj-y += block-backend.o snapshot.o qapi.o
block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
block-obj-$(CONFIG_POSIX) += raw-posix.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += null.o
block-obj-y += null.o mirror.o
block-obj-y += nbd.o nbd-client.o sheepdog.o
block-obj-$(CONFIG_LIBISCSI) += iscsi.o
@@ -23,7 +23,6 @@ block-obj-y += accounting.o
common-obj-y += stream.o
common-obj-y += commit.o
common-obj-y += mirror.o
common-obj-y += backup.o
iscsi.o-cflags := $(LIBISCSI_CFLAGS)
@@ -227,9 +227,25 @@ static BlockErrorAction backup_error_action(BackupBlockJob *job,
}
}
typedef struct {
int ret;
} BackupCompleteData;
static void backup_complete(BlockJob *job, void *opaque)
{
BackupBlockJob *s = container_of(job, BackupBlockJob, common);
BackupCompleteData *data = opaque;
bdrv_unref(s->target);
block_job_completed(job, data->ret);
g_free(data);
}
static void coroutine_fn backup_run(void *opaque)
{
BackupBlockJob *job = opaque;
BackupCompleteData *data;
BlockDriverState *bs = job->common.bs;
BlockDriverState *target = job->target;
BlockdevOnError on_target_error = job->on_target_error;
@@ -344,9 +360,10 @@ static void coroutine_fn backup_run(void *opaque)
hbitmap_free(job->bitmap);
bdrv_iostatus_disable(target);
bdrv_unref(target);
block_job_completed(&job->common, ret);
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&job->common, backup_complete, data);
}
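backup is the first of three jobs in this pull (commit and mirror follow below) converted to finish through block_job_defer_to_main_loop(), so that block_job_completed() runs in the main loop rather than in the job's AioContext. The pattern, reduced to its skeleton from the code above:

    /* Completion skeleton shared by backup/commit/mirror after this
     * series: the coroutine packs its result and defers; the callback
     * then runs in the main loop, where unrefs and completion are safe. */
    typedef struct { int ret; } CompleteData;

    static void job_complete(BlockJob *job, void *opaque)
    {
        CompleteData *data = opaque;
        block_job_completed(job, data->ret);
        g_free(data);
    }

    /* ...at the end of the job coroutine: */
    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, job_complete, data);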
void backup_start(BlockDriverState *bs, BlockDriverState *target,
@@ -195,6 +195,8 @@ static const char *event_names[BLKDBG_EVENT_MAX] = {
[BLKDBG_PWRITEV] = "pwritev",
[BLKDBG_PWRITEV_ZERO] = "pwritev_zero",
[BLKDBG_PWRITEV_DONE] = "pwritev_done",
[BLKDBG_EMPTY_IMAGE_PREPARE] = "empty_image_prepare",
};
static int get_event_by_name(const char *name, BlkDebugEvent *event)
@@ -60,17 +60,50 @@ static int coroutine_fn commit_populate(BlockDriverState *bs,
return 0;
}
static void coroutine_fn commit_run(void *opaque)
typedef struct {
int ret;
} CommitCompleteData;
static void commit_complete(BlockJob *job, void *opaque)
{
CommitBlockJob *s = opaque;
CommitBlockJob *s = container_of(job, CommitBlockJob, common);
CommitCompleteData *data = opaque;
BlockDriverState *active = s->active;
BlockDriverState *top = s->top;
BlockDriverState *base = s->base;
BlockDriverState *overlay_bs;
int ret = data->ret;
if (!block_job_is_cancelled(&s->common) && ret == 0) {
/* success */
ret = bdrv_drop_intermediate(active, top, base, s->backing_file_str);
}
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
if (s->base_flags != bdrv_get_flags(base)) {
bdrv_reopen(base, s->base_flags, NULL);
}
overlay_bs = bdrv_find_overlay(active, top);
if (overlay_bs && s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
}
g_free(s->backing_file_str);
block_job_completed(&s->common, ret);
g_free(data);
}
static void coroutine_fn commit_run(void *opaque)
{
CommitBlockJob *s = opaque;
CommitCompleteData *data;
BlockDriverState *top = s->top;
BlockDriverState *base = s->base;
int64_t sector_num, end;
int ret = 0;
int n = 0;
void *buf;
void *buf = NULL;
int bytes_written = 0;
int64_t base_len;
@@ -78,18 +111,18 @@ static void coroutine_fn commit_run(void *opaque)
if (s->common.len < 0) {
goto exit_restore_reopen;
goto out;
}
ret = base_len = bdrv_getlength(base);
if (base_len < 0) {
goto exit_restore_reopen;
goto out;
}
if (base_len < s->common.len) {
ret = bdrv_truncate(base, s->common.len);
if (ret) {
goto exit_restore_reopen;
goto out;
}
}
@@ -128,7 +161,7 @@ wait:
if (s->on_error == BLOCKDEV_ON_ERROR_STOP ||
s->on_error == BLOCKDEV_ON_ERROR_REPORT||
(s->on_error == BLOCKDEV_ON_ERROR_ENOSPC && ret == -ENOSPC)) {
goto exit_free_buf;
goto out;
} else {
n = 0;
continue;
@@ -140,27 +173,12 @@ wait:
ret = 0;
if (!block_job_is_cancelled(&s->common) && sector_num == end) {
/* success */
ret = bdrv_drop_intermediate(active, top, base, s->backing_file_str);
}
exit_free_buf:
out:
qemu_vfree(buf);
exit_restore_reopen:
/* restore base open flags here if appropriate (e.g., change the base back
* to r/o). These reopens do not need to be atomic, since we won't abort
* even on failure here */
if (s->base_flags != bdrv_get_flags(base)) {
bdrv_reopen(base, s->base_flags, NULL);
}
overlay_bs = bdrv_find_overlay(active, top);
if (overlay_bs && s->orig_overlay_flags != bdrv_get_flags(overlay_bs)) {
bdrv_reopen(overlay_bs, s->orig_overlay_flags, NULL);
}
g_free(s->backing_file_str);
block_job_completed(&s->common, ret);
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&s->common, commit_complete, data);
}
static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
......
@@ -64,6 +64,7 @@ static CURLMcode __curl_multi_socket_action(CURLM *multi_handle,
#define SECTOR_SIZE 512
#define READ_AHEAD_DEFAULT (256 * 1024)
#define CURL_TIMEOUT_DEFAULT 5
#define CURL_TIMEOUT_MAX 10000
#define FIND_RET_NONE 0
#define FIND_RET_OK 1
@@ -112,7 +113,7 @@ typedef struct BDRVCURLState {
char *url;
size_t readahead_size;
bool sslverify;
int timeout;
uint64_t timeout;
char *cookie;
bool accept_range;
AioContext *aio_context;
@@ -390,7 +391,7 @@ static CURLState *curl_init_state(BlockDriverState *bs, BDRVCURLState *s)
if (s->cookie) {
curl_easy_setopt(state->curl, CURLOPT_COOKIE, s->cookie);
}
curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, s->timeout);
curl_easy_setopt(state->curl, CURLOPT_TIMEOUT, (long)s->timeout);
curl_easy_setopt(state->curl, CURLOPT_WRITEFUNCTION,
(void *)curl_read_cb);
curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state);
@@ -546,6 +547,10 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
s->timeout = qemu_opt_get_number(opts, CURL_BLOCK_OPT_TIMEOUT,
CURL_TIMEOUT_DEFAULT);
if (s->timeout > CURL_TIMEOUT_MAX) {
error_setg(errp, "timeout parameter is too large or negative");
goto out_noclean;
}
s->sslverify = qemu_opt_get_bool(opts, CURL_BLOCK_OPT_SSLVERIFY, true);
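Note why a single upper-bound test is enough to reject both cases named in the error message: s->timeout is now uint64_t, so a negative option value wraps to a huge unsigned number and trips the same comparison. Illustratively:

    uint64_t t = (uint64_t)-1;   /* a "negative" input wraps around */
    /* t == 18446744073709551615 > CURL_TIMEOUT_MAX (10000), so the one
     * check above catches oversized and negative values alike. */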
@@ -362,6 +362,12 @@ static int coroutine_fn iscsi_co_writev(BlockDriverState *bs,
return -EINVAL;
}
if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
"of %d sectors", nb_sectors, bs->bl.max_transfer_length);
return -EINVAL;
}
lba = sector_qemu2lun(sector_num, iscsilun);
num_sectors = sector_qemu2lun(nb_sectors, iscsilun);
iscsi_co_init_iscsitask(iscsilun, &iTask);
@@ -529,6 +535,12 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
return -EINVAL;
}
if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
"of %d sectors", nb_sectors, bs->bl.max_transfer_length);
return -EINVAL;
}
if (iscsilun->lbprz && nb_sectors >= ISCSI_CHECKALLOC_THRES &&
!iscsi_allocationmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
int64_t ret;
@@ -1489,31 +1501,44 @@ static void iscsi_close(BlockDriverState *bs)
memset(iscsilun, 0, sizeof(IscsiLun));
}
static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
static int sector_limits_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
IscsiLun *iscsilun = bs->opaque;
return MIN(sector_lun2qemu(sector, iscsilun), INT_MAX / 2 + 1);
}
static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
{
/* We don't actually refresh here, but just return data queried in
* iscsi_open(): iscsi targets don't change their limits. */
IscsiLun *iscsilun = bs->opaque;
uint32_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
if (iscsilun->bl.max_xfer_len) {
max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
}
bs->bl.max_transfer_length = sector_limits_lun2qemu(max_xfer_len, iscsilun);
if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff) {
bs->bl.max_discard = sector_lun2qemu(iscsilun->bl.max_unmap,
iscsilun);
bs->bl.max_discard =
sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun);
}
bs->bl.discard_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
iscsilun);
bs->bl.discard_alignment =
sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
}
if (iscsilun->bl.max_ws_len < 0xffffffff) {
bs->bl.max_write_zeroes = sector_lun2qemu(iscsilun->bl.max_ws_len,
iscsilun);
bs->bl.max_write_zeroes =
sector_limits_lun2qemu(iscsilun->bl.max_ws_len, iscsilun);
}
if (iscsilun->lbp.lbpws) {
bs->bl.write_zeroes_alignment = sector_lun2qemu(iscsilun->bl.opt_unmap_gran,
iscsilun);
bs->bl.write_zeroes_alignment =
sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
}
bs->bl.opt_transfer_length = sector_lun2qemu(iscsilun->bl.opt_xfer_len,
iscsilun);
bs->bl.opt_transfer_length =
sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
}
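sector_limits_lun2qemu() converts LUN block counts into 512-byte sector counts and clamps them at INT_MAX / 2 + 1 (0x40000000), since the bs->bl limit fields are plain ints. Our reading, not stated in the patch: the clamp is a power of two, so a capped limit keeps whatever alignment downstream code may assume, and it leaves headroom for int arithmetic on sector counts.

    /* Illustrative: a 16-byte-CDB LUN advertises up to 0xffffffff blocks
     * per transfer; unclamped, that exceeds INT_MAX and would overflow
     * the int-typed bs->bl.max_transfer_length. */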
/* Since iscsi_open() ignores bdrv_flags, there is nothing to do here in
@@ -45,6 +45,7 @@ typedef struct MirrorBlockJob {
int64_t sector_num;
int64_t granularity;
size_t buf_size;
int64_t bdev_length;
unsigned long *cow_bitmap;
BdrvDirtyBitmap *dirty_bitmap;
HBitmapIter hbi;
@@ -54,6 +55,7 @@ typedef struct MirrorBlockJob {
unsigned long *in_flight_bitmap;
int in_flight;
int sectors_in_flight;
int ret;
} MirrorBlockJob;
@@ -87,6 +89,7 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);
s->in_flight--;
s->sectors_in_flight -= op->nb_sectors;
iov = op->qiov.iov;
for (i = 0; i < op->qiov.niov; i++) {
MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
@@ -98,8 +101,11 @@ static void mirror_iteration_done(MirrorOp *op, int ret)
chunk_num = op->sector_num / sectors_per_chunk;
nb_chunks = op->nb_sectors / sectors_per_chunk;
bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
if (s->cow_bitmap && ret >= 0) {
bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
if (ret >= 0) {
if (s->cow_bitmap) {
bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
}
s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
}
qemu_iovec_destroy(&op->qiov);
@@ -172,7 +178,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
hbitmap_next_sector = s->sector_num;
sector_num = s->sector_num;
sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
end = s->common.len >> BDRV_SECTOR_BITS;
end = s->bdev_length / BDRV_SECTOR_SIZE;
/* Extend the QEMUIOVector to include all adjacent blocks that will
* be copied in this operation.
@@ -284,6 +290,7 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
/* Copy the dirty cluster. */
s->in_flight++;
s->sectors_in_flight += nb_sectors;
trace_mirror_one_iteration(s, sector_num, nb_sectors);
bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
mirror_read_complete, op);
@@ -314,9 +321,56 @@ static void mirror_drain(MirrorBlockJob *s)
}
}
typedef struct {
int ret;
} MirrorExitData;
static void mirror_exit(BlockJob *job, void *opaque)
{
MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
MirrorExitData *data = opaque;
AioContext *replace_aio_context = NULL;
if (s->to_replace) {
replace_aio_context = bdrv_get_aio_context(s->to_replace);
aio_context_acquire(replace_aio_context);
}
if (s->should_complete && data->ret == 0) {
BlockDriverState *to_replace = s->common.bs;
if (s->to_replace) {
to_replace = s->to_replace;
}
if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
}
bdrv_swap(s->target, to_replace);
if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
/* drop the bs loop chain formed by the swap: break the loop then
* trigger the unref from the top one */
BlockDriverState *p = s->base->backing_hd;
bdrv_set_backing_hd(s->base, NULL);
bdrv_unref(p);
}
}
if (s->to_replace) {
bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
error_free(s->replace_blocker);
bdrv_unref(s->to_replace);
}
if (replace_aio_context) {
aio_context_release(replace_aio_context);
}
g_free(s->replaces);
bdrv_unref(s->target);
block_job_completed(&s->common, data->ret);
g_free(data);
}
static void coroutine_fn mirror_run(void *opaque)
{
MirrorBlockJob *s = opaque;
MirrorExitData *data;
BlockDriverState *bs = s->common.bs;
int64_t sector_num, end, sectors_per_chunk, length;
uint64_t last_pause_ns;
@@ -329,11 +383,11 @@ static void coroutine_fn mirror_run(void *opaque)
goto immediate_exit;
}
s->common.len = bdrv_getlength(bs);
if (s->common.len < 0) {
ret = s->common.len;
s->bdev_length = bdrv_getlength(bs);
if (s->bdev_length < 0) {
ret = s->bdev_length;
goto immediate_exit;
} else if (s->common.len == 0) {
} else if (s->bdev_length == 0) {
/* Report BLOCK_JOB_READY and wait for complete. */
block_job_event_ready(&s->common);
s->synced = true;
@@ -344,7 +398,7 @@ static void coroutine_fn mirror_run(void *opaque)
goto immediate_exit;
}
length = DIV_ROUND_UP(s->common.len, s->granularity);
length = DIV_ROUND_UP(s->bdev_length, s->granularity);
s->in_flight_bitmap = bitmap_new(length);
/* If we have no backing file yet in the destination, we cannot let
@@ -364,7 +418,7 @@ static void coroutine_fn mirror_run(void *opaque)
}
}
end = s->common.len >> BDRV_SECTOR_BITS;
end = s->bdev_length / BDRV_SECTOR_SIZE;
s->buf = qemu_try_blockalign(bs, s->buf_size);
if (s->buf == NULL) {
ret = -ENOMEM;
@@ -409,6 +463,12 @@ static void coroutine_fn mirror_run(void *opaque)
}
cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
/* s->common.offset contains the number of bytes already processed so
* far, cnt is the number of dirty sectors remaining and
* s->sectors_in_flight is the number of sectors currently being
* processed; together those are the current total operation length */
s->common.len = s->common.offset +
(cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;
/* Note that even when no rate limit is applied we need to yield
* periodically with no pending I/O so that qemu_aio_flush() returns.
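With this change the reported job length is recomputed on each iteration instead of staying fixed at the device size. A worked example with hypothetical numbers:

    /* offset = 1 GiB copied, cnt = 2048 dirty sectors, 512 in flight:
     *   len = 1 GiB + (2048 + 512) * 512 = 1 GiB + 1.25 MiB
     * so len grows when new writes dirty the source mid-mirror. */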
@@ -445,7 +505,6 @@ static void coroutine_fn mirror_run(void *opaque)
* report completion. This way, block-job-cancel will leave
* the target in a consistent state.
*/
s->common.offset = end * BDRV_SECTOR_SIZE;
if (!s->synced) {
block_job_event_ready(&s->common);
s->synced = true;
@@ -467,15 +526,13 @@ static void coroutine_fn mirror_run(void *opaque)
* mirror_populate runs.
*/
trace_mirror_before_drain(s, cnt);
bdrv_drain_all();
bdrv_drain(bs);
cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
}
ret = 0;
trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
if (!s->synced) {
/* Publish progress */
s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
if (block_job_is_cancelled(&s->common)) {
break;
@@ -510,31 +567,10 @@ immediate_exit:
g_free(s->in_flight_bitmap);
bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
bdrv_iostatus_disable(s->target);
if (s->should_complete && ret == 0) {
BlockDriverState *to_replace = s->common.bs;
if (s->to_replace) {
to_replace = s->to_replace;
}
if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
}
bdrv_swap(s->target, to_replace);
if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
/* drop the bs loop chain formed by the swap: break the loop then
* trigger the unref from the top one */
BlockDriverState *p = s->base->backing_hd;
bdrv_set_backing_hd(s->base, NULL);
bdrv_unref(p);
}
}
if (s->to_replace) {
bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
error_free(s->replace_blocker);
bdrv_unref(s->to_replace);
}
g_free(s->replaces);
bdrv_unref(s->target);
block_job_completed(&s->common, ret);
data = g_malloc(sizeof(*data));
data->ret = ret;
block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
@@ -574,16 +610,23 @@ static void mirror_complete(BlockJob *job, Error **errp)
/* check the target bs is not blocked and block all operations on it */
if (s->replaces) {
AioContext *replace_aio_context;
s->to_replace = check_to_replace_node(s->replaces, &local_err);
if (!s->to_replace) {
error_propagate(errp, local_err);
return;
}
replace_aio_context = bdrv_get_aio_context(s->to_replace);
aio_context_acquire(replace_aio_context);
error_setg(&s->replace_blocker,
"block device is in use by block-job-complete");
bdrv_op_block_all(s->to_replace, s->replace_blocker);
bdrv_ref(s->to_replace);
aio_context_release(replace_aio_context);
}
s->should_complete = true;
@@ -155,7 +155,7 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
offset = sector_num % s->tracks;
/* not allocated */
if ((index > s->catalog_size) || (s->catalog_bitmap[index] == 0))
if ((index >= s->catalog_size) || (s->catalog_bitmap[index] == 0))
return -1;
return
((uint64_t)s->catalog_bitmap[index] * s->off_multiplier + offset) * 512;
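The change from > to >= fixes an off-by-one: with catalog_size entries, the valid indices are 0 through catalog_size - 1, so the old test let index == catalog_size read one entry past catalog_bitmap.

    /* Example: catalog_size == 4 -> valid indices 0..3.
     * Old check (index > 4): index == 4 slipped through, out of bounds.
     * New check (index >= 4): correctly treated as unallocated. */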
@@ -91,7 +91,7 @@ static int load_refcount_block(BlockDriverState *bs,
* return value is the refcount of the cluster, negative values are -errno
* and indicate an error.
*/
static int get_refcount(BlockDriverState *bs, int64_t cluster_index)
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index)
{
BDRVQcowState *s = bs->opaque;
uint64_t refcount_table_index, block_index;
@@ -629,8 +629,7 @@ fail:
}
/*
* Increases or decreases the refcount of a given cluster by one.
* addend must be 1 or -1.
* Increases or decreases the refcount of a given cluster.
*
* If the return value is non-negative, it is the new refcount of the cluster.
* If it is negative, it is -errno and indicates an error.
@@ -649,7 +648,7 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
return ret;
}
return get_refcount(bs, cluster_index);
return qcow2_get_refcount(bs, cluster_index);
}
@@ -670,7 +669,7 @@ static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size)
retry:
for(