Commit ad1db3b3 authored by Anthony Liguori

Merge remote-tracking branch 'kwolf/for-anthony' into staging

* kwolf/for-anthony: (26 commits)
  qemu-io: Use bdrv_drain_all instead of qemu_aio_flush
  megasas: Use bdrv_drain_all instead of qemu_aio_flush
  vmdk: Fix data corruption bug in WRITE and READ handling
  fdc: remove last usage of FD_STATE_SEEK
  fdc: fix typo in zero constant
  fdc: remove double affectation of FD_MSR_CMDBUSY flag
  fdc-tests: add tests for VERIFY command
  fdc: implement VERIFY command
  fdc-test: Check READ ID
  fdc: fix false FD_SR0_SEEK
  fdc: fix FD_SR0_SEEK for initial seek on DMA transfers
  fdc: fix FD_SR0_SEEK for non-DMA transfers and multi sectors transfers
  fdc: use status0 field instead of a local variable
  fdc-test: add tests for non-DMA READ command
  fdc-test: insert media before fuzzing registers
  fdc-test: split test_media_change() test, so insert part can be reused
  fdc: Remove status0 parameter from fdctrl_set_fifo()
  aio: rename AIOPool to AIOCBInfo
  aio: use g_slice_alloc() for AIOCB pooling
  aio: switch aiocb_size type int -> size_t
  ...
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
parents 5cc82c2d e7c8b094
@@ -553,6 +553,7 @@ T: git git://github.com/kvaneesh/QEMU.git
 virtio-blk
 M: Kevin Wolf <kwolf@redhat.com>
+M: Stefan Hajnoczi <stefanha@redhat.com>
 S: Supported
 F: hw/virtio-blk*
@@ -583,6 +584,7 @@ F: audio/
 Block
 M: Kevin Wolf <kwolf@redhat.com>
+M: Stefan Hajnoczi <stefanha@redhat.com>
 S: Supported
 F: block*
 F: block/
......
@@ -3521,7 +3521,7 @@ int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
 {
-    acb->pool->cancel(acb);
+    acb->aiocb_info->cancel(acb);
 }
 /* block I/O throttling */
@@ -3711,7 +3711,7 @@ static void bdrv_aio_cancel_em(BlockDriverAIOCB *blockacb)
     qemu_aio_release(acb);
 }
-static AIOPool bdrv_em_aio_pool = {
+static const AIOCBInfo bdrv_em_aiocb_info = {
     .aiocb_size = sizeof(BlockDriverAIOCBSync),
     .cancel = bdrv_aio_cancel_em,
 };
@@ -3740,7 +3740,7 @@ static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
 {
     BlockDriverAIOCBSync *acb;
-    acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
     acb->is_write = is_write;
     acb->qiov = qiov;
     acb->bounce = qemu_blockalign(bs, qiov->size);
@@ -3785,7 +3785,7 @@ static void bdrv_aio_co_cancel_em(BlockDriverAIOCB *blockacb)
     qemu_aio_flush();
 }
-static AIOPool bdrv_em_co_aio_pool = {
+static const AIOCBInfo bdrv_em_co_aiocb_info = {
     .aiocb_size = sizeof(BlockDriverAIOCBCoroutine),
     .cancel = bdrv_aio_co_cancel_em,
 };
@@ -3828,7 +3828,7 @@ static BlockDriverAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
     Coroutine *co;
     BlockDriverAIOCBCoroutine *acb;
-    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
     acb->req.sector = sector_num;
     acb->req.nb_sectors = nb_sectors;
     acb->req.qiov = qiov;
@@ -3858,7 +3858,7 @@ BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
     Coroutine *co;
     BlockDriverAIOCBCoroutine *acb;
-    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
     qemu_coroutine_enter(co, acb);
@@ -3884,7 +3884,7 @@ BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
-    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
     acb->req.sector = sector_num;
     acb->req.nb_sectors = nb_sectors;
     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
@@ -3904,18 +3904,13 @@ void bdrv_init_with_whitelist(void)
     bdrv_init();
 }
-void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                    BlockDriverCompletionFunc *cb, void *opaque)
 {
     BlockDriverAIOCB *acb;
-    if (pool->free_aiocb) {
-        acb = pool->free_aiocb;
-        pool->free_aiocb = acb->next;
-    } else {
-        acb = g_malloc0(pool->aiocb_size);
-        acb->pool = pool;
-    }
+    acb = g_slice_alloc(aiocb_info->aiocb_size);
+    acb->aiocb_info = aiocb_info;
     acb->bs = bs;
     acb->cb = cb;
     acb->opaque = opaque;
@@ -3924,10 +3919,8 @@ void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
 void qemu_aio_release(void *p)
 {
-    BlockDriverAIOCB *acb = (BlockDriverAIOCB *)p;
-    AIOPool *pool = acb->pool;
-    acb->next = pool->free_aiocb;
-    pool->free_aiocb = acb;
+    BlockDriverAIOCB *acb = p;
+    g_slice_free1(acb->aiocb_info->aiocb_size, acb);
 }
 /**************************************************************/
......
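The qemu_aio_get()/qemu_aio_release() pair above trades the hand-rolled per-pool freelist for GLib's slice allocator. One behavioral detail matters to callers: the old g_malloc0() path handed back zeroed memory, while g_slice_alloc() does not, so every AIOCB field must be initialized explicitly after allocation. A minimal standalone sketch of the pairing rule (illustrative types, not QEMU's):

    #include <glib.h>

    typedef struct Obj {
        size_t size;     /* remembered so the free side can pass it back */
        int payload;
    } Obj;

    int main(void)
    {
        /* g_slice_alloc() pools same-sized blocks but does NOT zero them */
        Obj *o = g_slice_alloc(sizeof(Obj));
        o->size = sizeof(Obj);
        o->payload = 42;

        /* g_slice_free1() must receive the exact size used at allocation */
        g_slice_free1(o->size, o);
        return 0;
    }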
@@ -41,7 +41,7 @@ typedef struct BlkdebugAIOCB {
 static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb);
-static AIOPool blkdebug_aio_pool = {
+static const AIOCBInfo blkdebug_aiocb_info = {
     .aiocb_size = sizeof(BlkdebugAIOCB),
     .cancel = blkdebug_aio_cancel,
 };
@@ -335,7 +335,7 @@ static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
         return NULL;
     }
-    acb = qemu_aio_get(&blkdebug_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&blkdebug_aiocb_info, bs, cb, opaque);
     acb->ret = -error;
     bh = qemu_bh_new(error_callback_bh, acb);
......
@@ -48,7 +48,7 @@ static void blkverify_aio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool blkverify_aio_pool = {
+static const AIOCBInfo blkverify_aiocb_info = {
     .aiocb_size = sizeof(BlkverifyAIOCB),
     .cancel = blkverify_aio_cancel,
 };
@@ -233,7 +233,7 @@ static BlkverifyAIOCB *blkverify_aio_get(BlockDriverState *bs, bool is_write,
                                          BlockDriverCompletionFunc *cb,
                                          void *opaque)
 {
-    BlkverifyAIOCB *acb = qemu_aio_get(&blkverify_aio_pool, bs, cb, opaque);
+    BlkverifyAIOCB *acb = qemu_aio_get(&blkverify_aiocb_info, bs, cb, opaque);
     acb->bh = NULL;
     acb->is_write = is_write;
......
@@ -438,7 +438,7 @@ static void curl_aio_cancel(BlockDriverAIOCB *blockacb)
     // Do we have to implement canceling? Seems to work without...
 }
-static AIOPool curl_aio_pool = {
+static const AIOCBInfo curl_aiocb_info = {
     .aiocb_size = sizeof(CURLAIOCB),
     .cancel = curl_aio_cancel,
 };
@@ -505,7 +505,7 @@ static BlockDriverAIOCB *curl_aio_readv(BlockDriverState *bs,
 {
     CURLAIOCB *acb;
-    acb = qemu_aio_get(&curl_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&curl_aiocb_info, bs, cb, opaque);
     acb->qiov = qiov;
     acb->sector_num = sector_num;
......
@@ -388,7 +388,7 @@ static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool gluster_aio_pool = {
+static const AIOCBInfo gluster_aiocb_info = {
     .aiocb_size = sizeof(GlusterAIOCB),
     .cancel = qemu_gluster_aio_cancel,
 };
@@ -439,7 +439,7 @@ static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
     size = nb_sectors * BDRV_SECTOR_SIZE;
     s->qemu_aio_count++;
-    acb = qemu_aio_get(&gluster_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = size;
     acb->ret = 0;
     acb->finished = NULL;
@@ -484,7 +484,7 @@ static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
     GlusterAIOCB *acb;
     BDRVGlusterState *s = bs->opaque;
-    acb = qemu_aio_get(&gluster_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
     acb->size = 0;
     acb->ret = 0;
     acb->finished = NULL;
......
@@ -133,7 +133,7 @@ iscsi_aio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool iscsi_aio_pool = {
+static const AIOCBInfo iscsi_aiocb_info = {
     .aiocb_size = sizeof(IscsiAIOCB),
     .cancel = iscsi_aio_cancel,
 };
@@ -234,7 +234,7 @@ iscsi_aio_writev(BlockDriverState *bs, int64_t sector_num,
     uint64_t lba;
     struct iscsi_data data;
-    acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
     trace_iscsi_aio_writev(iscsi, sector_num, nb_sectors, opaque, acb);
     acb->iscsilun = iscsilun;
@@ -325,7 +325,7 @@ iscsi_aio_readv(BlockDriverState *bs, int64_t sector_num,
     qemu_read_size = BDRV_SECTOR_SIZE * (size_t)nb_sectors;
-    acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
     trace_iscsi_aio_readv(iscsi, sector_num, nb_sectors, opaque, acb);
     acb->iscsilun = iscsilun;
@@ -430,7 +430,7 @@ iscsi_aio_flush(BlockDriverState *bs,
     struct iscsi_context *iscsi = iscsilun->iscsi;
     IscsiAIOCB *acb;
-    acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
     acb->iscsilun = iscsilun;
     acb->canceled = 0;
@@ -483,7 +483,7 @@ iscsi_aio_discard(BlockDriverState *bs,
     IscsiAIOCB *acb;
     struct unmap_list list[1];
-    acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
     acb->iscsilun = iscsilun;
     acb->canceled = 0;
@@ -558,7 +558,7 @@ static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
     assert(req == SG_IO);
-    acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&iscsi_aiocb_info, bs, cb, opaque);
     acb->iscsilun = iscsilun;
     acb->canceled = 0;
......
@@ -140,7 +140,7 @@ static void laio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool laio_pool = {
+static const AIOCBInfo laio_aiocb_info = {
     .aiocb_size = sizeof(struct qemu_laiocb),
     .cancel = laio_cancel,
 };
@@ -154,7 +154,7 @@ BlockDriverAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd,
     struct iocb *iocbs;
     off_t offset = sector_num * 512;
-    laiocb = qemu_aio_get(&laio_pool, bs, cb, opaque);
+    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
     laiocb->nbytes = nb_sectors * 512;
     laiocb->ctx = s;
     laiocb->ret = -EINPROGRESS;
......
@@ -301,7 +301,8 @@ static int alloc_refcount_block(BlockDriverState *bs,
     uint64_t last_table_size;
     uint64_t blocks_clusters;
     do {
-        uint64_t table_clusters = size_to_clusters(s, table_size);
+        uint64_t table_clusters =
+            size_to_clusters(s, table_size * sizeof(uint64_t));
         blocks_clusters = 1 +
             ((table_clusters + refcount_block_clusters - 1)
             / refcount_block_clusters);
......
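The qcow2 hunk above fixes a units bug: table_size counts 64-bit refcount table entries, while size_to_clusters() expects bytes, so the old call underestimated the table's cluster footprint by roughly a factor of eight as the table grew. A worked example; the helper below is a simplified stand-in (QEMU's real size_to_clusters() takes the BDRVQcowState rather than a raw cluster size):

    #include <stdint.h>
    #include <stdio.h>

    /* bytes -> clusters, rounded up */
    static uint64_t size_to_clusters(uint64_t cluster_size, uint64_t size)
    {
        return (size + cluster_size - 1) / cluster_size;
    }

    int main(void)
    {
        uint64_t cluster_size = 65536; /* qcow2 default */
        uint64_t table_size = 16384;   /* 8-byte refcount table entries */

        /* buggy: the entry count passed as if it were a byte count -> 1 cluster */
        printf("old: %llu\n", (unsigned long long)
               size_to_clusters(cluster_size, table_size));
        /* fixed: 16384 * 8 = 131072 bytes -> 2 clusters */
        printf("new: %llu\n", (unsigned long long)
               size_to_clusters(cluster_size, table_size * sizeof(uint64_t)));
        return 0;
    }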
@@ -30,7 +30,7 @@ static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool qed_aio_pool = {
+static const AIOCBInfo qed_aiocb_info = {
     .aiocb_size = sizeof(QEDAIOCB),
     .cancel = qed_aio_cancel,
 };
@@ -1311,7 +1311,7 @@ static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                        BlockDriverCompletionFunc *cb,
                                        void *opaque, int flags)
 {
-    QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque);
+    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);
     trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                         opaque, flags);
......
@@ -570,7 +570,7 @@ static void qemu_rbd_aio_cancel(BlockDriverAIOCB *blockacb)
     acb->cancelled = 1;
 }
-static AIOPool rbd_aio_pool = {
+static const AIOCBInfo rbd_aiocb_info = {
     .aiocb_size = sizeof(RBDAIOCB),
     .cancel = qemu_rbd_aio_cancel,
 };
@@ -672,7 +672,7 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
     BDRVRBDState *s = bs->opaque;
-    acb = qemu_aio_get(&rbd_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&rbd_aiocb_info, bs, cb, opaque);
     acb->cmd = cmd;
     acb->qiov = qiov;
     if (cmd == RBD_AIO_DISCARD) {
......
@@ -420,7 +420,7 @@ static void sd_aio_cancel(BlockDriverAIOCB *blockacb)
     acb->canceled = true;
 }
-static AIOPool sd_aio_pool = {
+static const AIOCBInfo sd_aiocb_info = {
     .aiocb_size = sizeof(SheepdogAIOCB),
     .cancel = sd_aio_cancel,
 };
@@ -431,7 +431,7 @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
 {
     SheepdogAIOCB *acb;
-    acb = qemu_aio_get(&sd_aio_pool, bs, cb, opaque);
+    acb = qemu_aio_get(&sd_aiocb_info, bs, cb, opaque);
     acb->qiov = qiov;
......
@@ -1092,6 +1092,7 @@ static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
     BDRVVmdkState *s = bs->opaque;
     int ret;
     uint64_t n, index_in_cluster;
+    uint64_t extent_begin_sector, extent_relative_sector_num;
     VmdkExtent *extent = NULL;
     uint64_t cluster_offset;
@@ -1103,7 +1104,9 @@ static int vmdk_read(BlockDriverState *bs, int64_t sector_num,
         ret = get_cluster_offset(
                             bs, extent, NULL,
                             sector_num << 9, 0, &cluster_offset);
-        index_in_cluster = sector_num % extent->cluster_sectors;
+        extent_begin_sector = extent->end_sector - extent->sectors;
+        extent_relative_sector_num = sector_num - extent_begin_sector;
+        index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
         n = extent->cluster_sectors - index_in_cluster;
         if (n > nb_sectors) {
             n = nb_sectors;
@@ -1154,6 +1157,7 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
     VmdkExtent *extent = NULL;
     int n, ret;
     int64_t index_in_cluster;
+    uint64_t extent_begin_sector, extent_relative_sector_num;
     uint64_t cluster_offset;
     VmdkMetaData m_data;
@@ -1196,7 +1200,9 @@ static int vmdk_write(BlockDriverState *bs, int64_t sector_num,
         if (ret) {
             return -EINVAL;
         }
-        index_in_cluster = sector_num % extent->cluster_sectors;
+        extent_begin_sector = extent->end_sector - extent->sectors;
+        extent_relative_sector_num = sector_num - extent_begin_sector;
+        index_in_cluster = extent_relative_sector_num % extent->cluster_sectors;
         n = extent->cluster_sectors - index_in_cluster;
         if (n > nb_sectors) {
             n = nb_sectors;
......
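The vmdk corruption fixed above came from taking the modulo of the image-global sector number, which is only correct for the first extent or when an extent happens to start on a cluster boundary. The fix rebases sector_num to the extent's first sector (end_sector minus the extent's length) before computing the intra-cluster index. A worked example with hypothetical geometry (the numbers are illustrative, not from the commit):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical second extent covering sectors [100, 300),
         * with 128 sectors per cluster. */
        uint64_t cluster_sectors = 128;
        uint64_t extent_sectors = 200;    /* length of this extent */
        uint64_t end_sector = 300;        /* image-global end of the extent */
        uint64_t sector_num = 150;        /* image-global request */

        /* old, buggy: global sector modulo cluster size */
        uint64_t bad = sector_num % cluster_sectors;            /* 22 */

        /* fixed: rebase to the extent before the modulo */
        uint64_t begin = end_sector - extent_sectors;           /* 100 */
        uint64_t good = (sector_num - begin) % cluster_sectors; /* 50 */

        printf("buggy=%llu fixed=%llu\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }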
@@ -131,7 +131,7 @@ static void win32_aio_cancel(BlockDriverAIOCB *blockacb)
     }
 }
-static AIOPool win32_aio_pool = {
+static const AIOCBInfo win32_aiocb_info = {
     .aiocb_size = sizeof(QEMUWin32AIOCB),
     .cancel = win32_aio_cancel,
 };
@@ -145,7 +145,7 @@ BlockDriverAIOCB *win32_aio_submit(BlockDriverState *bs,
     uint64_t offset = sector_num * 512;
     DWORD rc;
-    waiocb = qemu_aio_get(&win32_aio_pool, bs, cb, opaque);
+    waiocb = qemu_aio_get(&win32_aiocb_info, bs, cb, opaque);
     waiocb->nbytes = nb_sectors * 512;
     waiocb->qiov = qiov;
     waiocb->is_read = (type == QEMU_AIO_READ);
@@ -167,11 +167,11 @@ BlockDriverAIOCB *win32_aio_submit(BlockDriverState *bs,
         waiocb->is_linear = true;
     }
-    waiocb->ov = (OVERLAPPED) {
-        .Offset = (DWORD) offset,
-        .OffsetHigh = (DWORD) (offset >> 32),
-        .hEvent = event_notifier_get_handle(&aio->e)
-    };
+    memset(&waiocb->ov, 0, sizeof(waiocb->ov));
+    waiocb->ov.Offset = (DWORD)offset;
+    waiocb->ov.OffsetHigh = (DWORD)(offset >> 32);
+    waiocb->ov.hEvent = event_notifier_get_handle(&aio->e);
     aio->count++;
     if (type & QEMU_AIO_READ) {
......
@@ -195,7 +195,7 @@ static void dma_aio_cancel(BlockDriverAIOCB *acb)
     dma_complete(dbs, 0);
 }
-static AIOPool dma_aio_pool = {
+static const AIOCBInfo dma_aiocb_info = {
     .aiocb_size = sizeof(DMAAIOCB),
     .cancel = dma_aio_cancel,
 };
@@ -205,7 +205,7 @@ BlockDriverAIOCB *dma_bdrv_io(
     DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
     void *opaque, DMADirection dir)
 {
-    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
+    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);
     trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));
......
This diff is collapsed.
@@ -336,7 +336,7 @@ static void trim_aio_cancel(BlockDriverAIOCB *acb)
     qemu_aio_release(iocb);
 }
-static AIOPool trim_aio_pool = {
+static const AIOCBInfo trim_aiocb_info = {
     .aiocb_size = sizeof(TrimAIOCB),
     .cancel = trim_aio_cancel,
 };
@@ -360,7 +360,7 @@ BlockDriverAIOCB *ide_issue_trim(BlockDriverState *bs,
     TrimAIOCB *iocb;
     int i, j, ret;
-    iocb = qemu_aio_get(&trim_aio_pool, bs, cb, opaque);
+    iocb = qemu_aio_get(&trim_aiocb_info, bs, cb, opaque);
     iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
     iocb->ret = 0;
......
@@ -1296,7 +1296,7 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
 static int megasas_cache_flush(MegasasState *s, MegasasCmd *cmd)
 {
-    qemu_aio_flush();
+    bdrv_drain_all();
     return MFI_STAT_OK;
 }
......
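Several commits in this pull replace qemu_aio_flush() with bdrv_drain_all(). Both wait for in-flight requests to finish, but bdrv_drain_all() also restarts throttled requests on every BlockDriverState so none are left stranded, which is why the old helper was retired. A conceptual, self-contained sketch of the loop's shape (the helpers and counter are hypothetical stand-ins, not QEMU code):

    #include <stdbool.h>
    #include <stdio.h>

    static int in_flight = 3;   /* pretend three requests are pending */

    static void restart_throttled_requests(void)
    {
        /* in QEMU this re-queues requests held back by I/O throttling */
    }

    static bool service_one_completion(void)
    {
        if (in_flight > 0) {
            in_flight--;        /* pretend one request completed */
        }
        return in_flight > 0;   /* true while work remains */
    }

    /* Keep servicing completions and restarting throttled requests
     * until nothing is in flight anywhere. */
    static void drain_all(void)
    {
        bool busy;
        do {
            restart_throttled_requests();
            busy = service_one_completion();
        } while (busy);
    }

    int main(void)
    {
        drain_all();
        printf("in flight: %d\n", in_flight);   /* 0 */
        return 0;
    }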
@@ -21,21 +21,19 @@
 typedef struct BlockDriverAIOCB BlockDriverAIOCB;
 typedef void BlockDriverCompletionFunc(void *opaque, int ret);
-typedef struct AIOPool {
+typedef struct AIOCBInfo {
     void (*cancel)(BlockDriverAIOCB *acb);
-    int aiocb_size;
-    BlockDriverAIOCB *free_aiocb;
-} AIOPool;
+    size_t aiocb_size;
+} AIOCBInfo;
 struct BlockDriverAIOCB {
-    AIOPool *pool;
+    const AIOCBInfo *aiocb_info;
     BlockDriverState *bs;
     BlockDriverCompletionFunc *cb;
     void *opaque;
-    BlockDriverAIOCB *next;
 };
-void *qemu_aio_get(AIOPool *pool, BlockDriverState *bs,
+void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                    BlockDriverCompletionFunc *cb, void *opaque);
 void qemu_aio_release(void *p);
......
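Every driver conversion in this series follows the recipe the header change implies: declare a const AIOCBInfo holding the concrete AIOCB's size and cancel hook, and pass its address to qemu_aio_get(). A condensed sketch of the pattern against the declarations above (MyAIOCB, my_aio_cancel and my_aio_submit are illustrative names, not a real driver):

    typedef struct MyAIOCB {
        BlockDriverAIOCB common;   /* first member, so the cast below is valid */
        int my_state;
    } MyAIOCB;

    static void my_aio_cancel(BlockDriverAIOCB *blockacb)
    {
        MyAIOCB *acb = (MyAIOCB *)blockacb;
        acb->my_state = -1;        /* driver-specific cancellation */
    }

    static const AIOCBInfo my_aiocb_info = {
        .aiocb_size = sizeof(MyAIOCB),
        .cancel = my_aio_cancel,
    };

    static BlockDriverAIOCB *my_aio_submit(BlockDriverState *bs,
                                           BlockDriverCompletionFunc *cb,
                                           void *opaque)
    {
        MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
        acb->my_state = 0;         /* g_slice_alloc() memory is not zeroed */
        return &acb->common;
    }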
@@ -421,6 +421,7 @@ snapshots.
 * disk_images_nbd::            NBD access
 * disk_images_sheepdog::       Sheepdog disk images
 * disk_images_iscsi::          iSCSI LUNs
+* disk_images_gluster::        GlusterFS disk images
 @end menu
 @node disk_images_quickstart
@@ -814,7 +815,55 @@ qemu-system-i386 -iscsi initiator-name=iqn.qemu.test:my-initiator \
     -cdrom iscsi://127.0.0.1/iqn.qemu.test/2
 @end example
+@node disk_images_gluster
+@subsection GlusterFS disk images
+GlusterFS is a user-space distributed file system.
+You can boot from a GlusterFS disk image with the command:
+@example
+qemu-system-x86_64 -drive file=gluster[+@var{transport}]://[@var{server}[:@var{port}]]/@var{volname}/@var{image}[?socket=...]
+@end example
+@var{gluster} is the protocol.
+@var{transport} specifies the transport type used to connect to the gluster
+management daemon (glusterd). Valid transport types are
+tcp, unix and rdma. If a transport type isn't specified, tcp is assumed.
+@var{server} specifies the server where the volume file specification for
+the given volume resides. This can be either a hostname, an IPv4 address,
+or an IPv6 address. An IPv6 address needs to be within square brackets [ ].
+If the transport type is unix, the @var{server} field should not be specified.
+Instead, the @var{socket} field needs to be populated with the path to the
+Unix domain socket.
+@var{port} is the port number on which glusterd is listening. It is optional;
+if not specified, QEMU sends 0, which makes gluster use the default port.
+If the transport type is unix, @var{port} should not be specified.
+@var{volname} is the name of the gluster volume which contains the disk image.
+@var{image} is the path to the actual disk image that resides on the gluster
+volume.
+You can create a GlusterFS disk image with the command:
+@example
+qemu-img create gluster://@var{server}/@var{volname}/@var{image} @var{size}
+@end example
+Examples
+@example
+qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img
+qemu-system-x86_64 -drive file=gluster+tcp://1.2.3.4/testvol/a.img
+qemu-system-x86_64 -drive file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
+qemu-system-x86_64 -drive file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
+qemu-system-x86_64 -drive file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
+qemu-system-x86_64 -drive file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
+qemu-system-x86_64 -drive file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
+qemu-system-x86_64 -drive file=gluster+rdma://1.2.3.4:24007/testvol/a.img
+@end example
 @node pcsys_network
 @section Network emulation
......
@@ -1362,7 +1362,7 @@ static int aio_write_f(int argc, char **argv)
 static int aio_flush_f(int argc, char **argv)
 {
-    qemu_aio_flush();
+    bdrv_drain_all();
     return 0;
 }
......
@@ -2054,6 +2054,23 @@ qemu-system-i386 --drive file=sheepdog:192.0.2.1:30000:MyVirtualMachine
 See also @url{http://www.osrg.net/sheepdog/}.
+@item GlusterFS
+GlusterFS is a user-space distributed file system.
+QEMU supports the use of GlusterFS volumes for hosting VM disk images over
+the TCP, Unix domain socket and RDMA transport protocols.
+Syntax for specifying a VM disk image on a GlusterFS volume is
+@example
+gluster[+transport]://[server[:port]]/volname/image[?socket=...]
+@end example
+Example
+@example
+qemu-system-x86_64 --drive file=gluster://192.0.2.1/testvol/a.img
+@end example
+See also @url{http://www.gluster.org}.
 @end table
 ETEXI
......
@@ -48,13 +48,17 @@ enum {
 enum {
     CMD_SENSE_INT = 0x08,
+    CMD_READ_ID = 0x0a,
     CMD_SEEK = 0x0f,
+    CMD_VERIFY = 0x16,
     CMD_READ = 0xe6,
     CMD_RELATIVE_SEEK_OUT = 0x8f,
     CMD_RELATIVE_SEEK_IN = 0xcf,
 };
 enum {
+    BUSY = 0x10,
+    NONDMA = 0x20,
     RQM = 0x80,
     DIO = 0x40,
@@ -110,7 +114,7 @@ static void ack_irq(uint8_t *pcn)
     g_assert(!get_irq(FLOPPY_IRQ));
 }
-static uint8_t send_read_command(void)
+static uint8_t send_read_command(uint8_t cmd)
 {
     uint8_t drive = 0;
     uint8_t head = 0;
@@ -126,7 +130,7 @@ static uint8_t send_read_command(uint8_t cmd)
     uint8_t ret = 0;
-    floppy_send(CMD_READ);
+    floppy_send(cmd);
     floppy_send(head << 2 | drive);
     g_assert(!get_irq(FLOPPY_IRQ));
     floppy_send(cyl);
@@ -152,7 +156,70 @@ static uint8_t send_read_command(uint8_t cmd)
     }
     st0 = floppy_recv();
-    if (st0 != 0x60) {
+    if (st0 != 0x40) {
         ret = 1;
     }
     floppy_recv();
     floppy_recv();
     floppy_recv();
     floppy_recv();
     floppy_recv();
     floppy_recv();
     return ret;
 }
+static uint8_t send_read_no_dma_command(int nb_sect, uint8_t expected_st0)
+{
+    uint8_t drive = 0;
+    uint8_t head = 0;
+    uint8_t cyl = 0;
+    uint8_t sect_addr = 1;
+    uint8_t sect_size = 2;
+    uint8_t eot = nb_sect;
+    uint8_t gap = 0x1b;
+    uint8_t gpl = 0xff;
+    uint8_t msr = 0;
+    uint8_t st0;
+    uint8_t ret = 0;
+    floppy_send(CMD_READ);
+    floppy_send(head << 2 | drive);
+    g_assert(!get_irq(FLOPPY_IRQ));
+    floppy_send(cyl);
+    floppy_send(head);
+    floppy_send(sect_addr);
+    floppy_send(sect_size);
+    floppy_send(eot);
+    floppy_send(gap);
+    floppy_send(gpl);
+    uint16_t i = 0;
+    uint8_t n = 2;
+    for (; i < n; i++) {
+        msr = inb(FLOPPY_BASE + reg_msr);
+        if (msr == (BUSY | NONDMA | DIO | RQM)) {
+            break;
+        }
+        sleep(1);
+    }
+    if (i >= n) {
+        return 1;
+    }
+    /* Non-DMA mode */
+    for (i = 0; i < 512 * 2 * nb_sect; i++) {
+        msr = inb(FLOPPY_BASE + reg_msr);
+        assert_bit_set(msr, BUSY | RQM | DIO);
+        inb(FLOPPY_BASE + reg_fifo);
+    }
+    st0 = floppy_recv();
+    if (st0 != expected_st0) {
+        ret = 1;
+    }
@@ -213,11 +280,11 @@ static void test_read_without_media(void)
 {
     uint8_t ret;
-    ret = send_read_command();
+    ret = send_read_command(CMD_READ);
     g_assert(ret == 0);
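The polling in these tests leans on the floppy controller's Main Status Register layout, which the test's enum mirrors: RQM (0x80) means the FIFO is ready for a transfer, DIO (0x40) means the data direction is controller-to-CPU, NONDMA (0x20) marks the execution phase of a non-DMA command, and BUSY (0x10) means a command is in progress. A small standalone decoder for an MSR value (illustrative, not part of the test):

    #include <stdint.h>
    #include <stdio.h>

    enum { MSR_BUSY = 0x10, MSR_NONDMA = 0x20, MSR_DIO = 0x40, MSR_RQM = 0x80 };

    static void decode_msr(uint8_t msr)
    {
        printf("RQM=%d DIO=%d NONDMA=%d BUSY=%d\n",
               !!(msr & MSR_RQM),     /* FIFO ready for a data transfer */
               !!(msr & MSR_DIO),     /* 1: controller -> CPU */
               !!(msr & MSR_NONDMA),  /* non-DMA execution phase */
               !!(msr & MSR_BUSY));   /* command in progress */
    }

    int main(void)
    {
        /* the value the non-DMA read test polls for: 0xf0 */
        decode_msr(MSR_BUSY | MSR_NONDMA | MSR_DIO | MSR_RQM);
        return 0;
    }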