Commit ce1a14dc authored Aug 07, 2006 by pbrook
Dynamically allocate AIO Completion Blocks.
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@2098 c046a42c-6fe2-441c-8c8c-71466251a162
parent 51d6bae7
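The commit replaces the old per-device control blocks (bdrv_aio_new/qcow_aio_delete) with per-request allocation: each driver embeds the generic BlockDriverAIOCB as the first member of its own AIOCB, the generic layer allocates aiocb_size bytes per request via qemu_aio_get, and the driver hands back &acb->common. Below is a minimal stand-alone sketch of that embedding pattern using simplified stand-in types; aio_get, done, and DriverAIOCB are illustrative names, not QEMU's real identifiers.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the generic AIOCB and a driver AIOCB. */
typedef struct BlockDriverAIOCB {
    void (*cb)(void *opaque, int ret);   /* completion callback */
    void *opaque;                        /* caller's cookie */
} BlockDriverAIOCB;

typedef struct DriverAIOCB {
    BlockDriverAIOCB common;   /* must be first so the pointers can be cast */
    int nb_sectors;            /* driver-private request state */
} DriverAIOCB;

/* Per-request allocation keyed on the driver's AIOCB size, analogous to
 * qemu_aio_get() with aiocb_size = sizeof(DriverAIOCB). */
static void *aio_get(size_t aiocb_size, void (*cb)(void *, int), void *opaque)
{
    BlockDriverAIOCB *acb = calloc(1, aiocb_size);
    if (acb) {
        acb->cb = cb;
        acb->opaque = opaque;
    }
    return acb;
}

static void done(void *opaque, int ret)
{
    printf("request %s completed with %d\n", (const char *)opaque, ret);
}

int main(void)
{
    DriverAIOCB *acb = aio_get(sizeof(*acb), done, (void *)"r1");
    if (!acb)
        return 1;
    acb->nb_sectors = 0;                     /* pretend the request finished */
    acb->common.cb(acb->common.opaque, 0);   /* complete via the common part */
    free(acb);                               /* qemu_aio_release() analogue */
    return 0;
}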
6 changed files
block-qcow.c
@@ -522,7 +522,8 @@ static int qcow_write(BlockDriverState *bs, int64_t sector_num,
     return 0;
 }
 
-typedef struct {
+typedef struct QCowAIOCB {
+    BlockDriverAIOCB common;
     int64_t sector_num;
     uint8_t *buf;
     int nb_sectors;
@@ -530,223 +531,198 @@ typedef struct {
     uint64_t cluster_offset;
     uint8_t *cluster_data;
     BlockDriverAIOCB *hd_aiocb;
-    BlockDriverAIOCB *backing_hd_aiocb;
 } QCowAIOCB;
 
-static void qcow_aio_delete(BlockDriverAIOCB *acb);
-
-static int qcow_aio_new(BlockDriverAIOCB *acb)
-{
-    BlockDriverState *bs = acb->bs;
-    BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1;
-    acb1 = qemu_mallocz(sizeof(QCowAIOCB));
-    if (!acb1)
-        return -1;
-    acb->opaque = acb1;
-    acb1->hd_aiocb = bdrv_aio_new(s->hd);
-    if (!acb1->hd_aiocb)
-        goto fail;
-    if (bs->backing_hd) {
-        acb1->backing_hd_aiocb = bdrv_aio_new(bs->backing_hd);
-        if (!acb1->backing_hd_aiocb)
-            goto fail;
-    }
-    return 0;
- fail:
-    qcow_aio_delete(acb);
-    return -1;
-}
-
 static void qcow_aio_read_cb(void *opaque, int ret)
 {
-    BlockDriverAIOCB *acb = opaque;
-    BlockDriverState *bs = acb->bs;
+    QCowAIOCB *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
     BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1 = acb->opaque;
     int index_in_cluster;
 
+    acb->hd_aiocb = NULL;
     if (ret < 0) {
     fail:
-        acb->cb(acb->cb_opaque, ret);
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_release(acb);
         return;
     }
 
 redo:
     /* post process the read buffer */
-    if (!acb1->cluster_offset) {
+    if (!acb->cluster_offset) {
         /* nothing to do */
-    } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) {
+    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
         /* nothing to do */
     } else {
         if (s->crypt_method) {
-            encrypt_sectors(s, acb1->sector_num, acb1->buf, acb1->buf,
-                            acb1->n, 0,
+            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
+                            acb->n, 0,
                             &s->aes_decrypt_key);
         }
     }
 
-    acb1->nb_sectors -= acb1->n;
-    acb1->sector_num += acb1->n;
-    acb1->buf += acb1->n * 512;
+    acb->nb_sectors -= acb->n;
+    acb->sector_num += acb->n;
+    acb->buf += acb->n * 512;
 
-    if (acb1->nb_sectors == 0) {
+    if (acb->nb_sectors == 0) {
         /* request completed */
-        acb->cb(acb->cb_opaque, 0);
+        acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_release(acb);
         return;
     }
 
     /* prepare next AIO request */
-    acb1->cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9,
-                                              0, 0, 0, 0);
-    index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);
-    acb1->n = s->cluster_sectors - index_in_cluster;
-    if (acb1->n > acb1->nb_sectors)
-        acb1->n = acb1->nb_sectors;
-    if (!acb1->cluster_offset) {
+    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
+                                             0, 0, 0, 0);
+    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
+    acb->n = s->cluster_sectors - index_in_cluster;
+    if (acb->n > acb->nb_sectors)
+        acb->n = acb->nb_sectors;
+    if (!acb->cluster_offset) {
         if (bs->backing_hd) {
             /* read from the base image */
-            ret = bdrv_aio_read(acb1->backing_hd_aiocb, acb1->sector_num,
-                                acb1->buf, acb1->n, qcow_aio_read_cb, acb);
-            if (ret < 0)
+            acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
+                                acb->buf, acb->n, qcow_aio_read_cb, acb);
+            if (acb->hd_aiocb == NULL)
                 goto fail;
         } else {
             /* Note: in this case, no need to wait */
-            memset(acb1->buf, 0, 512 * acb1->n);
+            memset(acb->buf, 0, 512 * acb->n);
             goto redo;
         }
-    } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) {
+    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
        /* add AIO support for compressed blocks ? */
-        if (decompress_cluster(s, acb1->cluster_offset) < 0)
+        if (decompress_cluster(s, acb->cluster_offset) < 0)
            goto fail;
-        memcpy(acb1->buf,
-               s->cluster_cache + index_in_cluster * 512, 512 * acb1->n);
+        memcpy(acb->buf,
+               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
        goto redo;
    } else {
-        if ((acb1->cluster_offset & 511) != 0) {
+        if ((acb->cluster_offset & 511) != 0) {
            ret = -EIO;
            goto fail;
        }
-        ret = bdrv_aio_read(acb1->hd_aiocb,
-                            (acb1->cluster_offset >> 9) + index_in_cluster,
-                            acb1->buf, acb1->n, qcow_aio_read_cb, acb);
-        if (ret < 0)
+        acb->hd_aiocb = bdrv_aio_read(s->hd,
+                            (acb->cluster_offset >> 9) + index_in_cluster,
+                            acb->buf, acb->n, qcow_aio_read_cb, acb);
+        if (acb->hd_aiocb == NULL)
            goto fail;
    }
 }
 
-static int qcow_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,
-                         uint8_t *buf, int nb_sectors)
+static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
+        int64_t sector_num, uint8_t *buf, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
 {
-    QCowAIOCB *acb1 = acb->opaque;
+    QCowAIOCB *acb;
 
-    acb1->sector_num = sector_num;
-    acb1->buf = buf;
-    acb1->nb_sectors = nb_sectors;
-    acb1->n = 0;
-    acb1->cluster_offset = 0;
+    acb = qemu_aio_get(bs, cb, opaque);
+    if (!acb)
+        return NULL;
+    acb->hd_aiocb = NULL;
+    acb->sector_num = sector_num;
+    acb->buf = buf;
+    acb->nb_sectors = nb_sectors;
+    acb->n = 0;
+    acb->cluster_offset = 0;
 
     qcow_aio_read_cb(acb, 0);
-    return 0;
+    return &acb->common;
 }
 
 static void qcow_aio_write_cb(void *opaque, int ret)
 {
-    BlockDriverAIOCB *acb = opaque;
-    BlockDriverState *bs = acb->bs;
+    QCowAIOCB *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
     BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1 = acb->opaque;
     int index_in_cluster;
     uint64_t cluster_offset;
     const uint8_t *src_buf;
 
+    acb->hd_aiocb = NULL;
+
     if (ret < 0) {
     fail:
-        acb->cb(acb->cb_opaque, ret);
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_release(acb);
         return;
     }
 
-    acb1->nb_sectors -= acb1->n;
-    acb1->sector_num += acb1->n;
-    acb1->buf += acb1->n * 512;
+    acb->nb_sectors -= acb->n;
+    acb->sector_num += acb->n;
+    acb->buf += acb->n * 512;
 
-    if (acb1->nb_sectors == 0) {
+    if (acb->nb_sectors == 0) {
         /* request completed */
-        acb->cb(acb->cb_opaque, 0);
+        acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_release(acb);
         return;
     }
 
-    index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);
-    acb1->n = s->cluster_sectors - index_in_cluster;
-    if (acb1->n > acb1->nb_sectors)
-        acb1->n = acb1->nb_sectors;
-    cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9, 1, 0,
+    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
+    acb->n = s->cluster_sectors - index_in_cluster;
+    if (acb->n > acb->nb_sectors)
+        acb->n = acb->nb_sectors;
+    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
                                         index_in_cluster,
-                                        index_in_cluster + acb1->n);
+                                        index_in_cluster + acb->n);
     if (!cluster_offset || (cluster_offset & 511) != 0) {
         ret = -EIO;
         goto fail;
     }
     if (s->crypt_method) {
-        if (!acb1->cluster_data) {
-            acb1->cluster_data = qemu_mallocz(s->cluster_size);
-            if (!acb1->cluster_data) {
+        if (!acb->cluster_data) {
+            acb->cluster_data = qemu_mallocz(s->cluster_size);
+            if (!acb->cluster_data) {
                 ret = -ENOMEM;
                 goto fail;
             }
         }
-        encrypt_sectors(s, acb1->sector_num, acb1->cluster_data, acb1->buf,
-                        acb1->n, 1, &s->aes_encrypt_key);
-        src_buf = acb1->cluster_data;
+        encrypt_sectors(s, acb->sector_num, acb->cluster_data, acb->buf,
+                        acb->n, 1, &s->aes_encrypt_key);
+        src_buf = acb->cluster_data;
     } else {
-        src_buf = acb1->buf;
+        src_buf = acb->buf;
     }
-    ret = bdrv_aio_write(acb1->hd_aiocb,
-                         (cluster_offset >> 9) + index_in_cluster,
-                         src_buf, acb1->n, qcow_aio_write_cb, acb);
-    if (ret < 0)
+    acb->hd_aiocb = bdrv_aio_write(s->hd,
+                         (cluster_offset >> 9) + index_in_cluster,
+                         src_buf, acb->n, qcow_aio_write_cb, acb);
+    if (acb->hd_aiocb == NULL)
         goto fail;
 }
 
-static int qcow_aio_write(BlockDriverAIOCB *acb, int64_t sector_num,
-                          const uint8_t *buf, int nb_sectors)
+static BlockDriverAIOCB *qcow_aio_write(BlockDriverState *bs,
+        int64_t sector_num, const uint8_t *buf, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
 {
-    QCowAIOCB *acb1 = acb->opaque;
-    BlockDriverState *bs = acb->bs;
     BDRVQcowState *s = bs->opaque;
+    QCowAIOCB *acb;
 
     s->cluster_cache_offset = -1; /* disable compressed cache */
 
-    acb1->sector_num = sector_num;
-    acb1->buf = (uint8_t *)buf;
-    acb1->nb_sectors = nb_sectors;
-    acb1->n = 0;
+    acb = qemu_aio_get(bs, cb, opaque);
+    if (!acb)
+        return NULL;
+    acb->hd_aiocb = NULL;
+    acb->sector_num = sector_num;
+    acb->buf = (uint8_t *)buf;
+    acb->nb_sectors = nb_sectors;
+    acb->n = 0;
 
     qcow_aio_write_cb(acb, 0);
-    return 0;
-}
-
-static void qcow_aio_cancel(BlockDriverAIOCB *acb)
-{
-    QCowAIOCB *acb1 = acb->opaque;
-    if (acb1->hd_aiocb)
-        bdrv_aio_cancel(acb1->hd_aiocb);
-    if (acb1->backing_hd_aiocb)
-        bdrv_aio_cancel(acb1->backing_hd_aiocb);
+    return &acb->common;
 }
 
-static void qcow_aio_delete(BlockDriverAIOCB *acb)
+static void qcow_aio_cancel(BlockDriverAIOCB *blockacb)
 {
-    QCowAIOCB *acb1 = acb->opaque;
-    if (acb1->hd_aiocb)
-        bdrv_aio_delete(acb1->hd_aiocb);
-    if (acb1->backing_hd_aiocb)
-        bdrv_aio_delete(acb1->backing_hd_aiocb);
-    qemu_free(acb1->cluster_data);
-    qemu_free(acb1);
+    QCowAIOCB *acb = (QCowAIOCB *)blockacb;
+    if (acb->hd_aiocb)
+        bdrv_aio_cancel(acb->hd_aiocb);
+    qemu_aio_release(acb);
 }
 
 static void qcow_close(BlockDriverState *bs)
@@ -920,11 +896,10 @@ BlockDriver bdrv_qcow = {
     qcow_set_key,
     qcow_make_empty,
 
-    .bdrv_aio_new = qcow_aio_new,
     .bdrv_aio_read = qcow_aio_read,
     .bdrv_aio_write = qcow_aio_write,
     .bdrv_aio_cancel = qcow_aio_cancel,
-    .bdrv_aio_delete = qcow_aio_delete,
+    .aiocb_size = sizeof(QCowAIOCB),
     .bdrv_write_compressed = qcow_write_compressed,
     .bdrv_get_info = qcow_get_info,
 };
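After this change the driver table exports only aiocb_size plus the read/write/cancel entry points, so allocation moves entirely into the generic layer. The block.c side of the commit is not shown on this page; the sketch below is one plausible reading of how a generic allocator could use aiocb_size, including free-list recycling of released control blocks. All types and names here (AIOCBHeader, Driver, aiocb_get, aiocb_release) are stand-ins, not QEMU's actual implementation.

#include <stdlib.h>
#include <string.h>

/* Stand-in types; the real ones would live in the block layer headers. */
typedef struct AIOCBHeader {
    struct AIOCBHeader *next;   /* free-list link while recycled */
} AIOCBHeader;

typedef struct Driver {
    size_t aiocb_size;          /* sizeof the driver's AIOCB struct */
    AIOCBHeader *free_aiocb;    /* recycled control blocks */
} Driver;

/* Allocate a control block of the driver's declared size, reusing a
 * previously released one when possible. */
static void *aiocb_get(Driver *drv)
{
    AIOCBHeader *acb;
    if (drv->free_aiocb) {
        acb = drv->free_aiocb;
        drv->free_aiocb = acb->next;
        memset(acb, 0, drv->aiocb_size);   /* fresh state for the new request */
    } else {
        acb = calloc(1, drv->aiocb_size);
    }
    return acb;
}

/* Return a finished control block to the driver's free list. */
static void aiocb_release(Driver *drv, void *p)
{
    AIOCBHeader *acb = p;
    acb->next = drv->free_aiocb;
    drv->free_aiocb = acb;
}

int main(void)
{
    Driver drv = { .aiocb_size = 64, .free_aiocb = NULL };
    void *a = aiocb_get(&drv);
    aiocb_release(&drv, a);
    void *b = aiocb_get(&drv);   /* reuses the block just released */
    aiocb_release(&drv, b);
    /* free-list blocks are intentionally kept until exit in this toy model */
    return 0;
}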
block-qcow2.c
@@ -791,7 +791,8 @@ static int qcow_write(BlockDriverState *bs, int64_t sector_num,
     return 0;
 }
 
-typedef struct {
+typedef struct QCowAIOCB {
+    BlockDriverAIOCB common;
     int64_t sector_num;
     uint8_t *buf;
     int nb_sectors;
@@ -799,229 +800,211 @@ typedef struct {
     uint64_t cluster_offset;
     uint8_t *cluster_data;
     BlockDriverAIOCB *hd_aiocb;
-    BlockDriverAIOCB *backing_hd_aiocb;
 } QCowAIOCB;
 
-static void qcow_aio_delete(BlockDriverAIOCB *acb);
-
-static int qcow_aio_new(BlockDriverAIOCB *acb)
-{
-    BlockDriverState *bs = acb->bs;
-    BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1;
-    acb1 = qemu_mallocz(sizeof(QCowAIOCB));
-    if (!acb1)
-        return -1;
-    acb->opaque = acb1;
-    acb1->hd_aiocb = bdrv_aio_new(s->hd);
-    if (!acb1->hd_aiocb)
-        goto fail;
-    if (bs->backing_hd) {
-        acb1->backing_hd_aiocb = bdrv_aio_new(bs->backing_hd);
-        if (!acb1->backing_hd_aiocb)
-            goto fail;
-    }
-    return 0;
- fail:
-    qcow_aio_delete(acb);
-    return -1;
-}
-
 static void qcow_aio_read_cb(void *opaque, int ret)
 {
-    BlockDriverAIOCB *acb = opaque;
-    BlockDriverState *bs = acb->bs;
+    QCowAIOCB *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
     BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1 = acb->opaque;
     int index_in_cluster, n1;
 
+    acb->hd_aiocb = NULL;
     if (ret < 0) {
     fail:
-        acb->cb(acb->cb_opaque, ret);
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_release(acb);
         return;
     }
 
 redo:
     /* post process the read buffer */
-    if (!acb1->cluster_offset) {
+    if (!acb->cluster_offset) {
         /* nothing to do */
-    } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) {
+    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
         /* nothing to do */
     } else {
         if (s->crypt_method) {
-            encrypt_sectors(s, acb1->sector_num, acb1->buf, acb1->buf,
-                            acb1->n, 0,
+            encrypt_sectors(s, acb->sector_num, acb->buf, acb->buf,
+                            acb->n, 0,
                             &s->aes_decrypt_key);
         }
     }
 
-    acb1->nb_sectors -= acb1->n;
-    acb1->sector_num += acb1->n;
-    acb1->buf += acb1->n * 512;
+    acb->nb_sectors -= acb->n;
+    acb->sector_num += acb->n;
+    acb->buf += acb->n * 512;
 
-    if (acb1->nb_sectors == 0) {
+    if (acb->nb_sectors == 0) {
         /* request completed */
-        acb->cb(acb->cb_opaque, 0);
+        acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_release(acb);
         return;
     }
 
     /* prepare next AIO request */
-    acb1->cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9,
-                                              0, 0, 0, 0);
-    index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);
-    acb1->n = s->cluster_sectors - index_in_cluster;
-    if (acb1->n > acb1->nb_sectors)
-        acb1->n = acb1->nb_sectors;
-    if (!acb1->cluster_offset) {
+    acb->cluster_offset = get_cluster_offset(bs, acb->sector_num << 9,
+                                             0, 0, 0, 0);
+    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
+    acb->n = s->cluster_sectors - index_in_cluster;
+    if (acb->n > acb->nb_sectors)
+        acb->n = acb->nb_sectors;
+    if (!acb->cluster_offset) {
         if (bs->backing_hd) {
             /* read from the base image */
-            n1 = backing_read1(bs->backing_hd, acb1->sector_num,
-                               acb1->buf, acb1->n);
+            n1 = backing_read1(bs->backing_hd, acb->sector_num,
+                               acb->buf, acb->n);
             if (n1 > 0) {
-                ret = bdrv_aio_read(acb1->backing_hd_aiocb, acb1->sector_num,
-                                    acb1->buf, n1, qcow_aio_read_cb, acb);
-                if (ret < 0)
+                acb->hd_aiocb = bdrv_aio_read(bs->backing_hd, acb->sector_num,
+                                    acb->buf, acb->n, qcow_aio_read_cb, acb);
+                if (acb->hd_aiocb == NULL)
                     goto fail;
             } else {
                 goto redo;
             }
         } else {
             /* Note: in this case, no need to wait */
-            memset(acb1->buf, 0, 512 * acb1->n);
+            memset(acb->buf, 0, 512 * acb->n);
             goto redo;
         }
-    } else if (acb1->cluster_offset & QCOW_OFLAG_COMPRESSED) {
+    } else if (acb->cluster_offset & QCOW_OFLAG_COMPRESSED) {
         /* add AIO support for compressed blocks ? */
-        if (decompress_cluster(s, acb1->cluster_offset) < 0)
+        if (decompress_cluster(s, acb->cluster_offset) < 0)
             goto fail;
-        memcpy(acb1->buf,
-               s->cluster_cache + index_in_cluster * 512, 512 * acb1->n);
+        memcpy(acb->buf,
+               s->cluster_cache + index_in_cluster * 512, 512 * acb->n);
         goto redo;
     } else {
-        if ((acb1->cluster_offset & 511) != 0) {
+        if ((acb->cluster_offset & 511) != 0) {
             ret = -EIO;
             goto fail;
         }
-        ret = bdrv_aio_read(acb1->hd_aiocb,
-                            (acb1->cluster_offset >> 9) + index_in_cluster,
-                            acb1->buf, acb1->n, qcow_aio_read_cb, acb);
-        if (ret < 0)
+        acb->hd_aiocb = bdrv_aio_read(s->hd,
+                            (acb->cluster_offset >> 9) + index_in_cluster,
+                            acb->buf, acb->n, qcow_aio_read_cb, acb);
+        if (acb->hd_aiocb == NULL)
             goto fail;
     }
 }
 
-static int qcow_aio_read(BlockDriverAIOCB *acb, int64_t sector_num,
-                         uint8_t *buf, int nb_sectors)
+static QCowAIOCB *qcow_aio_setup(BlockDriverState *bs,
+        int64_t sector_num, uint8_t *buf, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
 {
-    QCowAIOCB *acb1 = acb->opaque;
+    QCowAIOCB *acb;
 
-    acb1->sector_num = sector_num;
-    acb1->buf = buf;
-    acb1->nb_sectors = nb_sectors;
-    acb1->n = 0;
-    acb1->cluster_offset = 0;
+    acb = qemu_aio_get(bs, cb, opaque);
+    if (!acb)
+        return NULL;
+    acb->hd_aiocb = NULL;
+    acb->sector_num = sector_num;
+    acb->buf = buf;
+    acb->nb_sectors = nb_sectors;
+    acb->n = 0;
+    acb->cluster_offset = 0;
+    return acb;
+}
+
+static BlockDriverAIOCB *qcow_aio_read(BlockDriverState *bs,
+        int64_t sector_num, uint8_t *buf, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    QCowAIOCB *acb;
+
+    acb = qcow_aio_setup(bs, sector_num, buf, nb_sectors, cb, opaque);
+    if (!acb)
+        return NULL;
 
     qcow_aio_read_cb(acb, 0);
-    return 0;
+    return &acb->common;
 }
 
 static void qcow_aio_write_cb(void *opaque, int ret)
 {
-    BlockDriverAIOCB *acb = opaque;
-    BlockDriverState *bs = acb->bs;
+    QCowAIOCB *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
     BDRVQcowState *s = bs->opaque;
-    QCowAIOCB *acb1 = acb->opaque;
     int index_in_cluster;
     uint64_t cluster_offset;
     const uint8_t *src_buf;
 
+    acb->hd_aiocb = NULL;
+
     if (ret < 0) {
     fail:
-        acb->cb(acb->cb_opaque, ret);
+        acb->common.cb(acb->common.opaque, ret);
+        qemu_aio_release(acb);
         return;
     }
 
-    acb1->nb_sectors -= acb1->n;
-    acb1->sector_num += acb1->n;
-    acb1->buf += acb1->n * 512;
+    acb->nb_sectors -= acb->n;
+    acb->sector_num += acb->n;
+    acb->buf += acb->n * 512;
 
-    if (acb1->nb_sectors == 0) {
+    if (acb->nb_sectors == 0) {
         /* request completed */
-        acb->cb(acb->cb_opaque, 0);
+        acb->common.cb(acb->common.opaque, 0);
+        qemu_aio_release(acb);
         return;
     }
 
-    index_in_cluster = acb1->sector_num & (s->cluster_sectors - 1);
-    acb1->n = s->cluster_sectors - index_in_cluster;
-    if (acb1->n > acb1->nb_sectors)
-        acb1->n = acb1->nb_sectors;
-    cluster_offset = get_cluster_offset(bs, acb1->sector_num << 9, 1, 0,
+    index_in_cluster = acb->sector_num & (s->cluster_sectors - 1);
+    acb->n = s->cluster_sectors - index_in_cluster;
+    if (acb->n > acb->nb_sectors)
+        acb->n = acb->nb_sectors;
+    cluster_offset = get_cluster_offset(bs, acb->sector_num << 9, 1, 0,
                                         index_in_cluster,
-                                        index_in_cluster + acb1->n);
+                                        index_in_cluster + acb->n);
     if (!cluster_offset || (cluster_offset & 511) != 0) {
         ret = -EIO;
         goto fail;