Commit 8c8bee1d authored by Chris Mason
Browse files

Btrfs: Wait for IO on the block device inodes of newly added devices

btrfs-vol -a /dev/xxx will zero the first and last two MB of the device.
The kernel code needs to wait for this IO to finish before it adds
the device.

btrfs metadata IO does not happen through the block device inode.  A
separate address space is used, allowing the zero filled buffer heads in
the block device inode to be written to disk after FS metadata starts
going down to the disk via the btrfs metadata inode.

The end result is zero filled metadata blocks after adding new devices
into the filesystem.

The fix is a simple filemap_write_and_wait on the block device inode
before actually inserting it into the pool of available devices.
Signed-off-by: Chris Mason
parent 1a40e23b
......@@ -610,6 +610,7 @@ struct btrfs_fs_info {
struct list_head dead_roots;
atomic_t nr_async_submits;
atomic_t async_submit_draining;
atomic_t nr_async_bios;
atomic_t tree_log_writers;
atomic_t tree_log_commit;
......@@ -460,6 +460,13 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
async->submit_bio_hook = submit_bio_hook;
async->work.func = run_one_async_submit;
async->work.flags = 0;
while(atomic_read(&fs_info->async_submit_draining) &&
atomic_read(&fs_info->nr_async_submits)) {
(atomic_read(&fs_info->nr_async_submits) == 0));
btrfs_queue_worker(&fs_info->workers, &async->work);
......@@ -495,11 +502,8 @@ static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
int mirror_num)
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 offset;
int ret;
offset = bio->bi_sector << 9;
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
......@@ -1360,6 +1364,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
atomic_set(&fs_info->nr_async_submits, 0);
atomic_set(&fs_info->async_submit_draining, 0);
atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
atomic_set(&fs_info->throttle_gen, 0);
......@@ -3440,13 +3440,24 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
if (inode) {
spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
while(atomic_read(&root->fs_info->nr_async_submits)) {
(atomic_read(&root->fs_info->nr_async_submits) == 0));
return 0;
......@@ -1038,6 +1038,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return -EIO;
trans = btrfs_start_transaction(root, 1);
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment