/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64
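
/*
 * Illustrative arithmetic (a sketch, not from the original source): with
 * DIO_PAGES == 64, the pages[] array in struct dio below occupies
 * 64 * sizeof(struct page *), i.e. 512 bytes on a typical 64-bit machine,
 * and so dominates the size of the slab object.
 */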

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
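
/*
 * A minimal sketch of the conversions described above (illustrative only),
 * assuming blkfactor == 2, e.g. 512-byte dio_blocks against a 2048-byte
 * filesystem block:
 *
 *	fs_block  = dio_block >> sdio->blkfactor;	scale down by 4
 *	dio_block = fs_block  << sdio->blkfactor;	scale back up
 */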

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int op;
	int op_flags;
	blk_qc_t bio_cookie;
	struct block_device *bio_bdev;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;
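
/*
 * For reference (a sketch; the real initializer is in this file's module
 * init routine, not shown in this excerpt), dio_cache is created once at
 * init time, roughly:
 *
 *	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
 */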

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		get_page(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
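		/*
		 * Worked example (illustrative, 4K pages): if the iterator
		 * started 512 bytes into the first page (sdio->from == 512)
		 * and 8192 bytes were pinned, ret becomes 8704; the math
		 * below then yields tail == 3 pages and to == 512 valid
		 * bytes in the last page.
		 */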
		sdio->head = 0;
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages less frequently, which also makes better use of
 * the L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
{
	loff_t offset = dio->iocb->ki_pos;
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
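		/*
		 * e.g. (illustrative): with 4096-byte blocks and
		 * i_size == 6144, a read at offset 4096 may complete a whole
		 * zero-filled block, making dio->result == 4096; transferred
		 * is then clamped to i_size - offset == 2048.
		 */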
		if ((dio->op == REQ_OP_READ) &&
		    ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io) {
		int err;

		// XXX: ki_pos??
		err = dio->end_io(dio->iocb, offset, ret, dio->private);
		if (err)
			ret = err;
	}

	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(dio->inode);

	if (is_async) {
		/*
		 * generic_write_sync expects ki_pos to have been updated
		 * already, but the submission path only does this for
		 * synchronous I/O.
		 */
		dio->iocb->ki_pos += transferred;

		if (dio->op == REQ_OP_WRITE)
			ret = generic_write_sync(dio->iocb, transferred);
		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, 0, true);
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		if (dio->result && dio->defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, 0, true);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);
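
/*
 * Illustrative sketch (hypothetical filesystem code, not taken from any
 * real filesystem): a user of dio_end_io() would call it from its own bio
 * endio handler once its private completion work is done, e.g.:
 *
 *	static void myfs_dio_endio(struct bio *bio)
 *	{
 *		myfs_finish_ordered_io(bio);    (hypothetical fs bookkeeping)
 *		dio_end_io(bio, bio->bi_error);
 *	}
 */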

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_RECLAIM and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = first_sector;
	bio_set_op_attrs(bio, dio->op, dio->op_flags);
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	dio->bio_bdev = bio->bi_bdev;

	if (sdio->submit_io) {
		sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
		dio->bio_cookie = BLK_QC_T_NONE;
	} else
		dio->bio_cookie = submit_bio(bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		put_page(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie))
			io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned i;
	int err;

	if (bio->bi_error)
		dio->io_error = -EIO;

	if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
		err = bio->bi_error;
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		bio_for_each_segment_all(bvec, bio, i) {
			struct page *page = bvec->bv_page;

			if (dio->op == REQ_OP_READ && !PageCompound(page) &&
					dio->should_dirty)
				set_page_dirty_lock(page);
			put_page(page);
		}
		err = bio->bi_error;
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be returned via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating the workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough that we can include s_id in the name of the workqueue.
 */
static int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes that could fill holes inside i_size on a
		 * DIO_SKIP_HOLES filesystem we forbid block creations: only
		 * overwrites are permitted. We will return early to the caller
		 * once we see an unmapped buffer head returned, and the caller
		 * will fall back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->op == REQ_OP_WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
							i_blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}
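
/*
 * A hedged sketch of the get_block_t contract relied on above
 * ("myfs_get_block" is hypothetical, not any real filesystem's method):
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh, int create)
 *	{
 *		map up to bh->b_size bytes starting at fs block iblock;
 *		on success set bh->b_bdev, bh->b_blocknr, buffer_mapped(),
 *		shrink bh->b_size to what was actually mapped, and set
 *		buffer_new() if any blocks were freshly allocated;
 *		return 0, or a -ve errno on failure;
 *	}
 */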

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		get_page(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
		
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible 
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->op == REQ_OP_WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	get_page(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		dio_bio_submit(dio, sdio);
		put_page(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new
 */
static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
{
	unsigned i;
	unsigned nblocks;

	nblocks = map_bh->b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(map_bh->b_bdev,
					  map_bh->b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}
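
/*
 * Worked example (illustrative): with 512-byte dio_blocks and a 4096-byte
 * fs block (blkfactor == 3, so 8 dio_blocks per fs block), a write into a
 * newly allocated block that starts at byte 1536 has block_in_file & 7 == 3,
 * so the start-of-IO call above zeroes the first 3 dio_blocks (1536 bytes)
 * of the block ahead of the user data.
 */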

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file, because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					put_page(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> sdio->blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio, map_bh);

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->op == REQ_OP_WRITE) {
					put_page(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					put_page(page);
					goto out;
				}
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				put_page(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		put_page(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;