/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.  When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
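
/*
 * For example: with 4096-byte filesystem blocks and 512-byte dio_blocks,
 * blkfactor is 3, so an fs_block number is dio_block >> 3 and a dio_block
 * number is fs_block << 3.
 */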

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	struct iov_iter *iter;
	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
	size_t from, to;
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int rw;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	bool defer_completion;		/* defer AIO completion to workqueue? */
	bool should_dirty;		/* if pages should be dirtied */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	union {
		struct page *pages[DIO_PAGES];	/* page buffer */
		struct work_struct complete_work;/* deferred AIO completion */
	};
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	ssize_t ret;

	ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
				&sdio->from);

	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		sdio->from = 0;
		sdio->to = PAGE_SIZE;
		return 0;
	}

	if (ret >= 0) {
		iov_iter_advance(sdio->iter, ret);
		ret += sdio->from;
		sdio->head = 0;
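		/*
		 * ret now counts bytes from the start of the first page.
		 * Round up to get the number of queued pages, and compute
		 * the offset of the last valid byte + 1 in the final page.
		 */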
		sdio->tail = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->to = ((ret - 1) & (PAGE_SIZE - 1)) + 1;
		return 0;
	}
	return ret;
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently, which also makes nicer use of
 * the L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
					struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This drops i_dio_count, lets interested parties know that a DIO operation
 * has completed, and calculates the resulting return code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
		bool is_async)
{
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io && dio->result)
		dio->end_io(dio->iocb, offset, transferred, dio->private);

	if (!(dio->flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(dio->inode);

	if (is_async) {
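		/*
		 * For AIO writes, any O_SYNC/O_DSYNC work could not be done
		 * at submission time (we returned -EIOCBQUEUED), so it is
		 * done here at completion instead.
		 */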
		if (dio->rw & WRITE) {
			int err;

			err = generic_write_sync(dio->iocb->ki_filp, offset,
						 transferred);
			if (err < 0 && ret > 0)
				ret = err;
		}

		dio->iocb->ki_complete(dio->iocb, ret, 0);
	}

	kmem_cache_free(dio_cache, dio);
	return ret;
}

static void dio_aio_complete_work(struct work_struct *work)
{
	struct dio *dio = container_of(work, struct dio, complete_work);

	dio_complete(dio, dio->iocb->ki_pos, 0, true);
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);

/*
 * Asynchronous IO callback.
 */
static void dio_bio_end_aio(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
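	/*
	 * remaining == 1 means only the submission path's reference is left;
	 * wake it so dio_await_one() can return.
	 */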
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		if (dio->result && dio->defer_completion) {
			INIT_WORK(&dio->complete_work, dio_aio_complete_work);
			queue_work(dio->inode->i_sb->s_dio_done_wq,
				   &dio->complete_work);
		} else {
			dio_complete(dio, dio->iocb->ki_pos, 0, true);
		}
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio);
	else
		dio_bio_end_io(bio);
}
EXPORT_SYMBOL_GPL(dio_end_io);
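
/*
 * A hypothetical caller sketch (the names below are illustrative, not part
 * of this file): a filesystem supplying its own dio_submit_t would finish
 * its bios with something like
 *
 *	static void myfs_dio_end_io(struct bio *bio)
 *	{
 *		myfs_finish_bio(bio);		(fs-specific completion work)
 *		dio_end_io(bio, bio->bi_error);
 *	}
 */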

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_WAIT and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
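	/* This ref is dropped by dio_bio_end_io() or dio_bio_end_aio() */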
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->rw == READ && dio->should_dirty)
		bio_set_pages_dirty(bio);

	if (sdio->submit_io)
		sdio->submit_io(dio->rw, bio, dio->inode,
			       sdio->logical_offset_in_bio);
	else
		submit_bio(dio->rw, bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (sdio->head < sdio->tail)
		page_cache_release(dio->pages[sdio->head++]);
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned i;
	int err;

	if (bio->bi_error)
		dio->io_error = -EIO;

	if (dio->is_async && dio->rw == READ && dio->should_dirty) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
		err = bio->bi_error;
	} else {
		bio_for_each_segment_all(bvec, bio, i) {
			struct page *page = bvec->bv_page;

			if (dio->rw == READ && !PageCompound(page) &&
					dio->should_dirty)
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		err = bio->bi_error;
		bio_put(bio);
	}
	return err;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Create a workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating a workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
static int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created the workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}

static int dio_set_defer_completion(struct dio *dio)
{
	struct super_block *sb = dio->inode->i_sb;

	if (dio->defer_completion)
		return 0;
	dio->defer_completion = true;
	if (!sb->s_dio_done_wq)
		return sb_init_dio_done_wq(sb);
	return 0;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;
	unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
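		/*
		 * b_size is an in-parameter here: it tells get_block() the
		 * maximum it is allowed to map (see the comment above this
		 * function).
		 */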
		map_bh->b_size = fs_count << i_blkbits;

		/*
		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
		 * forbid block creations: only overwrites are permitted.
		 * We will return early to the caller once we see an
		 * unmapped buffer head returned, and the caller will fall
		 * back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->rw & WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (sdio->block_in_file < (i_size_read(dio->inode) >>
							sdio->blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;

		if (ret == 0 && buffer_defer_completion(map_bh))
			ret = dio_set_defer_completion(dio);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		page_cache_get(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
		
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_iter.bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
Linus Torvalds's avatar
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible 
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->rw & WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
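	/*
	 * This needs the same page, byte-contiguity with the deferred chunk,
	 * and disk-contiguity of the target blocks.
	 */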
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			return ret;
	}

	page_cache_get(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	/*
	 * If sdio->boundary then we want to schedule the IO now to
	 * avoid metadata seeks.
	 */
	if (sdio->boundary) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		dio_bio_submit(dio, sdio);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new
 */
static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
{
	unsigned i;
	unsigned nblocks;

	nblocks = map_bh->b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(map_bh->b_bdev,
					  map_bh->b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
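/*
 * Worked example: with 4096-byte fs blocks and 512-byte dio blocks
 * (blkfactor = 3), a write into a newly allocated fs block that starts
 * 1024 bytes in has this_chunk_blocks = 2 at the start, so the first
 * 1024 bytes of the block are zeroed before the user data.
 */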
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end)
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file, because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	int ret = 0;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		struct page *page;
		size_t from, to;

		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}
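		/*
		 * Only the first and last pages of the request can be
		 * partial; every interior page covers 0..PAGE_SIZE.
		 */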
		from = sdio->head ? 0 : sdio->from;
		to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
		sdio->head++;

		while (from < to) {
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> sdio->blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio, map_bh);

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw & WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
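				/* Reading from a hole: supply zeroes instead of doing IO */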
				zero_user(page, from, 1 << blkbits);
				sdio->block_in_file++;
				from += 1 << blkbits;
				dio->result += 1 << blkbits;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (to - from) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			if (this_chunk_blocks == sdio->blocks_available)
				sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  from,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			from += this_chunk_bytes;
			dio->result += this_chunk_bytes;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
 * operation.  AIO can do so as well if the operation was broken as described
 * above, or if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to ->complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined. Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code. This is important
 * for the whole file.
 */
static inline ssize_t
do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
		      struct block_device *bdev, struct iov_iter *iter,
		      loff_t offset, get_block_t get_block, dio_iodone_t end_io,
		      dio_submit_t submit_io, int flags)
{
	unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
	unsigned blkbits = i_blkbits;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	size_t count = iov_iter_count(iter);
	loff_t end = offset + count;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	struct buffer_head map_bh = { 0, };
	struct blk_plug plug;
	unsigned long align = offset | iov_iter_alignment(iter);

	/*
	 * Avoid references to bdev if not absolutely needed to give
	 * the early prefetch in the caller enough time.
	 */

	if (align & blocksize_mask) {
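		/*
		 * Not aligned to the fs blocksize: see whether the device's
		 * (possibly finer) logical block size is enough.
		 */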
		if (bdev)
			blkbits = blksize_bits(bdev_logical_block_size(bdev));
		blocksize_mask = (1 << blkbits) - 1;
		if (align & blocksize_mask)
			goto out;
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (iov_iter_rw(iter) == READ) {