/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.   When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
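
/*
 * Worked example (illustrative): with 4096-byte filesystem blocks
 * (i_blkbits = 12) and 512-byte-aligned user IO (blkbits = 9),
 * blkfactor = 12 - 9 = 3, giving 8 dio_blocks per fs_block.  Then
 * dio_block 24 maps to fs_block 24 >> 3 == 3, and fs_block 3 covers
 * dio_blocks 3 << 3 == 24 through 31.
 */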

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change, Used only once */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	/*
	 * Page fetching state. These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int rw;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */

	void *private;			/* copy from map_bh.b_private */

	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL_GPL(inode_dio_wait);

/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL_GPL(inode_dio_done);
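
/*
 * Sketch of the expected pairing (illustrative): every direct I/O takes a
 * reference via atomic_inc(&inode->i_dio_count) before submission and drops
 * it with inode_dio_done() at completion, while a truncate path first
 * excludes new references (for DIO_LOCKING filesystems via i_mutex) and
 * then drains the old ones:
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);
 *	// ... truncate blocks safely ...
 *	mutex_unlock(&inode->i_mutex);
 */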

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	int ret;
	int nr_pages;

	nr_pages = min(sdio->total_pages - sdio->curr_page, DIO_PAGES);
	ret = get_user_pages_fast(
		sdio->curr_user_address,		/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		&dio->pages[0]);		/* Put results here */

	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		sdio->curr_user_address += ret * PAGE_SIZE;
		sdio->curr_page += ret;
		sdio->head = 0;
		sdio->tail = ret;
		ret = 0;
	}
out:
	return ret;	
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages less frequently, which provides nicer use of the
 * L1 cache.
 */
static inline struct page *dio_get_page(struct dio *dio,
		struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head++];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This releases locks as dictated by the locking type, lets interested parties
 * know that a DIO operation has completed, and calculates the resulting return
 * code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
{
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io && dio->result) {
		dio->end_io(dio->iocb, offset, transferred,
			    dio->private, ret, is_async);
	} else {
		if (is_async)
			aio_complete(dio->iocb, ret, 0);
		inode_dio_done(dio->inode);
	}

	return ret;
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
 * Asynchronous IO callback. 
 */
static void dio_bio_end_aio(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
322 323
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		dio_complete(dio, dio->iocb->ki_pos, 0, true);
		kmem_cache_free(dio_cache, dio);
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio, error);
	else
		dio_bio_end_io(bio, error);
}
EXPORT_SYMBOL_GPL(dio_end_io);
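
/*
 * Sketch of a custom submission hook (illustrative; the "myfs" names are
 * made up).  A dio_submit_t receives each bio before it is issued and must
 * arrange for dio_end_io() to run once its own completion work is done:
 *
 *	static void myfs_submit_dio(int rw, struct bio *bio,
 *				    struct inode *inode, loff_t file_offset)
 *	{
 *		// save bio->bi_private (the struct dio) and bi_end_io,
 *		// install a private completion, then issue the bio; the
 *		// private completion restores the saved fields and
 *		// finishes by calling dio_end_io(bio, err)
 *		submit_bio(rw, bio);
 *	}
 */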

static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_WAIT and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->rw == READ)
		bio_set_pages_dirty(bio);

	if (sdio->submit_io)
		sdio->submit_io(dio->rw, bio, dio->inode,
			       sdio->logical_offset_in_bio);
	else
		submit_bio(dio->rw, bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (dio_pages_present(sdio))
		page_cache_release(dio_get_page(dio, sdio));
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller holds a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	if (!uptodate)
		dio->io_error = -EIO;

	if (dio->is_async && dio->rw == READ) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
			struct page *page = bvec[page_no].bv_page;

			if (dio->rw == READ && !PageCompound(page))
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		bio_put(bio);
	}
	return uptodate ? 0 : -EIO;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
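
/*
 * Worked example (illustrative): a 16KB request at file offset 0 on a
 * filesystem with 4096-byte blocks reaches get_more_blocks() with
 * fs_count = 4, so map_bh->b_size is primed to 16384.  A filesystem that
 * can map the range contiguously returns one mapped buffer_head with
 * b_blocknr set and b_size = 16384; one that cannot simply maps fewer
 * bytes and is called again as the IO walks forward.
 */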
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
			   struct buffer_head *map_bh)
{
	int ret;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	sector_t fs_endblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	int create;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		fs_endblk = (sdio->final_block_in_request - 1) >>
					sdio->blkfactor;
		fs_count = fs_endblk - fs_startblk + 1;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << dio->inode->i_blkbits;

		/*
		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
		 * forbid block creations: only overwrites are permitted.
		 * We will return early to the caller once we see an
		 * unmapped buffer head returned, and the caller will fall
		 * back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->rw & WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (sdio->block_in_file < (i_size_read(dio->inode) >>
							sdio->blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);

		/* Store for completion */
		dio->private = map_bh->b_private;
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static inline int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		sector_t start_sector, struct buffer_head *map_bh)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(map_bh->b_bdev));
	nr_pages = min(nr_pages, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, map_bh->b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static inline int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		page_cache_get(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
		
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
		struct buffer_head *map_bh)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
		/*
		 * Submit now if the underlying fs is about to perform a
		 * metadata read
		 */
		else if (sdio->boundary)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block, map_bh);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible 
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
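
/*
 * Worked example (illustrative): two back-to-back 512-byte chunks of the
 * same page, mapped to consecutive disk blocks, arrive as separate calls.
 * The second call finds cur_page == page with adjacent offsets and
 * contiguous blocks, so it simply grows cur_page_len from 512 to 1024
 * instead of starting new IO.
 */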
static inline int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		    unsigned offset, unsigned len, sector_t blocknr,
		    struct buffer_head *map_bh)
{
	int ret = 0;

	if (dio->rw & WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;

		/*
		 * If sdio->boundary then we want to schedule the IO now to
		 * avoid metadata seeks.
		 */
		if (sdio->boundary) {
			ret = dio_send_cur_page(dio, sdio, map_bh);
			page_cache_release(sdio->cur_page);
			sdio->cur_page = NULL;
		}
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio, map_bh);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			goto out;
	}

	page_cache_get(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new
 */
static void clean_blockdev_aliases(struct dio *dio, struct buffer_head *map_bh)
{
	unsigned i;
	unsigned nblocks;

	nblocks = map_bh->b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(map_bh->b_bdev,
					  map_bh->b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
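
/*
 * Worked example (illustrative): with blkfactor = 3 (512-byte dio blocks
 * inside 4096-byte fs blocks), a write whose first dio block sits 5 blocks
 * into a newly allocated fs block (block_in_file & 7 == 5) has the leading
 * 5 dio blocks (2560 bytes) zeroed by the end == 0 call, and a write that
 * stops 5 blocks into an fs block has the trailing 8 - 5 == 3 blocks
 * zeroed by the end == 1 call.
 */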
static inline void dio_zero_block(struct dio *dio, struct dio_submit *sdio,
		int end, struct buffer_head *map_bh)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end) 
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io, map_bh))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
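
/*
 * Worked example (illustrative): on a block device with 512-byte
 * i_blkbits, blkfactor is 0 and every dio_block is one sector.  If
 * ->get_block returns b_size = PAGE_SIZE, each mapping covers
 * PAGE_SIZE >> 9 == 8 sectors, so a merely 512-byte-aligned request
 * still moves through this loop a page at a time.
 */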
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
			struct buffer_head *map_bh)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = sdio->first_block_in_page;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio, map_bh);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> sdio->blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio, map_bh);

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw & WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
				zero_user(page, block_in_page << blkbits,
						1 << blkbits);
				sdio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0, map_bh);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  offset_in_page,
						  this_chunk_bytes,
						  sdio->next_block_for_io,
						  map_bh);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}

static inline int drop_refcount(struct dio *dio)
{
	int ret2;
	unsigned long flags;

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to aio_complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return ret2;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
 *
 * NOTE: if you pass "sdio" to anything by pointer make sure that function
 * is always inlined. Otherwise gcc is unable to split the structure into
 * individual fields and will generate much worse code. This is important
 * for the whole file.
 */
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset, 
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io,	int flags)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	unsigned bdev_blkbits = 0;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	struct dio_submit sdio = { 0, };
	unsigned long user_addr;
	size_t bytes;
	struct buffer_head map_bh = { 0, };

	if (rw & WRITE)
		rw = WRITE_ODIRECT;

	if (bdev)
		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));

	if (offset & blocksize_mask) {
		if (bdev)
			 blkbits = bdev_blkbits;
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
			if (bdev)
				 blkbits = bdev_blkbits;
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))  
				goto out;
		}
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (rw == READ && end == offset)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (rw == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released by direct_io_worker */
			mutex_lock(&inode->i_mutex);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {
				mutex_unlock(&inode->i_mutex);
				kmem_cache_free(dio_cache, dio);
				goto out;
			}
		}
	}

	/*
	 * Will be decremented at I/O completion time.
	 */
	atomic_inc(&inode->i_dio_count);

	/*
	 * For file extending writes updating i_size before data
	 * writeouts complete can expose uninitialized blocks. So
	 * even for AIO, we need to wait for i/o to complete before
	 * returning in this case.
	 */
	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
		(end > i_size_read(inode)));

	retval = 0;

	dio->inode = inode;
	dio->rw = rw;
	sdio.blkbits = blkbits;
	sdio.blkfactor = inode->i_blkbits - blkbits;
	sdio.block_in_file = offset >> blkbits;

	sdio.get_block = get_block;
	dio->end_io = end_io;
	sdio.submit_io = submit_io;
	sdio.final_block_in_bio = -1;
	sdio.next_block_for_io = -1;

	dio->iocb = iocb;
	dio->i_size = i_size_read(inode);

	spin_lock_init(&dio->bio_lock);
	dio->refcount = 1;

	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(sdio.blkfactor))
		sdio.pages_in_io = 2;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.pages_in_io +=
			((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
				PAGE_SIZE - user_addr / PAGE_SIZE);
	}

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio.size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		sdio.final_block_in_request = sdio.block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		sdio.head = 0;
		sdio.tail = 0;
		sdio.curr_page = 0;

		sdio.total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			sdio.total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio.curr_user_address = user_addr;

		retval = do_direct_IO(dio, &sdio, &map_bh);

		dio->result += iov[seg].iov_len -
			((sdio.final_block_in_request - sdio.block_in_file) <<
					blkbits);

		if (retval) {
			dio_cleanup(dio, &sdio);
			break;
		}
	} /* end iovec loop */

	if (retval == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * be handled by buffered I/O when we return
		 */
		retval = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, &sdio, 1, &map_bh);

	if (sdio.cur_page) {
		ssize_t ret2;

		ret2 = dio_send_cur_page(dio, &sdio, &map_bh);
		if (retval == 0)
			retval = ret2;
		page_cache_release(sdio.cur_page);
		sdio.cur_page = NULL;
	}
	if (sdio.bio)
		dio_bio_submit(dio, &sdio);

	/*
	 * It is possible that, we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold on.
	 */
	dio_cleanup(dio, &sdio);

	/*
	 * All block lookups have been performed. For READ requests
	 * we can let i_mutex go now that its achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (rw == READ && (dio->flags & DIO_LOCKING))
		mutex_unlock(&dio->inode->i_mutex);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write have been setup.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(retval == -EIOCBQUEUED);
	if (dio->is_async && retval == 0 && dio->result &&
	    ((rw & READ) || (dio->result == sdio.size)))
		retval = -EIOCBQUEUED;

	if (retval != -EIOCBQUEUED)
		dio_await_completion(dio);

	if (drop_refcount(dio) == 0) {
		retval = dio_complete(dio, offset, retval, false);
		kmem_cache_free(dio_cache, dio);
	} else
		BUG_ON(retval != -EIOCBQUEUED);

out:
	return retval;
}
EXPORT_SYMBOL(__blockdev_direct_IO);
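
/*
 * Sketch of a typical caller (illustrative; the "myfs" names are made up).
 * Most filesystems reach this function through the blockdev_direct_IO()
 * wrapper in <linux/fs.h>, which supplies inode->i_sb->s_bdev and the
 * DIO_LOCKING | DIO_SKIP_HOLES flags:
 *
 *	static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
 *			const struct iovec *iov, loff_t offset,
 *			unsigned long nr_segs)
 *	{
 *		struct inode *inode = iocb->ki_filp->f_mapping->host;
 *
 *		return blockdev_direct_IO(rw, iocb, inode, iov, offset,
 *					  nr_segs, myfs_get_block);
 *	}
 */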

static __init int dio_init(void)
{
	dio_cache = KMEM_CACHE(dio, SLAB_PANIC);
	return 0;
}
module_init(dio_init)