/*
 * fs/direct-io.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * O_DIRECT
 *
 * 04Jul2002	Andrew Morton
 *		Initial version
 * 11Sep2002	janetinc@us.ibm.com
 * 		added readv/writev support.
 * 29Oct2002	Andrew Morton
 *		rewrote bio_add_page() support.
 * 30Oct2002	pbadari@us.ibm.com
 *		added support for non-aligned IO.
 * 06Nov2002	pbadari@us.ibm.com
 *		added asynchronous IO support.
 * 21Jul2003	nathans@sgi.com
 *		added IO completion notifier.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/rwsem.h>
#include <linux/uio.h>
#include <linux/atomic.h>

/*
 * How many user pages to map in one call to get_user_pages().  This determines
 * the size of a structure in the slab cache
 */
#define DIO_PAGES	64

/*
 * This code generally works in units of "dio_blocks".  A dio_block is
 * somewhere between the hard sector size and the filesystem block size.  It
 * is determined on a per-invocation basis.   When talking to the filesystem
 * we need to convert dio_blocks to fs_blocks by scaling the dio_block quantity
 * down by dio->blkfactor.  Similarly, fs-blocksize quantities are converted
 * to dio_block quantities by shifting left by blkfactor.
 *
 * If blkfactor is zero then the user's request was aligned to the filesystem's
 * blocksize.
 */
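
/*
 * Worked example (illustrative, not part of the original source): with
 * 512-byte dio_blocks (blkbits = 9) on a filesystem using 4096-byte blocks
 * (i_blkbits = 12), blkfactor = 12 - 9 = 3, so
 *
 *	fs_block  = dio_block >> blkfactor;	(dio_block 24 maps to fs_block 3)
 *	dio_block = fs_block << blkfactor;	(fs_block 3 starts at dio_block 24)
 *
 * and blkfactor == 0 means no scaling is needed at all.
 */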

/* dio_state only used in the submission path */

struct dio_submit {
	struct bio *bio;		/* bio under assembly */
	unsigned blkbits;		/* doesn't change */
	unsigned blkfactor;		/* When we're using an alignment which
					   is finer than the filesystem's soft
					   blocksize, this specifies how much
					   finer.  blkfactor=2 means 1/4-block
					   alignment.  Does not change */
	unsigned start_zero_done;	/* flag: sub-blocksize zeroing has
					   been performed at the start of a
					   write */
	int pages_in_io;		/* approximate total IO pages */
	size_t	size;			/* total request size (doesn't change)*/
	sector_t block_in_file;		/* Current offset into the underlying
					   file in dio_block units. */
	unsigned blocks_available;	/* At block_in_file.  changes */
	int reap_counter;		/* rate limit reaping */
	sector_t final_block_in_request;/* doesn't change */
	unsigned first_block_in_page;	/* doesn't change, Used only once */
	int boundary;			/* prev block is at a boundary */
	get_block_t *get_block;		/* block mapping function */
	dio_submit_t *submit_io;	/* IO submission function */

	loff_t logical_offset_in_bio;	/* current first logical block in bio */
	sector_t final_block_in_bio;	/* current final block in bio + 1 */
	sector_t next_block_for_io;	/* next block to be put under IO,
					   in dio_blocks units */

	/*
	 * Deferred addition of a page to the dio.  These variables are
	 * private to dio_send_cur_page(), submit_page_section() and
	 * dio_bio_add_page().
	 */
	struct page *cur_page;		/* The page */
	unsigned cur_page_offset;	/* Offset into it, in bytes */
	unsigned cur_page_len;		/* Nr of bytes at cur_page_offset */
	sector_t cur_page_block;	/* Where it starts */
	loff_t cur_page_fs_offset;	/* Offset in file */

	/*
	 * Page fetching state. These variables belong to dio_refill_pages().
	 */
	int curr_page;			/* changes */
	int total_pages;		/* doesn't change */
	unsigned long curr_user_address;/* changes */

	/*
	 * Page queue.  These variables belong to dio_refill_pages() and
	 * dio_get_page().
	 */
	unsigned head;			/* next page to process */
	unsigned tail;			/* last valid page + 1 */
};

/* dio_state communicated between submission path and end_io */
struct dio {
	int flags;			/* doesn't change */
	int rw;
	struct inode *inode;
	loff_t i_size;			/* i_size when submitted */
	dio_iodone_t *end_io;		/* IO completion function */


	/* BIO completion state */
	spinlock_t bio_lock;		/* protects BIO fields below */
	int page_errors;		/* errno from get_user_pages() */
	int is_async;			/* is IO async ? */
	int io_error;			/* IO error in completion path */
	unsigned long refcount;		/* direct_io_worker() and bios */
	struct bio *bio_list;		/* singly linked via bi_private */
	struct task_struct *waiter;	/* waiting task (NULL if none) */

	/* AIO related stuff */
	struct kiocb *iocb;		/* kiocb */
	ssize_t result;                 /* IO result */

	struct buffer_head map_bh;	/* last get_block() result */
	/*
	 * pages[] (and any fields placed after it) are not zeroed out at
	 * allocation time.  Don't add new fields after pages[] unless you
	 * wish that they not be zeroed.
	 */
	struct page *pages[DIO_PAGES];	/* page buffer */
} ____cacheline_aligned_in_smp;

static struct kmem_cache *dio_cache __read_mostly;

static void __inode_dio_wait(struct inode *inode)
{
	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

	do {
		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&inode->i_dio_count))
			schedule();
	} while (atomic_read(&inode->i_dio_count));
	finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
	if (atomic_read(&inode->i_dio_count))
		__inode_dio_wait(inode);
}
EXPORT_SYMBOL_GPL(inode_dio_wait);

/*
 * inode_dio_done - signal finish of a direct I/O request
 * @inode: inode the direct I/O happens on
 *
 * This is called once we've finished processing a direct I/O request,
 * and is used to wake up callers waiting for direct I/O to be quiesced.
 */
void inode_dio_done(struct inode *inode)
{
	if (atomic_dec_and_test(&inode->i_dio_count))
		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
EXPORT_SYMBOL_GPL(inode_dio_done);
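
/*
 * Usage sketch (an assumption for illustration, not code taken from this
 * file): a filesystem's truncate path would typically pair these helpers
 * with i_mutex, the lock that serializes taking new i_dio_count references:
 *
 *	mutex_lock(&inode->i_mutex);
 *	inode_dio_wait(inode);		(blocks until i_dio_count reaches zero)
 *	(... shrink i_size and free blocks safely here ...)
 *	mutex_unlock(&inode->i_mutex);
 *
 * Each in-flight direct I/O request holds an i_dio_count reference for its
 * duration and releases it through inode_dio_done().
 */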

/*
 * How many pages are in the queue?
 */
static inline unsigned dio_pages_present(struct dio_submit *sdio)
{
	return sdio->tail - sdio->head;
}

/*
 * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
 */
static int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
{
	int ret;
	int nr_pages;

	nr_pages = min(sdio->total_pages - sdio->curr_page, DIO_PAGES);
	ret = get_user_pages_fast(
		sdio->curr_user_address,		/* Where from? */
		nr_pages,			/* How many pages? */
		dio->rw == READ,		/* Write to memory? */
		&dio->pages[0]);		/* Put results here */

	if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
		struct page *page = ZERO_PAGE(0);
		/*
		 * A memory fault, but the filesystem has some outstanding
		 * mapped blocks.  We need to use those blocks up to avoid
		 * leaking stale data in the file.
		 */
		if (dio->page_errors == 0)
			dio->page_errors = ret;
		page_cache_get(page);
		dio->pages[0] = page;
		sdio->head = 0;
		sdio->tail = 1;
		ret = 0;
		goto out;
	}

	if (ret >= 0) {
		sdio->curr_user_address += ret * PAGE_SIZE;
		sdio->curr_page += ret;
		sdio->head = 0;
		sdio->tail = ret;
		ret = 0;
	}
out:
	return ret;	
}

/*
 * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
 * buffered inside the dio so that we can call get_user_pages() against a
 * decent number of pages, less frequently.  To provide nicer use of the
 * L1 cache.
 */
static struct page *dio_get_page(struct dio *dio, struct dio_submit *sdio)
{
	if (dio_pages_present(sdio) == 0) {
		int ret;

		ret = dio_refill_pages(dio, sdio);
		if (ret)
			return ERR_PTR(ret);
		BUG_ON(dio_pages_present(sdio) == 0);
	}
	return dio->pages[sdio->head++];
}

/**
 * dio_complete() - called when all DIO BIO I/O has been completed
 * @offset: the byte offset in the file of the completed operation
 *
 * This releases locks as dictated by the locking type, lets interested parties
 * know that a DIO operation has completed, and calculates the resulting return
 * code for the operation.
 *
 * It lets the filesystem know if it registered an interest earlier via
 * get_block.  Pass the private field of the map buffer_head so that
 * filesystems can use it to hold additional state between get_block calls and
 * dio_complete.
 */
static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
{
	ssize_t transferred = 0;

	/*
	 * AIO submission can race with bio completion to get here while
	 * expecting to have the last io completed by bio completion.
	 * In that case -EIOCBQUEUED is in fact not an error we want
	 * to preserve through this call.
	 */
	if (ret == -EIOCBQUEUED)
		ret = 0;

	if (dio->result) {
		transferred = dio->result;

		/* Check for short read case */
		if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
			transferred = dio->i_size - offset;
	}

	if (ret == 0)
		ret = dio->page_errors;
	if (ret == 0)
		ret = dio->io_error;
	if (ret == 0)
		ret = transferred;

	if (dio->end_io && dio->result) {
		dio->end_io(dio->iocb, offset, transferred,
			    dio->map_bh.b_private, ret, is_async);
	} else {
		if (is_async)
			aio_complete(dio->iocb, ret, 0);
		inode_dio_done(dio->inode);
	}

	return ret;
}

static int dio_bio_complete(struct dio *dio, struct bio *bio);
/*
 * Asynchronous IO callback. 
 */
static void dio_bio_end_aio(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long remaining;
	unsigned long flags;

	/* cleanup the bio */
	dio_bio_complete(dio, bio);

	spin_lock_irqsave(&dio->bio_lock, flags);
	remaining = --dio->refcount;
	if (remaining == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (remaining == 0) {
		dio_complete(dio, dio->iocb->ki_pos, 0, true);
		kmem_cache_free(dio_cache, dio);
	}
}

/*
 * The BIO completion handler simply queues the BIO up for the process-context
 * handler.
 *
 * During I/O bi_private points at the dio.  After I/O, bi_private is used to
 * implement a singly-linked list of completed BIOs, at dio->bio_list.
 */
static void dio_bio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dio->bio_lock, flags);
	bio->bi_private = dio->bio_list;
	dio->bio_list = bio;
	if (--dio->refcount == 1 && dio->waiter)
		wake_up_process(dio->waiter);
	spin_unlock_irqrestore(&dio->bio_lock, flags);
}

/**
 * dio_end_io - handle the end io action for the given bio
 * @bio: The direct io bio that's being completed
 * @error: Error if there was one
 *
 * This is meant to be called by any filesystem that uses its own dio_submit_t
 * so that the DIO specific endio actions are dealt with after the filesystem
 * has done its completion work.
 */
void dio_end_io(struct bio *bio, int error)
{
	struct dio *dio = bio->bi_private;

	if (dio->is_async)
		dio_bio_end_aio(bio, error);
	else
		dio_bio_end_io(bio, error);
}
EXPORT_SYMBOL_GPL(dio_end_io);
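
/*
 * Hedged sketch of a caller (the names below are made up for illustration
 * and do not appear in this file): a filesystem supplying its own
 * dio_submit_t does its private completion work first, then hands the bio
 * back so the generic direct-io accounting above still runs:
 *
 *	static void myfs_dio_bio_end_io(struct bio *bio, int error)
 *	{
 *		(... filesystem-private completion work on bio ...)
 *		dio_end_io(bio, error);
 *	}
 */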

static void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
	      struct block_device *bdev,
	      sector_t first_sector, int nr_vecs)
{
	struct bio *bio;

	/*
	 * bio_alloc() is guaranteed to return a bio when called with
	 * __GFP_WAIT and we request a valid number of vectors.
	 */
	bio = bio_alloc(GFP_KERNEL, nr_vecs);

	bio->bi_bdev = bdev;
	bio->bi_sector = first_sector;
	if (dio->is_async)
		bio->bi_end_io = dio_bio_end_aio;
	else
		bio->bi_end_io = dio_bio_end_io;

	sdio->bio = bio;
	sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}

/*
 * In the AIO read case we speculatively dirty the pages before starting IO.
 * During IO completion, any of these pages which happen to have been written
 * back will be redirtied by bio_check_pages_dirty().
 *
 * bios hold a dio reference between submit_bio and ->end_io.
 */
static void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
	struct bio *bio = sdio->bio;
	unsigned long flags;

	bio->bi_private = dio;

	spin_lock_irqsave(&dio->bio_lock, flags);
	dio->refcount++;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (dio->is_async && dio->rw == READ)
		bio_set_pages_dirty(bio);

	if (sdio->submit_io)
		sdio->submit_io(dio->rw, bio, dio->inode,
			       sdio->logical_offset_in_bio);
	else
		submit_bio(dio->rw, bio);

	sdio->bio = NULL;
	sdio->boundary = 0;
	sdio->logical_offset_in_bio = 0;
}

/*
 * Release any resources in case of a failure
 */
static void dio_cleanup(struct dio *dio, struct dio_submit *sdio)
{
	while (dio_pages_present(sdio))
		page_cache_release(dio_get_page(dio, sdio));
}

/*
 * Wait for the next BIO to complete.  Remove it and return it.  NULL is
 * returned once all BIOs have been completed.  This must only be called once
 * all bios have been issued so that dio->refcount can only decrease.  This
 * requires that the caller hold a reference on the dio.
 */
static struct bio *dio_await_one(struct dio *dio)
{
	unsigned long flags;
	struct bio *bio = NULL;

	spin_lock_irqsave(&dio->bio_lock, flags);

	/*
	 * Wait as long as the list is empty and there are bios in flight.  bio
	 * completion drops the count, maybe adds to the list, and wakes while
	 * holding the bio_lock so we don't need set_current_state()'s barrier
	 * and can call it after testing our condition.
	 */
	while (dio->refcount > 1 && dio->bio_list == NULL) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		dio->waiter = current;
		spin_unlock_irqrestore(&dio->bio_lock, flags);
		io_schedule();
		/* wake up sets us TASK_RUNNING */
		spin_lock_irqsave(&dio->bio_lock, flags);
		dio->waiter = NULL;
	}
	if (dio->bio_list) {
		bio = dio->bio_list;
		dio->bio_list = bio->bi_private;
	}
	spin_unlock_irqrestore(&dio->bio_lock, flags);
	return bio;
}

/*
 * Process one completed BIO.  No locks are held.
 */
static int dio_bio_complete(struct dio *dio, struct bio *bio)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec;
	int page_no;

	if (!uptodate)
		dio->io_error = -EIO;

	if (dio->is_async && dio->rw == READ) {
		bio_check_pages_dirty(bio);	/* transfers ownership */
	} else {
		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
			struct page *page = bvec[page_no].bv_page;

			if (dio->rw == READ && !PageCompound(page))
				set_page_dirty_lock(page);
			page_cache_release(page);
		}
		bio_put(bio);
	}
	return uptodate ? 0 : -EIO;
}

/*
 * Wait on and process all in-flight BIOs.  This must only be called once
 * all bios have been issued so that the refcount can only decrease.
 * This just waits for all bios to make it through dio_bio_complete.  IO
 * errors are propagated through dio->io_error and should be propagated via
 * dio_complete().
 */
static void dio_await_completion(struct dio *dio)
{
	struct bio *bio;
	do {
		bio = dio_await_one(dio);
		if (bio)
			dio_bio_complete(dio, bio);
	} while (bio);
}

/*
 * A really large O_DIRECT read or write can generate a lot of BIOs.  So
 * to keep the memory consumption sane we periodically reap any completed BIOs
 * during the BIO generation phase.
 *
 * This also helps to limit the peak amount of pinned userspace memory.
 */
static int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->reap_counter++ >= 64) {
		while (dio->bio_list) {
			unsigned long flags;
			struct bio *bio;
			int ret2;

			spin_lock_irqsave(&dio->bio_lock, flags);
			bio = dio->bio_list;
			dio->bio_list = bio->bi_private;
			spin_unlock_irqrestore(&dio->bio_lock, flags);
			ret2 = dio_bio_complete(dio, bio);
			if (ret == 0)
				ret = ret2;
		}
		sdio->reap_counter = 0;
	}
	return ret;
}

/*
 * Call into the fs to map some more disk blocks.  We record the current number
 * of available blocks at sdio->blocks_available.  These are in units of the
 * fs blocksize, (1 << inode->i_blkbits).
 *
 * The fs is allowed to map lots of blocks at once.  If it wants to do that,
 * it uses the passed inode-relative block number as the file offset, as usual.
 *
 * get_block() is passed the number of i_blkbits-sized blocks which direct_io
 * has remaining to do.  The fs should not map more than this number of blocks.
 *
 * If the fs has mapped a lot of blocks, it should populate bh->b_size to
 * indicate how much contiguous disk space has been made available at
 * bh->b_blocknr.
 *
 * If *any* of the mapped blocks are new, then the fs must set buffer_new().
 * This isn't very efficient...
 *
 * In the case of filesystem holes: the fs may return an arbitrarily-large
 * hole by returning an appropriate value in b_size and by clearing
 * buffer_mapped().  However the direct-io code will only process holes one
 * block at a time - it will repeatedly call get_block() as it walks the hole.
 */
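
/*
 * Concrete example of the contract above (values are illustrative only):
 * for a request covering 8 filesystem blocks starting at fs_startblk 100,
 * a filesystem that finds the extent contiguous on disk might fill in
 *
 *	map_bh->b_blocknr = 2000;
 *	map_bh->b_size    = 8 << inode->i_blkbits;
 *	set_buffer_mapped(map_bh);	(plus set_buffer_new() if any block
 *					 was freshly allocated)
 *
 * whereas for a hole it would leave buffer_mapped() clear and put the
 * length of the hole in b_size.
 */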
static int get_more_blocks(struct dio *dio, struct dio_submit *sdio)
{
	int ret;
	struct buffer_head *map_bh = &dio->map_bh;
	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
	unsigned long fs_count;	/* Number of filesystem-sized blocks */
	unsigned long dio_count;/* Number of dio_block-sized blocks */
	unsigned long blkmask;
	int create;

	/*
	 * If there was a memory error and we've overwritten all the
	 * mapped blocks then we can now return that memory error
	 */
	ret = dio->page_errors;
	if (ret == 0) {
		BUG_ON(sdio->block_in_file >= sdio->final_block_in_request);
		fs_startblk = sdio->block_in_file >> sdio->blkfactor;
		dio_count = sdio->final_block_in_request - sdio->block_in_file;
		fs_count = dio_count >> sdio->blkfactor;
		blkmask = (1 << sdio->blkfactor) - 1;
		if (dio_count & blkmask)	
			fs_count++;

		map_bh->b_state = 0;
		map_bh->b_size = fs_count << dio->inode->i_blkbits;

		/*
		 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
		 * forbid block creations: only overwrites are permitted.
		 * We will return early to the caller once we see an
		 * unmapped buffer head returned, and the caller will fall
		 * back to buffered I/O.
		 *
		 * Otherwise the decision is left to the get_blocks method,
		 * which may decide to handle it or also return an unmapped
		 * buffer head.
		 */
		create = dio->rw & WRITE;
		if (dio->flags & DIO_SKIP_HOLES) {
			if (sdio->block_in_file < (i_size_read(dio->inode) >>
							sdio->blkbits))
				create = 0;
		}

		ret = (*sdio->get_block)(dio->inode, fs_startblk,
						map_bh, create);
	}
	return ret;
}

/*
 * There is no bio.  Make one now.
 */
static int dio_new_bio(struct dio *dio, struct dio_submit *sdio,
		       sector_t start_sector)
{
	sector_t sector;
	int ret, nr_pages;

	ret = dio_bio_reap(dio, sdio);
	if (ret)
		goto out;
	sector = start_sector << (sdio->blkbits - 9);
	nr_pages = min(sdio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
	nr_pages = min(nr_pages, BIO_MAX_PAGES);
	BUG_ON(nr_pages <= 0);
	dio_bio_alloc(dio, sdio, dio->map_bh.b_bdev, sector, nr_pages);
	sdio->boundary = 0;
out:
	return ret;
}

/*
 * Attempt to put the current chunk of 'cur_page' into the current BIO.  If
 * that was successful then update final_block_in_bio and take a ref against
 * the just-added page.
 *
 * Return zero on success.  Non-zero means the caller needs to start a new BIO.
 */
static int dio_bio_add_page(struct dio_submit *sdio)
{
	int ret;

	ret = bio_add_page(sdio->bio, sdio->cur_page,
			sdio->cur_page_len, sdio->cur_page_offset);
	if (ret == sdio->cur_page_len) {
		/*
		 * Decrement count only, if we are done with this page
		 */
		if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE)
			sdio->pages_in_io--;
		page_cache_get(sdio->cur_page);
		sdio->final_block_in_bio = sdio->cur_page_block +
			(sdio->cur_page_len >> sdio->blkbits);
		ret = 0;
	} else {
		ret = 1;
	}
	return ret;
}
		
/*
 * Put cur_page under IO.  The section of cur_page which is described by
 * cur_page_offset,cur_page_len is put into a BIO.  The section of cur_page
 * starts on-disk at cur_page_block.
 *
 * We take a ref against the page here (on behalf of its presence in the bio).
 *
 * The caller of this function is responsible for removing cur_page from the
 * dio, and for dropping the refcount which came from that presence.
 */
static int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio)
{
	int ret = 0;

	if (sdio->bio) {
		loff_t cur_offset = sdio->cur_page_fs_offset;
		loff_t bio_next_offset = sdio->logical_offset_in_bio +
			sdio->bio->bi_size;

		/*
		 * See whether this new request is contiguous with the old.
		 *
		 * Btrfs cannot handle having logically non-contiguous requests
		 * submitted.  For example if you have
		 *
		 * Logical:  [0-4095][HOLE][8192-12287]
		 * Physical: [0-4095]      [4096-8191]
		 *
		 * We cannot submit those pages together as one BIO.  So if our
		 * current logical offset in the file does not equal what would
		 * be the next logical offset in the bio, submit the bio we
		 * have.
		 */
		if (sdio->final_block_in_bio != sdio->cur_page_block ||
		    cur_offset != bio_next_offset)
			dio_bio_submit(dio, sdio);
		/*
		 * Submit now if the underlying fs is about to perform a
		 * metadata read
		 */
		else if (sdio->boundary)
			dio_bio_submit(dio, sdio);
	}

	if (sdio->bio == NULL) {
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block);
		if (ret)
			goto out;
	}

	if (dio_bio_add_page(sdio) != 0) {
		dio_bio_submit(dio, sdio);
		ret = dio_new_bio(dio, sdio, sdio->cur_page_block);
		if (ret == 0) {
			ret = dio_bio_add_page(sdio);
			BUG_ON(ret != 0);
		}
	}
out:
	return ret;
}

/*
 * An autonomous function to put a chunk of a page under deferred IO.
 *
 * The caller doesn't actually know (or care) whether this piece of page is in
 * a BIO, or is under IO or whatever.  We just take care of all possible 
 * situations here.  The separation between the logic of do_direct_IO() and
 * that of submit_page_section() is important for clarity.  Please don't break.
 *
 * The chunk of page starts on-disk at blocknr.
 *
 * We perform deferred IO, by recording the last-submitted page inside our
 * private part of the dio structure.  If possible, we just expand the IO
 * across that page here.
 *
 * If that doesn't work out then we put the old page into the bio and add this
 * page to the dio instead.
 */
static int
submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
		unsigned offset, unsigned len, sector_t blocknr)
{
	int ret = 0;

	if (dio->rw & WRITE) {
		/*
		 * Read accounting is performed in submit_bio()
		 */
		task_io_account_write(len);
	}

	/*
	 * Can we just grow the current page's presence in the dio?
	 */
	if (sdio->cur_page == page &&
	    sdio->cur_page_offset + sdio->cur_page_len == offset &&
	    sdio->cur_page_block +
	    (sdio->cur_page_len >> sdio->blkbits) == blocknr) {
		sdio->cur_page_len += len;

		/*
		 * If sdio->boundary then we want to schedule the IO now to
		 * avoid metadata seeks.
		 */
		if (sdio->boundary) {
			ret = dio_send_cur_page(dio, sdio);
			page_cache_release(sdio->cur_page);
			sdio->cur_page = NULL;
		}
		goto out;
	}

	/*
	 * If there's a deferred page already there then send it.
	 */
	if (sdio->cur_page) {
		ret = dio_send_cur_page(dio, sdio);
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
		if (ret)
			goto out;
	}

	page_cache_get(page);		/* It is in dio */
	sdio->cur_page = page;
	sdio->cur_page_offset = offset;
	sdio->cur_page_len = len;
	sdio->cur_page_block = blocknr;
	sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
out:
	return ret;
}

/*
 * Clean any dirty buffers in the blockdev mapping which alias newly-created
 * file blocks.  Only called for S_ISREG files - blockdevs do not set
 * buffer_new
 */
static void clean_blockdev_aliases(struct dio *dio)
{
	unsigned i;
	unsigned nblocks;

	nblocks = dio->map_bh.b_size >> dio->inode->i_blkbits;

	for (i = 0; i < nblocks; i++) {
		unmap_underlying_metadata(dio->map_bh.b_bdev,
					dio->map_bh.b_blocknr + i);
	}
}

/*
 * If we are not writing the entire block and get_block() allocated
 * the block for us, we need to fill-in the unused portion of the
 * block with zeros. This happens only if user-buffer, fileoffset or
 * io length is not filesystem block-size multiple.
 *
 * `end' is zero if we're doing the start of the IO, 1 at the end of the
 * IO.
 */
static void dio_zero_block(struct dio *dio, struct dio_submit *sdio, int end)
{
	unsigned dio_blocks_per_fs_block;
	unsigned this_chunk_blocks;	/* In dio_blocks */
	unsigned this_chunk_bytes;
	struct page *page;

	sdio->start_zero_done = 1;
	if (!sdio->blkfactor || !buffer_new(&dio->map_bh))
		return;

	dio_blocks_per_fs_block = 1 << sdio->blkfactor;
	this_chunk_blocks = sdio->block_in_file & (dio_blocks_per_fs_block - 1);

	if (!this_chunk_blocks)
		return;

	/*
	 * We need to zero out part of an fs block.  It is either at the
	 * beginning or the end of the fs block.
	 */
	if (end) 
		this_chunk_blocks = dio_blocks_per_fs_block - this_chunk_blocks;

	this_chunk_bytes = this_chunk_blocks << sdio->blkbits;

	page = ZERO_PAGE(0);
	if (submit_page_section(dio, sdio, page, 0, this_chunk_bytes,
				sdio->next_block_for_io))
		return;

	sdio->next_block_for_io += this_chunk_blocks;
}

/*
 * Walk the user pages, and the file, mapping blocks to disk and generating
 * a sequence of (page,offset,len,block) mappings.  These mappings are injected
 * into submit_page_section(), which takes care of the next stage of submission
 *
 * Direct IO against a blockdev is different from a file.  Because we can
 * happily perform page-sized but 512-byte aligned IOs.  It is important that
 * blockdev IO be able to have fine alignment and large sizes.
 *
 * So what we do is to permit the ->get_block function to populate bh.b_size
 * with the size of IO which is permitted at this offset and this i_blkbits.
 *
 * For best results, the blockdev should be set up with 512-byte i_blkbits and
 * it should set b_size to PAGE_SIZE or more inside get_block().  This gives
 * fine alignment but still allows this function to work in PAGE_SIZE units.
 */
static int do_direct_IO(struct dio *dio, struct dio_submit *sdio)
{
	const unsigned blkbits = sdio->blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	struct page *page;
	unsigned block_in_page;
	struct buffer_head *map_bh = &dio->map_bh;
	int ret = 0;

	/* The I/O can start at any block offset within the first page */
	block_in_page = sdio->first_block_in_page;

	while (sdio->block_in_file < sdio->final_block_in_request) {
		page = dio_get_page(dio, sdio);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			goto out;
		}

		while (block_in_page < blocks_per_page) {
			unsigned offset_in_page = block_in_page << blkbits;
			unsigned this_chunk_bytes;	/* # of bytes mapped */
			unsigned this_chunk_blocks;	/* # of blocks */
			unsigned u;

			if (sdio->blocks_available == 0) {
				/*
				 * Need to go and map some more disk
				 */
				unsigned long blkmask;
				unsigned long dio_remainder;

				ret = get_more_blocks(dio, sdio);
				if (ret) {
					page_cache_release(page);
					goto out;
				}
				if (!buffer_mapped(map_bh))
					goto do_holes;

				sdio->blocks_available =
						map_bh->b_size >> sdio->blkbits;
				sdio->next_block_for_io =
					map_bh->b_blocknr << sdio->blkfactor;
				if (buffer_new(map_bh))
					clean_blockdev_aliases(dio);

				if (!sdio->blkfactor)
					goto do_holes;

				blkmask = (1 << sdio->blkfactor) - 1;
				dio_remainder = (sdio->block_in_file & blkmask);

				/*
				 * If we are at the start of IO and that IO
				 * starts partway into a fs-block,
				 * dio_remainder will be non-zero.  If the IO
				 * is a read then we can simply advance the IO
				 * cursor to the first block which is to be
				 * read.  But if the IO is a write and the
				 * block was newly allocated we cannot do that;
				 * the start of the fs block must be zeroed out
				 * on-disk
				 */
				if (!buffer_new(map_bh))
					sdio->next_block_for_io += dio_remainder;
				sdio->blocks_available -= dio_remainder;
			}
do_holes:
			/* Handle holes */
			if (!buffer_mapped(map_bh)) {
				loff_t i_size_aligned;

				/* AKPM: eargh, -ENOTBLK is a hack */
				if (dio->rw & WRITE) {
					page_cache_release(page);
					return -ENOTBLK;
				}

				/*
				 * Be sure to account for a partial block as the
				 * last block in the file
				 */
				i_size_aligned = ALIGN(i_size_read(dio->inode),
							1 << blkbits);
				if (sdio->block_in_file >=
						i_size_aligned >> blkbits) {
					/* We hit eof */
					page_cache_release(page);
					goto out;
				}
				zero_user(page, block_in_page << blkbits,
						1 << blkbits);
				sdio->block_in_file++;
				block_in_page++;
				goto next_block;
			}

			/*
			 * If we're performing IO which has an alignment which
			 * is finer than the underlying fs, go check to see if
			 * we must zero out the start of this block.
			 */
			if (unlikely(sdio->blkfactor && !sdio->start_zero_done))
				dio_zero_block(dio, sdio, 0);

			/*
			 * Work out, in this_chunk_blocks, how much disk we
			 * can add to this page
			 */
			this_chunk_blocks = sdio->blocks_available;
			u = (PAGE_SIZE - offset_in_page) >> blkbits;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			u = sdio->final_block_in_request - sdio->block_in_file;
			if (this_chunk_blocks > u)
				this_chunk_blocks = u;
			this_chunk_bytes = this_chunk_blocks << blkbits;
			BUG_ON(this_chunk_bytes == 0);

			sdio->boundary = buffer_boundary(map_bh);
			ret = submit_page_section(dio, sdio, page,
						  offset_in_page,
						  this_chunk_bytes,
						  sdio->next_block_for_io);
			if (ret) {
				page_cache_release(page);
				goto out;
			}
			sdio->next_block_for_io += this_chunk_blocks;

			sdio->block_in_file += this_chunk_blocks;
			block_in_page += this_chunk_blocks;
			sdio->blocks_available -= this_chunk_blocks;
next_block:
			BUG_ON(sdio->block_in_file > sdio->final_block_in_request);
			if (sdio->block_in_file == sdio->final_block_in_request)
				break;
		}

		/* Drop the ref which was taken in get_user_pages() */
		page_cache_release(page);
		block_in_page = 0;
	}
out:
	return ret;
}

static ssize_t
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 
	const struct iovec *iov, loff_t offset, unsigned long nr_segs, 
	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io, struct dio *dio, struct dio_submit *sdio)
{
	unsigned long user_addr; 
	unsigned long flags;
	int seg;
	ssize_t ret = 0;
	ssize_t ret2;
	size_t bytes;

	dio->inode = inode;
	dio->rw = rw;
1053 1054 1055
	sdio->blkbits = blkbits;
	sdio->blkfactor = inode->i_blkbits - blkbits;
	sdio->block_in_file = offset >> blkbits;
Linus Torvalds's avatar
1057
	sdio->get_block = get_block;
Linus Torvalds's avatar
1059 1060 1061
	sdio->submit_io = submit_io;
	sdio->final_block_in_bio = -1;
	sdio->next_block_for_io = -1;
Linus Torvalds's avatar
	dio->iocb = iocb;
1064
	dio->i_size = i_size_read(inode);
Linus Torvalds's avatar
	spin_lock_init(&dio->bio_lock);
1067
	dio->refcount = 1;
Linus Torvalds's avatar
	/*
	 * In case of non-aligned buffers, we may need 2 more
	 * pages since we need to zero out first and last block.
	 */
	if (unlikely(sdio->blkfactor))
		sdio->pages_in_io = 2;

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio->pages_in_io +=
			((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
				- user_addr/PAGE_SIZE);
	}

	for (seg = 0; seg < nr_segs; seg++) {
		user_addr = (unsigned long)iov[seg].iov_base;
		sdio->size += bytes = iov[seg].iov_len;

		/* Index into the first page of the first block */
		sdio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
		sdio->final_block_in_request = sdio->block_in_file +
						(bytes >> blkbits);
		/* Page fetching state */
		sdio->head = 0;
		sdio->tail = 0;
		sdio->curr_page = 0;

		sdio->total_pages = 0;
		if (user_addr & (PAGE_SIZE-1)) {
			sdio->total_pages++;
			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
		}
		sdio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
		sdio->curr_user_address = user_addr;
	
		ret = do_direct_IO(dio, sdio);

		dio->result += iov[seg].iov_len -
			((sdio->final_block_in_request - sdio->block_in_file) <<
					blkbits);

		if (ret) {
			dio_cleanup(dio, sdio);
			break;
		}
	} /* end iovec loop */

	if (ret == -ENOTBLK) {
		/*
		 * The remaining part of the request will be
		 * handled by buffered I/O when we return
		 */
		ret = 0;
	}
	/*
	 * There may be some unwritten disk at the end of a part-written
	 * fs-block-sized block.  Go zero that now.
	 */
	dio_zero_block(dio, sdio, 1);

	if (sdio->cur_page) {
		ret2 = dio_send_cur_page(dio, sdio);
		if (ret == 0)
			ret = ret2;
		page_cache_release(sdio->cur_page);
		sdio->cur_page = NULL;
	}
	if (sdio->bio)
		dio_bio_submit(dio, sdio);

	/*
	 * It is possible that we return short IO due to end of file.
	 * In that case, we need to release all the pages we got hold on.
	 */
	dio_cleanup(dio, sdio);

	/*
	 * All block lookups have been performed. For READ requests
	 * we can let i_mutex go now that it's achieved its purpose
	 * of protecting us from looking up uninitialized blocks.
	 */
	if (rw == READ && (dio->flags & DIO_LOCKING))
		mutex_unlock(&dio->inode->i_mutex);

	/*
	 * The only time we want to leave bios in flight is when a successful
	 * partial aio read or full aio write have been setup.  In that case
	 * bio completion will call aio_complete.  The only time it's safe to
	 * call aio_complete is when we return -EIOCBQUEUED, so we key on that.
	 * This had *better* be the only place that raises -EIOCBQUEUED.
	 */
	BUG_ON(ret == -EIOCBQUEUED);
	if (dio->is_async && ret == 0 && dio->result &&
	    ((rw & READ) || (dio->result == sdio->size)))
		ret = -EIOCBQUEUED;

	if (ret != -EIOCBQUEUED)
		dio_await_completion(dio);

	/*
	 * Sync will always be dropping the final ref and completing the
	 * operation.  AIO can if it was a broken operation described above or
	 * in fact if all the bios race to complete before we get here.  In
	 * that case dio_complete() translates the EIOCBQUEUED into the proper
	 * return code that the caller will hand to aio_complete().
	 *
	 * This is managed by the bio_lock instead of being an atomic_t so that
	 * completion paths can drop their ref and use the remaining count to
	 * decide to wake the submission path atomically.
	 */
	spin_lock_irqsave(&dio->bio_lock, flags);
	ret2 = --dio->refcount;
	spin_unlock_irqrestore(&dio->bio_lock, flags);

	if (ret2 == 0) {
		ret = dio_complete(dio, offset, ret, false);
		kmem_cache_free(dio_cache, dio);
	} else
		BUG_ON(ret != -EIOCBQUEUED);

	return ret;
}

/*
 * This is a library function for use by filesystem drivers.
 *
 * The locking rules are governed by the flags parameter:
 *  - if the flags value contains DIO_LOCKING we use a fancy locking
 *    scheme for dumb filesystems.
 *    For writes this function is called under i_mutex and returns with
 *    i_mutex held, for reads, i_mutex is not held on entry, but it is
 *    taken and dropped again before returning.
 *  - if the flags value does NOT contain DIO_LOCKING we don't use any
 *    internal locking but rather rely on the filesystem to synchronize
 *    direct I/O reads/writes versus each other and truncate.
 *
 * To help with locking against truncate we increment the i_dio_count
 * counter before starting direct I/O, and decrement it once we are done.
 * Truncate can wait for it to reach zero to provide exclusion.  It is
 * expected that filesystems provide exclusion between new direct I/O
 * and truncates.  For DIO_LOCKING filesystems this is done by i_mutex,
 * but other filesystems need to take care of this on their own.
1211
 */
ssize_t
__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
	struct block_device *bdev, const struct iovec *iov, loff_t offset, 
	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
	dio_submit_t submit_io,	int flags)
{
	int seg;
	size_t size;
	unsigned long addr;
	unsigned blkbits = inode->i_blkbits;
	unsigned bdev_blkbits = 0;
	unsigned blocksize_mask = (1 << blkbits) - 1;
	ssize_t retval = -EINVAL;
	loff_t end = offset;
	struct dio *dio;
	struct dio_submit sdio = { 0, };

	if (rw & WRITE)
		rw = WRITE_ODIRECT;

	if (bdev)
		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));

	if (offset & blocksize_mask) {
		if (bdev)
			 blkbits = bdev_blkbits;
		blocksize_mask = (1 << blkbits) - 1;
		if (offset & blocksize_mask)
			goto out;
	}

	/* Check the memory alignment.  Blocks cannot straddle pages */
	for (seg = 0; seg < nr_segs; seg++) {
		addr = (unsigned long)iov[seg].iov_base;
		size = iov[seg].iov_len;
		end += size;
		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
			if (bdev)
				 blkbits = bdev_blkbits;
			blocksize_mask = (1 << blkbits) - 1;
			if ((addr & blocksize_mask) || (size & blocksize_mask))  
				goto out;
		}
	}

	/* watch out for a 0 len io from a tricksy fs */
	if (rw == READ && end == offset)
		return 0;

	dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
	retval = -ENOMEM;
	if (!dio)
		goto out;
	/*
	 * Believe it or not, zeroing out the page array caused a .5%
	 * performance regression in a database benchmark.  So, we take
	 * care to only zero out what's needed.
	 */
	memset(dio, 0, offsetof(struct dio, pages));

	dio->flags = flags;
	if (dio->flags & DIO_LOCKING) {
		if (rw == READ) {
			struct address_space *mapping =
					iocb->ki_filp->f_mapping;

			/* will be released by direct_io_worker */
			mutex_lock(&inode->i_mutex);

			retval = filemap_write_and_wait_range(mapping, offset,
							      end - 1);
			if (retval) {