/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
	SP(64),
	SP(128),
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
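
/*
 * Illustrative sketch: how a low-level driver's queuecommand() can end up
 * here.  Returning SCSI_MLQUEUE_HOST_BUSY (or SCSI_MLQUEUE_DEVICE_BUSY)
 * from queuecommand() makes the midlayer call scsi_queue_insert(), which
 * marks the host/device blocked and requeues the request at the head of
 * the queue.  example_hw_ring_full() and example_hw_submit() are
 * hypothetical helpers; this block is not built.
 */
#if 0
static int example_queuecommand(struct scsi_cmnd *cmd,
				void (*done)(struct scsi_cmnd *))
{
	/* hypothetical check for a full hardware queue */
	if (example_hw_ring_full(cmd->device->host))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will requeue */

	/* hand the command to the (hypothetical) hardware */
	return example_hw_submit(cmd, done);
}
#endif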

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
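
/*
 * Illustrative sketch: a minimal caller issuing TEST UNIT READY through
 * scsi_execute_req() and decoding the returned sense data.  The function
 * name is hypothetical, the timeout is in jiffies as in existing callers,
 * and this block is not built.
 */
#if 0
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  30 * HZ, 3);
	if (result && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev,
			    "TEST UNIT READY failed, sense key 0x%x\n",
			    sshdr.sense_key);
	return result;
}
#endif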

struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sg:		scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg->page;
		off = sg->offset;
		len = sg->length;
 		data_len += len;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer, unsigned bufflen,
		       int use_sg, int timeout, int retries, void *privdata,
		       void (*done)(void *, char *, int, int), gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);
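
/*
 * Illustrative sketch: submitting a command asynchronously with
 * scsi_execute_async() and waking a waiter from the completion callback.
 * The function names are hypothetical, the timeout is in jiffies as with
 * the synchronous helpers, and this block is not built.
 */
#if 0
static void example_async_done(void *data, char *sense, int result, int resid)
{
	/* runs from the request's end_io path; keep it short */
	complete(data);
}

static int example_start_unit(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1, 0 };	/* START */
	DECLARE_COMPLETION_ONSTACK(done);
	int err;

	err = scsi_execute_async(sdev, cmd, 6, DMA_NONE, NULL, 0, 0,
				 30 * HZ, 3, &done, example_async_done,
				 GFP_KERNEL);
	if (err)
		return err;
	wait_for_completion(&done);
	return 0;
}
#endif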

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	cmd->resid = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);
	
		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
					  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);


		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * The maximum number of SG segments that we will put inside a scatterlist
 * (unless chaining is used). Should ideally fit inside a single page, to
 * avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
	case 17 ... 32:
		index = 2;
		break;
	case 33 ... 64:
		index = 3;
		break;
	case 65 ... SCSI_MAX_SG_SEGMENTS:
		index = 4;
		break;
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}

struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl, *prev, *ret;
	unsigned int index;
	int this, left;

	BUG_ON(!cmd->use_sg);

	left = cmd->use_sg;
	ret = prev = NULL;
	do {
		this = left;
		if (this > SCSI_MAX_SG_SEGMENTS) {
			this = SCSI_MAX_SG_SEGMENTS - 1;
			index = SG_MEMPOOL_NR - 1;
		} else
			index = scsi_sgtable_index(this);

		left -= this;

		sgp = scsi_sg_pools + index;

		sgl = mempool_alloc(sgp->pool, gfp_mask);
		if (unlikely(!sgl))
			goto enomem;

		memset(sgl, 0, sizeof(*sgl) * sgp->size);

		/*
		 * first loop through, set initial index and return value
		 */
		if (!ret) {
			cmd->sglist_len = index;
			ret = sgl;
		}

		/*
		 * chain previous sglist, if any. we know the previous
		 * sglist must be the biggest one, or we would not have
		 * ended up doing another loop.
		 */
		if (prev)
			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);

		/*
		 * don't allow subsequent mempool allocs to sleep, it would
		 * violate the mempool principle.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prev = sgl;
	} while (left);

	/*
	 * ->use_sg may get modified after dma mapping has potentially
	 * shrunk the number of segments, so keep a copy of it for free.
	 */
	cmd->__use_sg = cmd->use_sg;
	return ret;
enomem:
	if (ret) {
		/*
		 * Free entries chained off ret. Since we were trying to
		 * allocate another sglist, we know that all entries are of
		 * the max size.
		 */
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
		prev = ret;
		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];

		while ((sgl = sg_chain_ptr(ret)) != NULL) {
			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
			mempool_free(sgl, sgp->pool);
		}

		mempool_free(prev, sgp->pool);
	}
	return NULL;
}

EXPORT_SYMBOL(scsi_alloc_sgtable);
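
/*
 * Worked example: with SCSI_MAX_SG_SEGMENTS == 128, a command with
 * use_sg == 200 is allocated in two pieces.  The first pass takes 127
 * entries from sgpool-128 (the 128th slot is reserved for the chain
 * entry), leaving 73; the second pass maps 73 through scsi_sgtable_index()
 * to index 4, i.e. another sgpool-128 allocation, and sg_chain() links the
 * last slot of the first table to the start of the second.
 */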

void scsi_free_sgtable(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = cmd->request_buffer;
	struct scsi_host_sg_pool *sgp;

	BUG_ON(cmd->sglist_len >= SG_MEMPOOL_NR);

	/*
	 * if this is the biggest size sglist, check if we have
	 * chained parts we need to free
	 */
	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
		unsigned short this, left;
		struct scatterlist *next;
		unsigned int index;

		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
		while (left && next) {
			sgl = next;
			this = left;
			if (this > SCSI_MAX_SG_SEGMENTS) {
				this = SCSI_MAX_SG_SEGMENTS - 1;
				index = SG_MEMPOOL_NR - 1;
			} else
				index = scsi_sgtable_index(this);

			left -= this;

			sgp = scsi_sg_pools + index;

			if (left)
				next = sg_chain_ptr(&sgl[sgp->size - 1]);

			mempool_free(sgl, sgp->pool);
		}

		/*
		 * Restore original, will be freed below
		 */
		sgl = cmd->request_buffer;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	mempool_free(sgl, sgp->pool);
}

EXPORT_SYMBOL(scsi_free_sgtable);

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer,  len);
				req->sense_len = len;
			}
		}
		req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	int		   count;

	/*
	 * We used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!cmd->request_buffer)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	else
		cmd->request_bufflen = req->nr_sectors << 9;

	/* 
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return BLKPREP_OK;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	return BLKPREP_KILL;
}

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must have
	 * a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		cmd->request_bufflen = 0;
		cmd->request_buffer = NULL;
		cmd->use_sg = 0;
		req->buffer = NULL;
	}

	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;
	
	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write request
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;
	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	return scsi_init_io(cmd);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

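/*
 * Illustrative sketch: how an upper-level driver's prep_fn might use
 * scsi_setup_fs_cmnd(), scsi_setup_blk_pc_cmnd() and scsi_prep_return()
 * (below).  The function name is hypothetical and this block is not built;
 * on BLKPREP_OK a real ULD would go on to build its own CDB in the
 * returned command.
 */
#if 0
static int example_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_FS)
		ret = scsi_setup_fs_cmnd(sdev, req);
	else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);

	return scsi_prep_return(q, req, ret);
}
#endif
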
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the devices is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug