fc_fcp.c 58.4 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>
30
#include <linux/slab.h>
31
32
33
34
35
36
37
38
39
40
41
42

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

43
#include "fc_libfc.h"
44

45
struct kmem_cache *scsi_pkt_cachep;
46
47
48
49
50
51

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
52
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */

#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)

/*
 * The SCp.ptr should be tested and set under the host lock. NULL indicates
 * that the command has been retruned to the scsi layer.
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)

70
71
/**
 * struct fc_fcp_internal - FCP layer internal data
72
73
 * @scsi_pkt_pool: Memory pool to draw FCP packets from
 * @scsi_queue_lock: Protects the scsi_pkt_queue
74
 * @scsi_pkt_queue: Current FCP packets
75
76
77
 * @last_can_queue_ramp_down_time: ramp down time
 * @last_can_queue_ramp_up_time: ramp up time
 * @max_can_queue: max can_queue size
78
 */
79
struct fc_fcp_internal {
80
81
82
83
84
85
	mempool_t		*scsi_pkt_pool;
	spinlock_t		scsi_queue_lock;
	struct list_head	scsi_pkt_queue;
	unsigned long		last_can_queue_ramp_down_time;
	unsigned long		last_can_queue_ramp_up_time;
	int			max_can_queue;
86
87
88
89
90
91
92
93
94
95
96
97
98
};

#define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
99
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
100
static void fc_fcp_recovery(struct fc_fcp_pkt *);
101
static void fc_fcp_timeout(unsigned long);
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
124
#define FC_CMD_RECOVERY		10
125
126
127
128
129
130
131
132

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)
133
#define FC_CAN_QUEUE_PERIOD	(60 * HZ)
134
135
136
137
138
139
140

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
 * @lport: The local port that the FCP packet is for
 * @gfp:   GFP flags for allocation
 *
 * The returned packet is zeroed and carries an initial reference count
 * of one; release it with fc_fcp_pkt_release().
 *
 * Return value: fcp_pkt structure or null on allocation failure.
 * Context:	 Can be called from process context, no lock is required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lport;
		/* initial reference belongs to the caller */
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}

/**
 * fc_fcp_pkt_release() - Release hold on a fcp_pkt
 * @fsp: The FCP packet to be released
 *
 * The packet is returned to the mempool it was drawn from when the
 * last reference is dropped.
 *
 * Context: Can be called from process or interrupt context,
 *	    no lock is required.
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}

181
182
183
184
/**
 * fc_fcp_pkt_hold() - Hold a fcp_pkt
 * @fsp: The FCP packet to be held
 *
 * Takes an additional reference; pair with fc_fcp_pkt_release().
 */
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
 * @seq:  The sequence that the FCP packet is on (required by destructor API)
 * @priv: The FCP packet to be released
 *
 * This routine is called by a destructor callback in the exch_seq_send()
 * routine of the libfc Transport Template. The 'struct fc_seq' is a required
 * argument even though it is not used by this routine.
 *
 * Context: No locking required.
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *priv)
{
	struct fc_fcp_pkt *fsp = priv;

	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
 * @fsp: The FCP packet to be locked and incremented
 *
 * We should only return error if we return a command to SCSI-ml before
 * getting a response. This can happen in cases where we send a abort, but
 * do not wait for the response and the abort and command can be passing
 * each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packets refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 *
 * Return: 0 with the lock held and a reference taken, or -EPERM (lock
 * dropped, no reference) if the packet has already completed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		/* already completed: caller must not touch fsp further */
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}

235
236
237
238
239
/**
 * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
 *			 reference count
 * @fsp: The FCP packet to be unlocked and decremented
 *
 * Counterpart of fc_fcp_lock_pkt(); drops the lock first so the
 * release (and possible free) happens outside the spinlock.
 */
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}

246
247
248
249
250
/**
 * fc_fcp_timer_set() - Start a timer for a fcp_pkt
 * @fsp:   The FCP packet to start a timer for
 * @delay: The timeout period for the timer
 */
251
252
253
254
255
256
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}

257
258
259
260
261
/**
 * fc_fcp_send_abort() - Send an abort for exchanges associated with a
 *			 fcp_pkt
 * @fsp: The FCP packet to abort exchanges on
 */
262
263
264
265
266
267
268
269
270
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

271
272
273
274
275
276
277
278
/**
 * fc_fcp_retry_cmd() - Retry a fcp_pkt
 * @fsp: The FCP packet to be retried
 *
 * Sets the status code to be FC_ERROR and then calls
 * fc_fcp_complete_locked() which in turn calls fc_io_compl().
 * fc_io_compl() will notify the SCSI-ml that the I/O is done.
 * The SCSI-ml will retry the command.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	/* tear down any active exchange before completing */
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = 0;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

293
294
295
296
/**
 * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
 * @fsp: The FCP packet that will manage the DDP frames
 * @xid: The XID that will be used for the DDP exchange
 *
 * DDP is only attempted for READ commands when the lport has LRO
 * enabled and the LLD provides a ddp_setup handler.
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
	struct fc_lport *lport;

	lport = fsp->lp;
	if ((fsp->req_flags & FC_SRB_READ) &&
	    (lport->lro_enabled) && (lport->tt.ddp_setup)) {
		/* remember the xid only if the LLD accepted the setup */
		if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
					scsi_sg_count(fsp->cmd)))
			fsp->xfer_ddp = xid;
	}
}

311
312
313
314
/**
 * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
 *		       DDP related resources for a fcp_pkt
 * @fsp: The FCP packet that DDP had been used on
 *
 * Updates fsp->xfer_len with the byte count the LLD placed directly,
 * then marks the packet as no longer DDP'ed. Safe to call when DDP
 * was never set up (xfer_ddp == FC_XID_UNKNOWN) or with a NULL @fsp.
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lport;

	if (!fsp)
		return;

	if (fsp->xfer_ddp == FC_XID_UNKNOWN)
		return;

	lport = fsp->lp;
	if (lport->tt.ddp_done) {
		fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
		fsp->xfer_ddp = FC_XID_UNKNOWN;
	}
}

333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
/**
 * fc_fcp_can_queue_ramp_up() - increases can_queue
 * @lport: lport to ramp up can_queue
 *
 * Doubles can_queue, capped at the original maximum. Rate-limited so
 * a ramp up happens at most once per FC_CAN_QUEUE_PERIOD, and not
 * within FC_CAN_QUEUE_PERIOD of the last ramp down.
 *
 * Locking notes: Called with Scsi_Host lock held
 */
static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	int can_queue;

	if (si->last_can_queue_ramp_up_time &&
	    (time_before(jiffies, si->last_can_queue_ramp_up_time +
			 FC_CAN_QUEUE_PERIOD)))
		return;

	if (time_before(jiffies, si->last_can_queue_ramp_down_time +
			FC_CAN_QUEUE_PERIOD))
		return;

	si->last_can_queue_ramp_up_time = jiffies;

	can_queue = lport->host->can_queue << 1;
	if (can_queue >= si->max_can_queue) {
		can_queue = si->max_can_queue;
		/* fully recovered: forget the last ramp down */
		si->last_can_queue_ramp_down_time = 0;
	}
	lport->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lport->host, "libfc: increased "
		     "can_queue to %d.\n", can_queue);
}

365
366
367
368
369
370
371
372
373
/**
 * fc_fcp_can_queue_ramp_down() - reduces can_queue
 * @lport: lport to reduce can_queue
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * on all reserved structs.
 *
 * Locking notes: Called with Scsi_Host lock held
 */
static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	int can_queue;

	/* rate-limit: at most one ramp down per FC_CAN_QUEUE_PERIOD */
	if (si->last_can_queue_ramp_down_time &&
	    (time_before(jiffies, si->last_can_queue_ramp_down_time +
			 FC_CAN_QUEUE_PERIOD)))
		return;

	si->last_can_queue_ramp_down_time = jiffies;

	/* halve can_queue but never let it reach zero */
	can_queue = lport->host->can_queue;
	can_queue >>= 1;
	if (!can_queue)
		can_queue = 1;
	lport->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
		     "Reducing can_queue to %d.\n", can_queue);
}

/**
 * fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer.
 * @lport:	fc lport struct
 * @len:	payload length
 *
 * Allocates fc_frame structure and buffer but if fails to allocate
 * then reduce can_queue.
 */
static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
						  size_t len)
{
	struct fc_frame *fp;
	unsigned long flags;

	fp = fc_frame_alloc(lport, len);
	if (likely(fp))
		return fp;

	/* error case: throttle outstanding commands under host lock */
	spin_lock_irqsave(lport->host->host_lock, flags);
	fc_fcp_can_queue_ramp_down(lport);
	spin_unlock_irqrestore(lport->host->host_lock, flags);
	return NULL;
}

423
424
425
426
/**
 * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
 * @fsp: The FCP packet the data is on
 * @fp:	 The data frame
 *
 * Copies the frame payload into the command's scatterlist, verifying
 * the FC CRC in software when the LLD has not already checked it.
 * Bad frames either trigger recovery (when the transfer is already
 * discontiguous) or are silently dropped for retransmission.
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lport = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	u32 nents;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	/*
	 * if this I/O is ddped then clear it
	 * and initiate recovery since data
	 * frames are expected to be placed
	 * directly in that case.
	 */
	if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
		fc_fcp_ddp_done(fsp);
		goto err;
	}
	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
			   "data_len %x\n", len, offset, fsp->data_len);
		goto err;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	sg = scsi_sglist(sc);
	nents = scsi_sg_count(sc);

	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
						    &offset, KM_SOFTIRQ0, NULL);
	} else {
		/* CRC not yet verified by the LLD: accumulate it while copying */
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
						    &offset, KM_SOFTIRQ0, &crc);
		buf = fc_frame_payload_get(fp, 0);
		/* include the pad bytes that round the payload to a word */
		if (len % 4)
			crc = crc32(crc, buf + len, 4 - (len % 4));

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = per_cpu_ptr(lport->dev_stats, get_cpu());
			stats->ErrorFrames++;
			/* per cpu count, not total count, but OK for limit */
			if (stats->InvalidCRCCount++ < 5)
				printk(KERN_WARNING "libfc: CRC error on data "
				       "frame for port (%6.6x)\n",
				       lport->port_id);
			put_cpu();
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				goto err;
			return;
		}
	}

	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
	return;
err:
	fc_fcp_recovery(fsp);
}

524
/**
 * fc_fcp_send_data() - Send SCSI data to a target
 * @fsp:      The FCP packet the data is on
 * @seq:      The sequence the data is to be sent on
 * @offset:   The starting offset for this data request
 * @seq_blen: The burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down the
 * seq_blen amount of data in single frame, otherwise send
 * multiple frames of the maximum frame payload supported by
 * the target port.
 *
 * Return: 0 on success (including the abort/retry fallback paths),
 * -ENOMEM if a frame could not be allocated.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
			    size_t offset, size_t seq_blen)
{
	struct fc_exch *ep;
	struct scsi_cmnd *sc;
	struct scatterlist *sg;
	struct fc_frame *fp = NULL;
	struct fc_lport *lport = fsp->lp;
	struct page *page;
	size_t remaining;
	size_t t_blen;
	size_t tlen;
	size_t sg_bytes;
	size_t frame_offset, fh_parm_offset;
	size_t off;
	int error;
	void *data = NULL;
	void *page_addr;
	int using_sg = lport->sg_supp;
	u32 f_ctl;

	/* seq_blen is unsigned: "!seq_blen" is the meaningful zero check */
	WARN_ON(!seq_blen);
	if (unlikely(offset + seq_blen > fsp->data_len)) {
		/* this should never happen */
		FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
			   "offset %zx\n", seq_blen, offset);
		fc_fcp_send_abort(fsp);
		return 0;
	} else if (offset != fsp->xfer_len) {
		/* Out of Order Data Request - no problem, but unexpected. */
		FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
			   "seq_blen %zx offset %zx\n", seq_blen, offset);
	}

	/*
	 * if LLD is capable of seq_offload then set transport
	 * burst length (t_blen) to seq_blen, otherwise set t_blen
	 * to max FC frame payload previously set in fsp->max_payload.
	 */
	t_blen = fsp->max_payload;
	if (lport->seq_offload) {
		t_blen = min(seq_blen, (size_t)lport->lso_max);
		FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
			   fsp, seq_blen, lport->lso_max, t_blen);
	}

	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);
	if (t_blen > 512)
		t_blen &= ~(512 - 1);	/* round down to block size */
	WARN_ON(t_blen < FC_MIN_MAX_PAYLOAD);	/* won't go below 256 */
	sc = fsp->cmd;

	remaining = seq_blen;
	fh_parm_offset = frame_offset = offset;
	tlen = 0;
	seq = lport->tt.seq_start_next(seq);
	f_ctl = FC_FC_REL_OFF;
	WARN_ON(!seq);

	sg = scsi_sglist(sc);

	while (remaining > 0 && sg) {
		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		if (!fp) {
			tlen = min(t_blen, remaining);

			/*
			 * TODO.  Temporary workaround.	 fc_seq_send() can't
			 * handle odd lengths in non-linear skbs.
			 * This will be the final fragment only.
			 */
			if (tlen % 4)
				using_sg = 0;
			fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
			if (!fp)
				return -ENOMEM;

			data = fc_frame_header_get(fp) + 1;
			fh_parm_offset = frame_offset;
			fr_max_payload(fp) = fsp->max_payload;
		}

		off = offset + sg->offset;
		sg_bytes = min(tlen, sg->length - offset);
		sg_bytes = min(sg_bytes,
			       (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
		page = sg_page(sg) + (off >> PAGE_SHIFT);
		if (using_sg) {
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off & ~PAGE_MASK, sg_bytes);
			fp_skb(fp)->data_len += sg_bytes;
			fr_len(fp) += sg_bytes;
			fp_skb(fp)->truesize += PAGE_SIZE;
		} else {
			/*
			 * The scatterlist item may be bigger than PAGE_SIZE,
			 * but we must not cross pages inside the kmap.
			 */
			page_addr = kmap_atomic(page, KM_SOFTIRQ0);
			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
			       sg_bytes);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			data += sg_bytes;
		}
		offset += sg_bytes;
		frame_offset += sg_bytes;
		tlen -= sg_bytes;
		remaining -= sg_bytes;

		/* keep filling this frame while it has room and data remains */
		if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
		    (tlen))
			continue;

		/*
		 * Send sequence with transfer sequence initiative in case
		 * this is last FCP frame of the sequence.
		 */
		if (remaining == 0)
			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

		ep = fc_seq_exch(seq);
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_parm_offset);

		/*
		 * send fragment using for a sequence.
		 */
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			WARN_ON(1);		/* send error should be rare */
			fc_fcp_retry_cmd(fsp);
			return 0;
		}
		fp = NULL;
	}
	fsp->xfer_len += seq_blen;	/* premature count? */
	return 0;
}

682
683
684
685
686
/**
 * fc_fcp_abts_resp() - Handle the response (BA_ACC or BA_RJT) to an ABTS
 * @fsp: The FCP packet that is being aborted
 * @fp:	 The response frame
 *
 * On a successful abort the packet is either completed here or, for
 * task-management commands, the waiter is signalled via tm_done.
 * Unexpected responses leave the abort pending so that the command
 * times out and SCSI-ml recovery takes over.
 */
static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int ba_done = 1;
	struct fc_ba_rjt *brp;
	struct fc_frame_header *fh;

	fh = fc_frame_header_get(fp);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		break;
	case FC_RCTL_BA_RJT:
		/* a logical-error reject still counts as "abort done" */
		brp = fc_frame_payload_get(fp, sizeof(*brp));
		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
			break;
		/* fall thru */
	default:
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore cleared the ba_done flag.
		 */
		ba_done = 0;
	}

	if (ba_done) {
		fsp->state |= FC_SRB_ABORTED;
		fsp->state &= ~FC_SRB_ABORT_PENDING;

		if (fsp->wait_for_comp)
			complete(&fsp->tm_done);
		else
			fc_fcp_complete_locked(fsp);
	}
}

722
/**
 * fc_fcp_recv() - Receive an FCP frame
 * @seq: The sequence the frame is on
 * @fp:	 The received frame
 * @arg: The related FCP packet
 *
 * Dispatches by R_CTL: XFER_RDY starts a data send, solicited data is
 * copied to the command buffer, and CMD_STATUS is handed to the
 * response parser. BLS frames are treated as ABTS responses.
 *
 * Context: Called from Soft IRQ context. Can not be called
 *	    holding the FCP packet list lock.
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_lport *lport = fsp->lp;
	struct fc_frame_header *fh;
	struct fcp_txrdy *dd;
	u8 r_ctl;
	int rc = 0;

	/* exchange layer reports errors as ERR_PTR-encoded frames */
	if (IS_ERR(fp)) {
		fc_fcp_error(fsp, fp);
		return;
	}

	fh = fc_frame_header_get(fp);
	r_ctl = fh->fh_r_ctl;

	if (lport->state != LPORT_ST_READY)
		goto out;
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->last_pkt_time = jiffies;

	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_abts_resp(fsp, fp);
		goto unlock;
	}

	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
		goto unlock;

	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
		/*
		 * received XFER RDY from the target
		 * need to send data to the target
		 */
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
		dd = fc_frame_payload_get(fp, sizeof(*dd));
		WARN_ON(!dd);

		rc = fc_fcp_send_data(fsp, seq,
				      (size_t) ntohl(dd->ft_data_ro),
				      (size_t) ntohl(dd->ft_burst_len));
		if (!rc)
			seq->rec_data = fsp->xfer_len;
	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
		/*
		 * received a DATA frame
		 * next we will copy the data to the system buffer
		 */
		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
		fc_fcp_recv_data(fsp, fp);
		seq->rec_data = fsp->xfer_contig_end;
	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

		fc_fcp_resp(fsp, fp);
	} else {
		FC_FCP_DBG(fsp, "unexpected frame.  r_ctl %x\n", r_ctl);
	}
unlock:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_frame_free(fp);
}

797
798
799
800
801
/**
 * fc_fcp_resp() - Handler for FCP responses
 * @fsp: The FCP packet the response is for
 * @fp:	 The response frame
 *
 * Parses the FCP_RSP payload: status, optional response-info and sense
 * data, and residual counts. Completes the packet unless data is still
 * outstanding, in which case a short timer is armed to wait for it.
 */
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fcp_resp *fc_rp;
	struct fcp_resp_ext *rp_ex;
	struct fcp_resp_rsp_info *fc_rp_info;
	u32 plen;
	u32 expected_len;
	u32 respl = 0;
	u32 snsl = 0;
	u8 flags = 0;

	plen = fr_len(fp);
	fh = (struct fc_frame_header *)fr_hdr(fp);
	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
		goto len_err;
	plen -= sizeof(*fh);
	fc_rp = (struct fcp_resp *)(fh + 1);
	fsp->cdb_status = fc_rp->fr_status;
	flags = fc_rp->fr_flags;
	fsp->scsi_comp_flags = flags;
	expected_len = fsp->data_len;

	/* if ddp, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
		rp_ex = (void *)(fc_rp + 1);
		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
				goto len_err;
			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
			if (flags & FCP_RSP_LEN_VAL) {
				respl = ntohl(rp_ex->fr_rsp_len);
				if (respl != sizeof(*fc_rp_info))
					goto len_err;
				if (fsp->wait_for_comp) {
					/* Abuse cdb_status for rsp code */
					fsp->cdb_status = fc_rp_info->rsp_code;
					complete(&fsp->tm_done);
					/*
					 * tmfs will not have any scsi cmd so
					 * exit here
					 */
					return;
				}
			}
			if (flags & FCP_SNS_LEN_VAL) {
				snsl = ntohl(rp_ex->fr_sns_len);
				if (snsl > SCSI_SENSE_BUFFERSIZE)
					snsl = SCSI_SENSE_BUFFERSIZE;
				memcpy(fsp->cmd->sense_buffer,
				       (char *)fc_rp_info + respl, snsl);
			}
		}
		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
				goto len_err;
			if (flags & FCP_RESID_UNDER) {
				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
				/*
				 * The cmnd->underflow is the minimum number of
				 * bytes that must be transfered for this
				 * command.  Provided a sense condition is not
				 * present, make sure the actual amount
				 * transferred is at least the underflow value
				 * or fail.
				 */
				if (!(flags & FCP_SNS_LEN_VAL) &&
				    (fc_rp->fr_status == 0) &&
				    (scsi_bufflen(fsp->cmd) -
				     fsp->scsi_resid) < fsp->cmd->underflow)
					goto err;
				expected_len -= fsp->scsi_resid;
			} else {
				fsp->status_code = FC_ERROR;
			}
		}
	}
	fsp->state |= FC_SRB_RCV_STATUS;

	/*
	 * Check for missing or extra data frames.
	 */
	if (unlikely(fsp->xfer_len != expected_len)) {
		if (fsp->xfer_len < expected_len) {
			/*
			 * Some data may be queued locally,
			 * Wait a at least one jiffy to see if it is delivered.
			 * If this expires without data, we may do SRR.
			 */
			fc_fcp_timer_set(fsp, 2);
			return;
		}
		fsp->status_code = FC_DATA_OVRRUN;
		FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
			   "len %x, data len %x\n",
			   fsp->rport->port_id,
			   fsp->xfer_len, expected_len, fsp->data_len);
	}
	fc_fcp_complete_locked(fsp);
	return;

len_err:
	FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
		   "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
 *			      fcp_pkt lock held
 * @fsp: The FCP packet to be completed
 *
 * Classifies the final status (abort, transport underrun), optionally
 * sends an FCP_CONF confirmation when the target requested one, tears
 * down the exchange, and notifies SCSI-ml via fc_io_compl().
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lport = fsp->lp;
	struct fc_seq *seq;
	struct fc_exch *ep;
	u32 f_ctl;

	/* defer completion until the pending abort is resolved */
	if (fsp->state & FC_SRB_ABORT_PENDING)
		return;

	if (fsp->state & FC_SRB_ABORTED) {
		if (!fsp->status_code)
			fsp->status_code = FC_CMD_ABORTED;
	} else {
		/*
		 * Test for transport underrun, independent of response
		 * underrun status.
		 */
		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
			fsp->status_code = FC_DATA_UNDRUN;
			fsp->io_status = 0;
		}
	}

	seq = fsp->seq_ptr;
	if (seq) {
		fsp->seq_ptr = NULL;
		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
			struct fc_frame *conf_frame;
			struct fc_seq *csp;

			/* target asked for confirmation: best-effort FCP_CONF */
			csp = lport->tt.seq_start_next(seq);
			conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
			if (conf_frame) {
				f_ctl = FC_FC_SEQ_INIT;
				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
				ep = fc_seq_exch(seq);
				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
					       ep->did, ep->sid,
					       FC_TYPE_FCP, f_ctl, 0);
				lport->tt.seq_send(lport, csp, conf_frame);
			}
		}
		lport->tt.exch_done(seq);
	}
	fc_io_compl(fsp);
}

971
972
973
974
975
/**
 * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
 * @fsp:   The FCP packet whose exchanges should be canceled
 * @error: The reason for the cancellation
 *
 * Completes the exchange (if any) and records @error as the packet's
 * status code. Does not notify SCSI-ml itself; see callers.
 */
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
	struct fc_lport *lport = fsp->lp;

	if (fsp->seq_ptr) {
		lport->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->status_code = error;
}

/**
 * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
 * @lport: The local port whose exchanges should be canceled
 * @id:	   The target's ID
 * @lun:   The LUN
 * @error: The reason for cancellation
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
				    unsigned int lun, int error)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	struct fc_fcp_pkt *fsp;
	struct scsi_cmnd *sc_cmd;
	unsigned long flags;

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		sc_cmd = fsp->cmd;
		if (id != -1 && scmd_id(sc_cmd) != id)
			continue;

		if (lun != -1 && sc_cmd->device->lun != lun)
			continue;

		/* hold the pkt so it survives dropping the queue lock */
		fc_fcp_pkt_hold(fsp);
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

		if (!fc_fcp_lock_pkt(fsp)) {
			fc_fcp_cleanup_cmd(fsp, error);
			fc_io_compl(fsp);
			fc_fcp_unlock_pkt(fsp);
		}

		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(&si->scsi_queue_lock, flags);

		/*
		 * while we dropped the lock multiple pkts could
		 * have been released, so we have to start over.
		 */
		goto restart;
	}
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}

1034
1035
1036
1037
1038
/**
 * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
 * @lport: The local port whose exchanges are to be aborted
 */
static void fc_fcp_abort_io(struct fc_lport *lport)
1039
{
1040
	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
1041
1042
1043
}

/**
1044
1045
1046
 * fc_fcp_pkt_send() - Send a fcp_pkt
 * @lport: The local port to send the FCP packet on
 * @fsp:   The FCP packet to send
1047
 *
1048
 * Return:  Zero for success and -1 for failure
1049
 * Locks:   Called without locks held
1050
 */
1051
static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
1052
{
1053
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
1054
	unsigned long flags;
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
	int rc;

	fsp->cmd->SCp.ptr = (char *)fsp;
	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

	int_to_scsilun(fsp->cmd->device->lun,
		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);

1065
1066
1067
	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1068
	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
1069
1070
	if (unlikely(rc)) {
		spin_lock_irqsave(&si->scsi_queue_lock, flags);
1071
		list_del(&fsp->list);
1072
1073
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
	}
1074
1075
1076
1077

	return rc;
}

1078
1079
1080
1081
1082
1083
1084
/**
 * fc_fcp_cmd_send() - Send a FCP command
 * @lport: The local port to send the command on
 * @fsp:   The FCP packet the command is on
 * @resp:  The handler for the response
 */
static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1085
1086
1087
1088
1089
1090
1091
			   void (*resp)(struct fc_seq *,
					struct fc_frame *fp,
					void *arg))
{
	struct fc_frame *fp;
	struct fc_seq *seq;
	struct fc_rport *rport;
1092
	struct fc_rport_libfc_priv *rpriv;
1093
1094
1095
1096
1097
1098
	const size_t len = sizeof(fsp->cdb_cmd);
	int rc = 0;

	if (fc_fcp_lock_pkt(fsp))
		return 0;

1099
	fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
1100
1101
1102
1103
1104
1105
	if (!fp) {
		rc = -1;
		goto unlock;
	}

	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
1106
	fr_fsp(fp) = fsp;
1107
1108
	rport = fsp->rport;
	fsp->max_payload = rport->maxframe_size;
1109
	rpriv = rport->dd_data;
1110
1111

	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
1112
		       rpriv->local_port->port_id, FC_TYPE_FCP,
1113
1114
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

1115
1116
	seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
				      fsp, 0);
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
	if (!seq) {
		rc = -1;
		goto unlock;
	}
	fsp->last_pkt_time = jiffies;
	fsp->seq_ptr = seq;
	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */

	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
	fc_fcp_timer_set(fsp,
			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
	fc_fcp_unlock_pkt(fsp);
	return rc;
}

1134
1135
1136
1137
/**
 * fc_fcp_error() - Handler for FCP layer errors
 * @fsp: The FCP packet the error is on
 * @fp:	 The frame that has errored
1138
1139
1140
1141
1142
1143
1144
1145
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		return;

1146
	if (error == -FC_EX_CLOSED) {
1147
1148
1149
		fc_fcp_retry_cmd(fsp);
		goto unlock;
	}
1150

1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
	/*
	 * clear abort pending, because the lower layer
	 * decided to force completion.
	 */
	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_CMD_PLOGO;
	fc_fcp_complete_locked(fsp);
unlock:
	fc_fcp_unlock_pkt(fsp);
}

1162
1163
1164
1165
1166
/**
 * fc_fcp_pkt_abort() - Abort a fcp_pkt
 * @fsp:   The FCP packet to abort on
 *
 * Called to send an abort and then wait for abort completion
1167
 */
1168
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
{
	int rc = FAILED;

	if (fc_fcp_send_abort(fsp))
		return FAILED;

	init_completion(&fsp->tm_done);
	fsp->wait_for_comp = 1;

	spin_unlock_bh(&fsp->scsi_pkt_lock);
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->wait_for_comp = 0;

	if (!rc) {
1184
		FC_FCP_DBG(fsp, "target abort cmd  failed\n");
1185
1186
		rc = FAILED;
	} else if (fsp->state & FC_SRB_ABORTED) {
1187
		FC_FCP_DBG(fsp, "target abort cmd  passed\n");
1188
1189
1190
1191
1192
1193
1194
		rc = SUCCESS;
		fc_fcp_complete_locked(fsp);
	}

	return rc;
}

1195
1196
1197
/**
 * fc_lun_reset_send() - Send LUN reset command
 * @data: The FCP packet that identifies the LUN to be reset
1198
1199
1200
1201
 */
static void fc_lun_reset_send(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
1202
1203
	struct fc_lport *lport = fsp->lp;
	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
			return;
		if (fc_fcp_lock_pkt(fsp))
			return;
		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		fc_fcp_unlock_pkt(fsp);
	}
}

1214
1215
1216
1217
1218
1219
1220
/**
 * fc_lun_reset() - Send a LUN RESET command to a device
 *		    and wait for the reply
 * @lport: The local port to sent the comand on
 * @fsp:   The FCP packet that identifies the LUN to be reset
 * @id:	   The SCSI command ID
 * @lun:   The LUN ID to be reset
1221
 */
1222
static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
			unsigned int id, unsigned int lun)
{
	int rc;

	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

	fsp->wait_for_comp = 1;
	init_completion(&fsp->tm_done);

	fc_lun_reset_send((unsigned long)fsp);

	/*
	 * wait for completion of reset
	 * after that make sure all commands are terminated
	 */
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->state |= FC_SRB_COMPL;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	del_timer_sync(&fsp->timer);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->seq_ptr) {
1250
		lport->tt.exch_done(fsp->seq_ptr);
1251
1252
1253
1254
1255
1256
		fsp->seq_ptr = NULL;
	}
	fsp->wait_for_comp = 0;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	if (!rc) {
1257
		FC_SCSI_DBG(lport, "lun reset failed\n");
1258
1259
1260
1261
1262
1263
1264
		return FAILED;
	}

	/* cdb_status holds the tmf's rsp code */
	if (fsp->cdb_status != FCP_TMF_CMPL)
		return FAILED;

1265
1266
	FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
	fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
1267
1268
1269
	return SUCCESS;
}

1270
1271
1272
1273
1274
/**
 * fc_tm_done() - Task Managment response handler
 * @seq: The sequence that the response is on
 * @fp:	 The response frame
 * @arg: The FCP packet the response is for
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/*
		 * If there is an error just let it timeout or wait
		 * for TMF to be aborted if it timedout.
		 *
		 * scsi-eh will escalate for when either happens.
		 */
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		return;

	/*
	 * raced with eh timeout handler.
	 */
	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return;
	}

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_BLS)
		fc_fcp_resp(fsp, fp);
	fsp->seq_ptr = NULL;
	fsp->lp->tt.exch_done(seq);
	fc_frame_free(fp);
	fc_fcp_unlock_pkt(fsp);
}

1311
1312
1313
1314
1315
/**
 * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
 * @lport: The local port to be cleaned up
 */
static void fc_fcp_cleanup(struct fc_lport *lport)
1316
{
1317
	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
1318
1319
}

1320
1321
1322
/**
 * fc_fcp_timeout() - Handler for fcp_pkt timeouts
 * @data: The FCP packet that has timed out
1323
 *
1324
1325
1326
1327
1328
1329
 * If REC is supported then just issue it and return. The REC exchange will
 * complete or time out and recovery can continue at that point. Otherwise,
 * if the response has been received without all the data it has been
 * ER_TIMEOUT since the response was received. If the response has not been
 * received we see if data was received recently. If it has been then we
 * continue waiting, otherwise, we abort the command.
1330
1331
1332
1333
1334
 */
static void fc_fcp_timeout(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_rport *rport = fsp->rport;
1335
	struct fc_rport_libfc_priv *rpriv = rport->dd_data;
1336
1337
1338
1339
1340
1341
1342
1343
1344

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (fsp->cdb_cmd.fc_tm_flags)
		goto unlock;

	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

1345
	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
1346
1347
1348
1349
1350
1351
1352
		fc_fcp_rec(fsp);
	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
			       jiffies))
		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
	else if (fsp->state & FC_SRB_RCV_STATUS)
		fc_fcp_complete_locked(fsp);
	else
1353
		fc_fcp_recovery(fsp);
1354
1355
1356
1357
1358
	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
	fc_fcp_unlock_pkt(fsp);
}

1359
1360
1361
/**
 * fc_fcp_rec() - Send a REC ELS request
 * @fsp: The FCP packet to send the REC request on
1362
1363
1364
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
1365
	struct fc_lport *lport;
1366
1367
	struct fc_frame *fp;
	struct fc_rport *rport;
1368
	struct fc_rport_libfc_priv *rpriv;
1369

1370
	lport = fsp->lp;
1371
	rport = fsp->rport;
1372
1373
	rpriv = rport->dd_data;
	if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
1374
		fsp->status_code = FC_HRD_ERROR;
1375
		fsp->io_status = 0;
1376
1377
1378
		fc_fcp_complete_locked(fsp);
		return;
	}
1379
	fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
1380
1381
1382
1383
1384
	if (!fp)
		goto retry;

	fr_seq(fp) = fsp->seq_ptr;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
1385
		       rpriv->local_port->port_id, FC_TYPE_ELS,
1386
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
1387
1388
1389
	if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
				 fc_fcp_rec_resp, fsp,
				 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
1390
1391
1392
1393
1394
1395
1396
		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
		return;
	}
retry:
	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
	else
1397
		fc_fcp_recovery(fsp);
1398
1399
}

1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
/**
 * fc_fcp_rec_resp() - Handler for REC ELS responses
 * @seq: The sequence the response is on
 * @fp:	 The response frame
 * @arg: The FCP packet the response is on
 *
 * If the response is a reject then the scsi layer will handle
 * the timeout. If the response is a LS_ACC then if the I/O was not completed
 * set the timeout and return. If the I/O was completed then complete the
 * exchange and tell the SCSI layer.
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_els_rec_acc *recp;
	struct fc_els_ls_rjt *rjt;
	u32 e_stat;
	u8 opcode;
	u32 offset;
	enum dma_data_direction data_dir;
	enum fc_rctl r_ctl;
1421
	struct fc_rport_libfc_priv *rpriv;
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436

	if (IS_ERR(fp)) {
		fc_fcp_rec_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->recov_retry = 0;
	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		switch (rjt->er_reason) {
		default:
1437
1438
1439
1440
			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
				   "reason %d expl %d\n",
				   fsp->rport->port_id, rjt->er_reason,
				   rjt->er_explan);
1441
1442
			/* fall through */
		case ELS_RJT_UNSUP:
1443
			FC_FCP_DBG(fsp, "device does not support REC\n");
1444
			rpriv = fsp->rport->dd_data;
1445
1446
1447
1448
1449
			/*
			 * if we do not spport RECs or got some bogus
			 * reason then resetup timer so we check for
			 * making progress.
			 */
1450
			rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
			break;
		case ELS_RJT_LOGIC:
		case ELS_RJT_UNAB:
			/*
			 * If no data transfer, the command frame got dropped
			 * so we just retry.  If data was transferred, we
			 * lost the response but the target has no record,
			 * so we abort and retry.
			 */
			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
			    fsp->xfer_len == 0) {
				fc_fcp_retry_cmd(fsp);
				break;
			}