/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}

static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			 (void *) &ihost->task_context_table[0];
		return ihost->task_context_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}
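
/*
 * Note on the address math above: SGL pairs 0 and 1 are embedded in the
 * hardware task context itself, so their bus addresses are derived from
 * task_context_dma plus the pair's offset within the task context table;
 * every later pair lives in the request's private sg_table and is
 * resolved through sci_io_request_get_dma_addr().
 */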

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg   = NULL;
	struct scu_sgl_element_pair *prev_sg  = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
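
/*
 * Illustrative sketch (not driver code): sci_request_build_sgl() emits one
 * scu_sgl_element_pair per two scatterlist entries and chains the pairs
 * through their next_pair_{upper,lower} words, zeroing those words in the
 * final pair as a terminator.  With use() as a stand-in for whatever
 * consumes an element, a host-side walk of the finished chain would look
 * roughly like:
 *
 *	struct scu_sgl_element_pair *pair;
 *	int idx;
 *
 *	for (idx = 0; (pair = to_sgl_element_pair(ireq, idx)) != NULL; idx++) {
 *		use(&pair->A);
 *		if (pair->B.length)
 *			use(&pair->B);
 *		if (!pair->next_pair_upper && !pair->next_pair_lower)
 *			break;
 *	}
 */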

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
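
/*
 * Worked example of the swab above (assumed sci_swab32_cpy() semantics:
 * copy while byte-swapping each 32-bit word).  A 6-byte INQUIRY CDB
 * beginning 12 00 00 00 ... is stored back as 00 00 00 12 ..., so when
 * the SCU fetches the IU as little-endian dwords and emits each dword
 * most-significant byte first, the CDB bytes go on the wire in their
 * original SAS (big-endian) order.
 */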

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * scu_ssp_request_construct_task_context() - fill in the SCU Task Context
 *    common to any type of SSP request.
 * @ireq: the request whose task context is being constructed
 * @task_context: the SCU task context buffer being constructed
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

/**
 * scu_ssp_io_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP IO request.
 * @ireq: the IO request being constructed
 * @dir: the direction of the data transfer
 * @len: the total transfer length in bytes
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);
}

/**
 * scu_ssp_task_request_construct_task_context() - fill in the SCU Task
 *    Context for an SSP Task request.
 * @ireq: the task request object being constructed
 *
 * The following important settings are utilized:
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other IO destined for the same Remote
 *      Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This indicates that a raw
 *      frame is being utilized to perform task management.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame                = 1;
	task_context->priority                     = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type                    = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes        = 0;
	task_context->type.ssp.frame_type          = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context() - fill in the SCU Task Context
 *    common to any type of SATA request.  This is called from the various
 *    SATA constructors.
 * @ireq: the general IO request object to be used in constructing the SCU
 *    task context.
 * @task_context: the buffer pointer for the SCU task context being
 *    constructed.
 *
 * Upon return, the general IO request construction and the buffer
 * assignment for the command buffer are complete.
 * TODO: Revisit task context construction to determine what is common for
 * SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
						((char *) &ireq->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
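
/*
 * Worked numbers for the split above, assuming the standard 20-byte H2D
 * register FIS (sizeof(struct host_to_dev_fis) == 20): dword 0 of the FIS
 * rides inside the task context itself (type.words[0]), so the DMA'd
 * command IU starts 4 bytes into ireq->stp.cmd and ssp_command_iu_length
 * works out to (20 - 4) / 4 = 4 dwords.
 */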

static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame         = 0;
	task_context->priority              = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type             = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type     = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
							  bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}
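
/*
 * Note: sgl.index == -1 is a "no SGL" sentinel; to_sgl_element_pair()
 * deliberately returns NULL for negative indices, presumably so the PIO
 * data path can hand the index straight to that helper when deciding
 * whether a staging buffer exists.
 */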

/**
 * sci_stp_optimized_request_construct() - perform request construction
 *    common to all types of STP requests that are optimized by the silicon
 *    (i.e. UDMA, NCQ).
 * @ireq: the request to be constructed as an optimized request.
 * @optimized_task_type: the read-direction base task type:
 *    SCU_TASK_TYPE_DMA_IN for UDMA or SCU_TASK_TYPE_FPDMAQ_READ for NCQ.
 *    The write-direction variant is derived below for DMA_TO_DEVICE.
 * @len: the transfer length in bytes.
 * @dir: the direction of the data transfer.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task type
		 * values is consistent with the difference between FPDMA READ
		 * and FPDMA WRITE values.  Add the supplied task type parameter
		 * to this difference to set the task type properly for this
		 * DATA OUT (WRITE) case. */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type. */
		task_context->task_type = optimized_task_type;
	}
}
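
/*
 * Worked example of the task-type arithmetic above: for an NCQ write,
 * optimized_task_type arrives as SCU_TASK_TYPE_FPDMAQ_READ, and adding
 * (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN) should land on the
 * corresponding write value (presumably SCU_TASK_TYPE_FPDMAQ_WRITE),
 * provided the enum spacing the comment relies on holds.
 */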

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited d2h fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
			return SCI_SUCCESS;
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return sci_stp_pio_request_construct(ireq, copy);
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status sci_task_request_construct_ssp(
	struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir != DMA_NONE);

	status = sci_io_request_construct_sata(ireq,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
{
	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(ireq);
		} else {
			dev_err(&ireq->owning_controller->pdev->dev,
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, ireq, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}
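
/*
 * Worked example of the read address above, assuming
 * sizeof(struct scu_task_context) == 256 and the 0x2c data_offset noted
 * in the comment: a request with ISCI_TAG_TCI(io_tag) == 3 reads from
 * BAR1 + 0x200000 + 0x2c + 3 * 256 = BAR1 + 0x20032c.
 */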

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			"%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* TODO: When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		return SCI_SUCCESS;
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_ABORTING:
		/* If a request has a termination requested twice, return
		 * a failure indication, since HW confirmation of the first
		 * abort is still outstanding.
		 */
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%d)\n", state))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
						  ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
						  u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
			 __func__, event_code, state);

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR the data frame.
		 * Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 *    instead of sense data.
 * @ireq: the request object for which to copy the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SCIC_STP_PROTOCOL) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)