/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* Order the WRB writes ahead of the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

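/* be_wrb_cmd_hdr_prepare() stashes the request's virtual address in the
 * WRB tags (tag0 = low 32 bits, tag1 = high 32 bits); this rebuilds the
 * pointer.  The double 16-bit shift avoids an undefined 32-bit shift of
 * an unsigned long on 32-bit builds.
 */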
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "opcode %d-%d is not permitted\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->phy.link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
		(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
		(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
		(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

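/* The flags word of an async completion doubles as the event "trailer";
 * these helpers test its event-code field.
 */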
static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		status = be_process_mcc(adapter);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

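/* The firmware writes the response over the request memory that the WRB
 * tags point at, so the last-queued WRB (queue head - 1) is decoded back
 * into a response pointer before the doorbell is rung.
 */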
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		/* A read of all 1s usually means the device has dropped
		 * off the PCI bus
		 */
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

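/* The mailbox doorbell register is only 32 bits wide, so the mailbox DMA
 * address is posted in two writes: the high bits first (flagged with
 * MPU_MAILBOX_DB_HI_MASK), then the low bits; each write is acknowledged
 * via the ready bit polled in be_mbox_db_ready_wait().
 */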
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

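/* Lancer chips use the SLIPORT_STATUS register for the ready handshake
 * instead of POST stages: wait for the RDY bit, and if the error and
 * "reset needed" bits are both set, trigger a port reset through
 * SLIPORT_CONTROL and wait for the port to recover.
 */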
int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}


static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}


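/* A command is either embedded (the request lives in the WRB payload
 * itself) or non-embedded (the request sits in a separate DMA buffer and
 * the WRB carries a scatter-gather entry describing it).
 */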
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

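/* A worked example of the conversion below, assuming these exact
 * constants: usec_delay = 96 gives interrupt_rate = 10416/s, so
 * multiplier = (651042 - 10416) * 10 / 10416 = 615, which rounds to 62;
 * 0 means no delay, and values are capped at 1023 (the maximum delay).
 */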
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

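/* MCC WRBs are claimed under mcc_lock; a NULL return here means the MCC
 * queue is full, which callers report as -EBUSY.
 */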
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

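/* Mailbox commands serialize on a sleeping mutex (mbox_lock) since they
 * are issued from process context, typically at init/teardown; fast-path
 * MCC commands instead take mcc_lock with bottom halves disabled.
 */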
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}
794
/* Uses synchronous MCCQ */
795
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
Sathya Perla's avatar
Sathya Perla committed
796
{
797 798
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
Sathya Perla's avatar
Sathya Perla committed
799 800
	int status;

801 802 803
	if (pmac_id == -1)
		return 0;

804 805 806
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
807 808 809 810
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
811
	req = embedded_payload(wrb);
Sathya Perla's avatar
Sathya Perla committed
812

813 814
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
Sathya Perla's avatar
Sathya Perla committed
815

816
	req->hdr.domain = dom;
Sathya Perla's avatar
Sathya Perla committed
817 818 819
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

820 821
	status = be_mcc_notify_wait(adapter);

822
err:
823
	spin_unlock_bh(&adapter->mcc_lock);
Sathya Perla's avatar
Sathya Perla committed
824 825 826
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

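/* Ring lengths are powers of 2 and are passed to the device as
 * log2(len) + 1, e.g. a 256-entry ring encodes as 9; an encoding of 16
 * wraps to 0.
 */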
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	/* frag_size must be a power of 2; the device takes log2(frag_size) */
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	if (!status)
		q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);

	req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	if (adapter->generation == BE_GEN3)
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

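/* The stats commands above and below complete asynchronously:
 * be_mcc_notify() posts the WRB without waiting, and
 * be_mcc_compl_process() later parses the response and clears
 * stats_cmd_sent.
 */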
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
			     u16 *link_speed, u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);