be_cmds.c 94.8 KB
Newer Older
Sathya Perla's avatar
Sathya Perla committed
1
/*
2
 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla's avatar
Sathya Perla committed
3
4
5
6
7
8
9
10
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
11
 * linux-drivers@emulex.com
Sathya Perla's avatar
Sathya Perla committed
12
 *
13
14
15
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
Sathya Perla's avatar
Sathya Perla committed
16
17
 */

18
#include <linux/module.h>
Sathya Perla's avatar
Sathya Perla committed
19
#include "be.h"
20
#include "be_cmds.h"
Sathya Perla's avatar
Sathya Perla committed
21

22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
/* Table of FW commands that require elevated function privileges; a command
 * absent from this table is allowed for every function. Consulted by
 * be_cmd_allowed().
 */
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

55
/* Returns true if the opcode/subsystem pair is either absent from
 * cmd_priv_map[] or is permitted by this function's current privilege mask.
 */
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	/* Use the standard ARRAY_SIZE() helper instead of the hand-rolled
	 * sizeof division.
	 */
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

70
71
72
73
/* Returns the inline (embedded) payload area of a WRB */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
74

75
/* Ring the MCCQ doorbell to tell the FW that one new WRB has been posted.
 * No-op if the adapter is already in an error state.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* Ensure the WRB contents are visible to the device before the
	 * doorbell write is issued.
	 */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
93
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
94
{
95
96
	u32 flags;

97
	if (compl->flags != 0) {
98
99
100
101
102
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
103
	}
104
	return false;
105
106
107
}

/* Need to reset the entire word that houses the valid bit */
/* Marks the completion entry as consumed so be_mcc_compl_is_new() will not
 * report it again.
 */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

113
114
115
116
117
118
119
120
121
/* Reassembles the response-header pointer that was split across the two
 * 32-bit WRB tag words by fill_wrb_tags().
 */
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr = tag1;

	/* Two 16-bit shifts keep this defined on 32-bit builds, where a
	 * single 32-bit shift of unsigned long would be UB.
	 */
	addr <<= 16;
	addr <<= 16;
	addr |= tag0;
	return (void *)addr;
}

122
/* Process one MCC completion entry: swap it to host endianness, recover the
 * originating request header from the tag words, signal any waiters that are
 * tied to specific opcodes, and log/translate failures.
 * Returns the completion status (or extended status for the
 * insufficient-resources case).
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* tag0/tag1 carry the address of the request header (see
	 * fill_wrb_tags); the header tells us which cmd this completes.
	 */
	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	/* Loopback-test completions only wake the ethtool waiter */
	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return 0;
	}

	/* Flash cmds also wake the ethtool waiter, but processing continues */
	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->et_cmd_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		/* Stats responses are parsed here, in completion context */
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		/* Temperature query failed: stop polling for it */
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		/* Expected/benign failures: return status without logging */
		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);

			/* Out-of-resources is reported via extended status */
			if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
				return extd_status;
		}
	}
done:
	return compl_status;
}

197
/* Link state evt is a string of bytes; no need for endian swapping */
198
static void be_async_link_state_process(struct be_adapter *adapter,
199
					struct be_async_event_link_state *evt)
200
{
201
	/* When link status changes, link speed must be re-queried from FW */
202
	adapter->phy.link_speed = -1;
203

204
205
206
207
208
209
	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
210
211
212
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

213
214
215
216
	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
217
218
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
219
220
}

221
222
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
223
224
225
					       struct
					       be_async_event_grp5_cos_priority
					       *evt)
226
227
228
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
229
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
230
231
232
233
234
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

235
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
236
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
237
238
239
					    struct
					    be_async_event_grp5_qos_link_speed
					    *evt)
240
{
241
242
243
	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
244
245
}

246
247
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
248
249
250
					     struct
					     be_async_event_grp5_pvid_state
					     *evt)
251
{
252
	if (evt->enabled) {
253
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
254
255
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
256
		adapter->pvid = 0;
257
	}
258
259
}

260
static void be_async_grp5_evt_process(struct be_adapter *adapter,
261
				      u32 trailer, struct be_mcc_compl *evt)
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
		(struct be_async_event_grp5_cos_priority *)evt);
	break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
		(struct be_async_event_grp5_qos_link_speed *)evt);
	break;
277
278
279
280
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
		(struct be_async_event_grp5_pvid_state *)evt);
	break;
281
	default:
282
283
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
			 event_type);
284
285
286
287
		break;
	}
}

288
static void be_async_dbg_evt_process(struct be_adapter *adapter,
289
				     u32 trailer, struct be_mcc_compl *cmp)
290
291
292
293
294
295
296
297
298
299
300
301
302
303
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
	break;
	default:
304
305
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
306
307
308
309
	break;
	}
}

310
311
/* True if the async trailer carries a link-state event code */
static inline bool is_link_state_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		       ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_LINK_STATE;
}
316

317
318
319
320
321
322
323
/* True if the async trailer carries a GRP5 event code */
static inline bool is_grp5_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		       ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_GRP_5;
}

324
325
326
327
328
329
330
/* True if the async trailer carries a QnQ debug event code */
static inline bool is_dbg_evt(u32 trailer)
{
	u32 evt_code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		       ASYNC_TRAILER_EVENT_CODE_MASK;

	return evt_code == ASYNC_EVENT_CODE_QNQ;
}

331
/* Returns the next unprocessed MCC CQ entry (advancing the tail), or NULL
 * when the CQ has no new entry.
 */
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

343
344
345
346
347
348
349
350
351
352
353
354
/* Arm the MCC CQ so interrupt-driven (async) processing resumes.
 * Takes mcc_cq_lock with BH disabled since the CQ is also serviced in
 * softirq context.
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

/* Stop re-arming the MCC CQ; subsequent completions are reaped only by
 * explicit polling (be_process_mcc).
 */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

Sathya Perla's avatar
Sathya Perla committed
363
/* Drain the MCC completion queue: dispatch async events to their handlers
 * and process command completions. Returns the status of the last command
 * completion processed (0 if none).
 * Caller context must allow plain spin_lock (BH already disabled or running
 * in softirq) — NOTE(review): callers appear to use local_bh_disable();
 * verify before adding new call sites.
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
							  compl->flags, compl);
			else if (is_dbg_evt(compl->flags))
				be_async_dbg_evt_process(adapter,
							 compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				/* Command completion: one WRB slot freed */
				status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	/* Tell HW how many entries were consumed; re-arm only in async mode */
	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

397
/* Wait till no more pending mcc requests are present */
/* Busy-polls the MCC CQ until all posted WRBs complete or ~12s elapse.
 * Returns the last completion status, or -EIO on adapter error/FW timeout.
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		/* be_process_mcc() takes mcc_cq_lock with plain spin_lock,
		 * so BHs must be disabled around the call.
		 */
		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
/* Rings the MCCQ doorbell for the most recently posted WRB and busy-waits
 * for it to complete, then returns the FW's response status.
 * Caller must hold mcc_lock when posting the WRB.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	/* head points one past the last posted WRB; step back to it */
	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	/* Recover the response-header pointer stashed in the tag words */
	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

449
/* Polls the mailbox doorbell until its ready bit is set.
 * Returns 0 on ready, -EIO if the adapter is in error, -1 on a surprise
 * removal (all-ones read) or a ~4s FW timeout.
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		/* All-ones indicates the device has dropped off the bus */
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 1: post the high half of the mailbox DMA address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* Step 2: post the low half; this triggers command execution */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

530
/* Reads the current FW POST stage from the SLIPORT semaphore register.
 * BEx chips expose it via CSR; newer chips via PCI config space.
 */
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

543
static int lancer_wait_ready(struct be_adapter *adapter)
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

563
564
565
566
567
static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
568
569
		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
570
571
572
573
574
575
576
577

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

578
579
580
581
/* Waits for a Lancer adapter to become ready, triggering a physical reset
 * if the FW reports an error with the reset-needed bit set.
 * Returns 0 when ready, -EAGAIN on a (temporary) provisioning/resource
 * error, -1 on unrecoverable error or timeout.
 */
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* FW asked for a reset: initiate a physical reset */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and goes away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}

/* Waits for FW readiness: SLIPORT ready on Lancer, ARMFW POST completion
 * (polled every 2s, up to 60s) on BE chips.
 * Returns 0 when ready, -EINTR if interrupted, negative on timeout/error.
 */
int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}


/* Returns the first scatter-gather entry of a non-embedded WRB */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

654
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
655
656
657
658
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}
Sathya Perla's avatar
Sathya Perla committed
659
660

/* Don't touch the hdr after it's prepared */
661
662
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
663
664
665
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
Sathya Perla's avatar
Sathya Perla committed
666
{
667
668
	struct be_sge *sge;

Sathya Perla's avatar
Sathya Perla committed
669
670
671
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
672
	req_hdr->version = 0;
673
	fill_wrb_tags(wrb, (ulong) req_hdr);
674
675
676
677
678
679
680
681
682
683
684
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
Sathya Perla's avatar
Sathya Perla committed
685
686
687
}

/* Fills the 4K page-address array for a DMA buffer, clamped to max_pages */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	u64 dma = (u64)mem->dma;
	int buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	int i;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

700
/* Returns the (zeroed) WRB inside the bootstrap mailbox.
 * Caller must hold mbox_lock — the mailbox is a single shared slot.
 */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

709
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
710
{
711
712
713
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

714
715
716
	if (!mccq->created)
		return NULL;

717
	if (atomic_read(&mccq->used) >= mccq->len)
718
719
		return NULL;

720
721
722
723
	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
724
725
726
	return wrb;
}

727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
/* True once the MCC queue exists; before that, cmds go via the mailbox */
static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
/* Takes mcc_lock (MCCQ path, always succeeds) or mbox_lock (mailbox path,
 * interruptible). Returns 0 on success.
 */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (!use_mcc(adapter))
		return mutex_lock_interruptible(&adapter->mbox_lock);

	spin_lock_bh(&adapter->mcc_lock);
	return 0;
}

/* Must be used only in process context */
/* Releases whichever lock be_cmd_lock() took */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (!use_mcc(adapter)) {
		mutex_unlock(&adapter->mbox_lock);
		return;
	}

	spin_unlock_bh(&adapter->mcc_lock);
}

/* Copies a caller-built WRB into the next MCCQ slot (or the mailbox) and
 * re-tags embedded cmds so the tag points at the copied payload.
 * Returns NULL only when the MCCQ is full. Caller holds the cmd lock.
 */
static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	/* Embedded payload moved with the copy; point the tags at it */
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
/* Issues a caller-built WRB (via MCCQ or mailbox), waits for completion and
 * copies the response back into the caller's WRB on success.
 */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		/* MCCQ full: must release the lock taken above; the old code
		 * returned here directly and leaked it.
		 */
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

799
800
801
802
803
804
805
806
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	/* Lancer FW needs no init handshake */
	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic init byte pattern, defined by the FW interface */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	/* Lancer FW needs no shutdown handshake */
	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	/* Magic shutdown byte pattern, defined by the FW interface */
	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
858

859
/* Creates an event queue in FW for the given EQ object; uses the mailbox.
 * On success records the FW-assigned EQ id (and, with EQ_CREATEv2, the
 * FW-chosen MSI-X index) in the eq object.
 */
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		/* v2 lets FW pick the MSI-X vector; v0 keeps the driver's */
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

905
/* Use MCC */
/* Queries a MAC address from FW: the permanent (factory) MAC when
 * @permanent is true, otherwise the MAC identified by if_handle/pmac_id.
 * The result is copied into @mac_addr on success.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

945
/* Uses synchronous MCCQ */
/* Programs an additional MAC address on the given interface; on success the
 * FW-assigned pmac handle is returned via @pmac_id. Returns -EPERM when the
 * function lacks the privilege for this cmd.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	/* Map the FW privilege error to a standard errno for callers */
	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

985
/* Uses synchronous MCCQ */
986
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
Sathya Perla's avatar
Sathya Perla committed
987
{
988
989
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
Sathya Perla's avatar
Sathya Perla committed
990
991
	int status;

992
993
994
	if (pmac_id == -1)
		return 0;

995
996
997
	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
998
999
1000
	if (!wrb) {
		status = -EBUSY;
		goto err;
For faster browsing, not all history is shown. View entire blame