/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

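/* Cmds that may be issued only when the caller holds at least one of the
 * privileges listed against them below
 */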
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
			   u8 subsystem)
{
	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

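/* Ring the MCC-queue doorbell to tell hw that one new WRB has been posted */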
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

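/* Rebuild the virtual address of a cmd's response buffer that was stashed
 * in the WRB's tag0/tag1 fields
 */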
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

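/* Process a (MCC or mbox) cmd completion and return its status */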
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return 0;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->et_cmd_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);

			if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
				return extd_status;
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

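/* Decode the event type from the async trailer and dispatch grp5 events */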
static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
		(struct be_async_event_grp5_cos_priority *)evt);
	break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
		(struct be_async_event_grp5_qos_link_speed *)evt);
	break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
		(struct be_async_event_grp5_pvid_state *)evt);
	break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
			 event_type);
		break;
	}
}

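/* Decode and handle debug async events; only the QnQ event is handled */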
static void be_async_dbg_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
	break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
	break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static inline bool is_dbg_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_QNQ);
}

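/* Return the next new (valid) completion on the MCC CQ, if any */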
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

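/* Drain the MCC CQ: dispatch async events and process cmd completions */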
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
			else if (is_dbg_evt(compl->flags))
				be_async_dbg_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

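/* Poll the mbox doorbell till its ready bit is set or a timeout occurs */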
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

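/* Read the POST stage from the semaphore register: via csr on BEx chips,
 * via PCI config space on others
 */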
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

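/* Wait (up to 30s) for the Lancer SLIPORT to become ready */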
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

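/* Check if the SLIPORT error registers indicate that the PF hasn't yet
 * provisioned resources for this function
 */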
static bool lancer_provisioning_error(struct be_adapter *adapter)
{
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
		sliport_err1 = ioread32(adapter->db +
					SLIPORT_ERROR1_OFFSET);
		sliport_err2 = ioread32(adapter->db +
					SLIPORT_ERROR2_OFFSET);

		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
			return true;
	}
	return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -EAGAIN;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery if error is not recoverable.
	 * A no-resource error is temporary and will go away
	 * when the PF provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		status = -EAGAIN;

	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n",
			 timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

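/* Stash a cmd's request-hdr address in the WRB tags;
 * tag0 carries the lower 32 bits and tag1 the upper 32 bits
 */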
static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
				 unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

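/* Break up a cmd's DMA buffer into 4K-sized pages and fill in its PA list */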
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

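/* Zero out and return the WRB embedded in the mailbox memory */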
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

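/* Get the next free WRB slot (zeroed out) on the MCC queue;
 * returns NULL if the queue is full or hasn't been created yet
 */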
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

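/* Cmds are issued on the MCC queue once it has been created;
 * till then the mailbox is used
 */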
static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

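/* Uses Mbox */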
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
								ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

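/* Uses Mbox */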
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				struct be_queue_info *mccq,
				struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	req->async_event_bitmap[