be_main.c 116 KB
Newer Older
Sathya Perla's avatar
Sathya Perla committed
1
/*
2
 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla's avatar
Sathya Perla committed
3
4
5
6
7
8
9
10
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
11
 * linux-drivers@emulex.com
Sathya Perla's avatar
Sathya Perla committed
12
 *
13
14
15
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
Sathya Perla's avatar
Sathya Perla committed
16
17
 */

18
#include <linux/prefetch.h>
19
#include <linux/module.h>
Sathya Perla's avatar
Sathya Perla committed
20
#include "be.h"
21
#include "be_cmds.h"
22
#include <asm/div64.h>
Sathya Perla's avatar
Sathya Perla committed
23
#include <linux/aer.h>
Sathya Perla's avatar
Sathya Perla committed
24
25
26
27

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28
MODULE_AUTHOR("Emulex Corporation");
Sathya Perla's avatar
Sathya Perla committed
29
30
MODULE_LICENSE("GPL");

31
32
33
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla's avatar
Sathya Perla committed
34

35
36
37
38
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

Sathya Perla's avatar
Sathya Perla committed
39
/* PCI vendor/device IDs claimed by this driver. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
51
/* UE Status Low CSR */
/* Name of the HW block represented by each bit (LSB first) of the
 * Unrecoverable Error status low register; used for error reporting.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Name of the HW block represented by each bit (LSB first) of the
 * Unrecoverable Error status high register; used for error reporting.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla's avatar
Sathya Perla committed
121

122
123
124
125
126
127
128
/* Returns true when the function is in any multi-channel mode
 * (Flex10, vNIC or UMC).
 */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode &
		(FLEX10_MODE | VNIC_MODE | UMC_ENABLED)) != 0;
}

Sathya Perla's avatar
Sathya Perla committed
129
130
131
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
132
	if (mem->va) {
Ivan Vecera's avatar
Ivan Vecera committed
133
134
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
135
136
		mem->va = NULL;
	}
Sathya Perla's avatar
Sathya Perla committed
137
138
139
140
141
142
143
144
145
146
147
}

/* Allocate zeroed DMA-coherent memory for a queue of @len entries of
 * @entry_size bytes each and initialize the queue bookkeeping.
 * Returns 0 on success, -ENOMEM when the allocation fails.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *dmem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;

	dmem->size = len * entry_size;
	dmem->va = dma_alloc_coherent(&adapter->pdev->dev, dmem->size,
				      &dmem->dma, GFP_KERNEL | __GFP_ZERO);
	return dmem->va ? 0 : -ENOMEM;
}

155
/* Toggle host interrupt delivery via the membar control register in
 * PCI config space. The register is written back only when the current
 * state differs from the requested one.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 val;
	bool cur;

	pci_read_config_dword(adapter->pdev,
			      PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, &val);
	cur = !!(val & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK);

	if (cur == enable)
		return;		/* already in the requested state */

	if (enable)
		val |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		val &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, val);
}

174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
/* Enable/disable interrupts, preferring the FW command; falls back to
 * direct register access when the command fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status;

	/* On lancer interrupts can't be controlled via this register;
	 * also skip all register access while an EEH error is pending.
	 */
	if (lancer_chip(adapter) || adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

190
/* Ring the RX queue doorbell: tell HW that @posted new receive buffers
 * were added to ring @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 db_val = (qid & DB_RQ_RING_ID_MASK) |
		     ((u32)posted << DB_RQ_NUM_POSTED_SHIFT);

	wmb();		/* descriptors must be visible before the doorbell */
	iowrite32(db_val, adapter->db + DB_RQ_OFFSET);
}

200
201
/* Ring the TX queue doorbell: tell HW that @posted new WRBs were added
 * to the ring of @txo.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 db_val = (txo->q.id & DB_TXULP_RING_ID_MASK) |
		     ((u32)(posted & DB_TXULP_NUM_POSTED_MASK) <<
		      DB_TXULP_NUM_POSTED_SHIFT);

	wmb();		/* WRBs must be visible before the doorbell */
	iowrite32(db_val, adapter->db + txo->db_offset);
}

211
/* Ring the event queue doorbell for @qid: acknowledge @num_popped
 * consumed entries and optionally re-arm the EQ and/or clear the
 * interrupt. No-op while an EEH error is pending.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val;

	if (adapter->eeh_error)
		return;		/* device inaccessible during EEH recovery */

	val = (qid & DB_EQ_RING_ID_MASK) |
	      ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
	val |= 1 << DB_EQ_EVNT_SHIFT;	/* marks this as an EQ doorbell */
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;

	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

231
/* Ring the completion queue doorbell for @qid: acknowledge @num_popped
 * consumed entries and optionally re-arm the CQ. No-op while an EEH
 * error is pending.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val;

	if (adapter->eeh_error)
		return;		/* device inaccessible during EEH recovery */

	val = (qid & DB_CQ_RING_ID_MASK) |
	      ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

/* ndo_set_mac_address handler: program a new unicast MAC address.
 * The new MAC is added via FW (pmac_add) before the previously active
 * one is deleted, so the interface never loses its address. On any FW
 * failure, netdev->dev_addr is left untouched and the FW status is
 * returned to the caller.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;
	u8 current_mac[ETH_ALEN];
	/* id of the MAC entry to retire once the new one is installed */
	u32 pmac_id = adapter->pmac_id[0];
	bool active_mac = true;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* For BE VF, MAC address is already activated by PF.
	 * Hence only operation left is updating netdev->devaddr.
	 * Update it if user is passing the same MAC which was used
	 * during configuring VF MAC from PF(Hypervisor).
	 */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, current_mac,
					       false, adapter->if_handle, 0);
		if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
			goto done;
		else
			goto err;
	}

	/* Nothing to do if the requested MAC is already in place */
	if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
		goto done;

	/* For Lancer check if any MAC is active.
	 * If active, get its mac id.
	 */
	if (lancer_chip(adapter) && !be_physfn(adapter))
		be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
					 &pmac_id, 0);

	/* Install the new MAC first ... */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle,
				 &adapter->pmac_id[0], 0);

	if (status)
		goto err;

	/* ... then retire the previously active entry */
	if (active_mac)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				pmac_id, 0);
done:
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	return 0;
err:
	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
	return status;
}

301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
/* Return a pointer to the hw_stats section of the GET_STATS command
 * response buffer. BE2 supports only the v0 command format; all other
 * chips use the v1 layout.
 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *resp = adapter->stats_cmd.va;

		return &resp->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v1 *resp = adapter->stats_cmd.va;

		return &resp->hw_stats;
	}
}

/* Return a pointer to the ERX section of the stats response. BE2
 * supports only the v0 layout; all other chips use v1.
 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *stats = hw_stats_from_cmd(adapter);

		return &stats->erx;
	} else {
		struct be_hw_stats_v1 *stats = hw_stats_from_cmd(adapter);

		return &stats->erx;
	}
}

/* Copy the v0 (BE2) FW stats response into the chip-independent
 * driver stats structure. The response is converted from LE in place
 * before the per-port and rxf counters are copied out.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* per-port receive counters */
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are kept per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

378
/* Copy the v1 (BE3/Skyhawk) FW stats response into the
 * chip-independent driver stats structure. The response is converted
 * from LE in place before the per-port and rxf counters are copied.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	/* per-port receive counters */
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	/* unlike v0, jabber events are already per-port in v1 */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

Selvin Xavier's avatar
Selvin Xavier committed
424
425
/* Copy the Lancer per-physical-port (pport) FW stats response into the
 * chip-independent driver stats structure. Lancer reports 64-bit
 * counters; only the low 32 bits (_lo) are kept for most fields.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address and vlan filtering are reported separately; fold them */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
462

463
464
465
466
467
468
469
470
471
472
473
474
/* Fold a 16-bit HW counter sample @val into the 32-bit SW accumulator
 * @acc, compensating for (at most) one wrap of the HW counter since
 * the previous sample. The final store is done through ACCESS_ONCE so
 * concurrent readers never observe a torn/intermediate value.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	u32 cur = *acc;
	u32 sum = hi(cur) + val;

	/* new reading below the last sample => counter wrapped past 65535 */
	if (val < lo(cur))
		sum += 65536;
	ACCESS_ONCE(*acc) = sum;
}

475
476
477
478
479
480
481
482
483
484
485
486
487
488
/* Record the per-RX-queue "drops due to no fragments" ERX counter.
 * On BEx chips the HW counter is only 16 bits wide and wraps at 65535,
 * so it is folded into a 32-bit software accumulator; newer chips
 * report a value that can be stored directly.
 */
void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (BEx_chip(adapter))
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
	else
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
}

489
490
/* Convert the FW stats-cmd response into the driver's chip-independent
 * drv_stats, dispatching on chip family (Lancer vs BE2 vs
 * BE3/Skyhawk), and update the per-RX-queue ERX drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

513
514
/* ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via the u64_stats seqcount) and the FW-derived
 * drv_stats error counters into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop guarantees a consistent pkts/bytes snapshot */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

579
/* Propagate a link state change from FW to the networking stack. The
 * very first update also latches BE_FLAGS_LINK_STATUS_INIT, forcing
 * the carrier off so the on/off decision starts from a known state.
 */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;
	bool link_up = (link_status & LINK_STATUS_MASK) == LINK_UP;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_up)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

594
/* Account one TX request in the per-queue stats under the u64_stats
 * write-side lock: @wrb_cnt WRBs, @copied bytes, @gso_segs segments
 * (0 => a single non-GSO packet) and whether the queue was stopped.
 */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *txs = tx_stats(txo);
	u32 nr_pkts = gso_segs ? gso_segs : 1;	/* non-GSO skb == 1 pkt */

	u64_stats_update_begin(&txs->sync);
	txs->tx_reqs++;
	txs->tx_wrbs += wrb_cnt;
	txs->tx_bytes += copied;
	txs->tx_pkts += nr_pkts;
	if (stopped)
		txs->tx_stops++;
	u64_stats_update_end(&txs->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
610
611
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
Sathya Perla's avatar
Sathya Perla committed
612
{
613
614
615
616
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

Sathya Perla's avatar
Sathya Perla committed
617
618
	/* to account for hdr wrb */
	cnt++;
619
620
621
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
Sathya Perla's avatar
Sathya Perla committed
622
623
624
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
625
	}
Sathya Perla's avatar
Sathya Perla committed
626
627
628
629
630
631
632
633
634
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

/* Populate a TX WRB with the DMA address and length of one buffer. */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = lower_32_bits(addr);
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
/* Return the VLAN tag to use for @skb on transmit. If the 802.1p
 * priority requested by the stack is not in this function's available
 * priority bitmap, the recommended priority is substituted.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u16 tag = vlan_tx_tag_get(skb);
	u8 prio = (tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	/* vlan priority provided by the OS is NOT in the available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << prio)))
		tag = (tag & ~VLAN_PRIO_MASK) | adapter->recommended_prio;

	return tag;
}

654
/* Build the per-packet header WRB that precedes the buffer WRBs of a
 * TX request: checksum/LSO offload flags, optional VLAN tag insertion,
 * total WRB count (@wrb_cnt) and payload length (@len).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: HW performs the segmentation at gso_size MSS */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload for non-GSO TCP/UDP packets */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		/* request HW VLAN insertion with the (possibly remapped) tag */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

Ivan Vecera's avatar
Ivan Vecera committed
689
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690
691
692
693
694
695
696
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697
	if (wrb->frag_len) {
698
		if (unmap_single)
Ivan Vecera's avatar
Ivan Vecera committed
699
700
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
701
		else
Ivan Vecera's avatar
Ivan Vecera committed
702
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703
704
	}
}
Sathya Perla's avatar
Sathya Perla committed
705

706
/* Fill the TX ring with the WRBs describing @skb: a header WRB first,
 * then one WRB per DMA-mapped piece (linear head, then each page
 * fragment), plus an optional zero-length dummy WRB for even
 * alignment. Returns the number of payload bytes mapped, or 0 if a
 * DMA mapping failed - in which case all mappings made so far are
 * undone and the queue head is rolled back.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true once the linear head is mapped */
	u16 map_head;			/* ring position of first buffer WRB */

	/* reserve the header WRB; it is filled in last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* map the linear part of the skb, if any */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* map each page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* pad to an even WRB count when required by the chip */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every buffer WRB filled so far and rewind head */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the very first WRB can be a single mapping */
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

773
/* Insert the VLAN tag(s) into the packet payload in software.
 * Used when HW VLAN insertion must be avoided (HW csum/tagging bugs,
 * QnQ). The inner tag is inserted exactly once: either the tag
 * requested by the stack or, under a QnQ async event, the port's pvid.
 * An outer QnQ tag is added afterwards when qnq_vid is configured.
 * Returns the (possibly reallocated) skb, or NULL on failure - note
 * that __vlan_put_tag() consumes the skb when it fails, so a NULL
 * return must be propagated immediately and never passed back into
 * another __vlan_put_tag() call (the original code could both
 * double-insert the inner tag and dereference such a NULL).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround: ask the caller to skip HW VLAN insertion */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	/* insert the inner tag exactly once */
	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;	/* tag is now in the payload */
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
/* Detect the IPv6 packet shape (a non-TCP/UDP next-header whose option
 * header's hdrlen byte is 0xff directly after the IPv6 header) that
 * the TX path must treat specially - see its callers.
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ehdr;

	if (eh->h_proto != htons(ETH_P_IPV6))
		return false;

	ip6h = (struct ipv6hdr *)(skb->data + offset);
	offset += sizeof(struct ipv6hdr);

	if (ip6h->nexthdr == NEXTHDR_TCP || ip6h->nexthdr == NEXTHDR_UDP)
		return false;

	ehdr = (struct ipv6_opt_hdr *)(skb->data + offset);
	/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
	return ehdr->hdrlen == 0xff;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) &&
		be_ipv6_exthdr_check(skb);
}

851
static netdev_tx_t be_xmit(struct sk_buff *skb,
852
			struct net_device *netdev)
Sathya Perla's avatar
Sathya Perla committed
853
854
{
	struct be_adapter *adapter = netdev_priv(netdev);
855
856
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
857
	struct iphdr *ip = NULL;
Sathya Perla's avatar
Sathya Perla committed
858
	u32 wrb_cnt = 0, copied = 0;
859
	u32 start = txq->head, eth_hdr_len;
Sathya Perla's avatar
Sathya Perla committed
860
	bool dummy_wrb, stopped = false;
861
	bool skip_hw_vlan = false;
862
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perla's avatar
Sathya Perla committed
863

864
865
866
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
		VLAN_ETH_HLEN : ETH_HLEN;

867
868
	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
869
	 */
870
	if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
871
872
873
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}
874

875
876
877
878
879
880
881
	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
			skip_hw_vlan = true;

882
883
884
885
886
	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
887
			vlan_tx_tag_present(skb)) {
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
911
912
913
914
		if (unlikely(!skb))
			goto tx_drop;
	}

915
	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla's avatar
Sathya Perla committed
916

917
918
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
919
	if (copied) {
Eric Dumazet's avatar
Eric Dumazet committed
920
921
		int gso_segs = skb_shinfo(skb)->gso_segs;

922
		/* record the sent skb in the sent_skb table */
923
924
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;
925
926
927
928
929

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
930
		atomic_add(wrb_cnt, &txq->used);
931
932
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
933
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
934
935
			stopped = true;
		}
Sathya Perla's avatar
Sathya Perla committed
936

937
		be_txq_notify(adapter, txo, wrb_cnt);
Sathya Perla's avatar
Sathya Perla committed
938

Eric Dumazet's avatar
Eric Dumazet committed
939
		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
940
941
942
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
Sathya Perla's avatar
Sathya Perla committed
943
	}
944
tx_drop:
Sathya Perla's avatar
Sathya Perla committed
945
946
947
948
949
950
951
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
952
953
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla's avatar
Sathya Perla committed
954
955
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
956
957
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla's avatar
Sathya Perla committed
958
959
960
961
962
963
964
965
966
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
967
968
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla's avatar
Sathya Perla committed
969
 */
Sathya Perla's avatar
Sathya Perla committed
970
static int be_vid_config(struct be_adapter *adapter)
Sathya Perla's avatar
Sathya Perla committed
971
{
Sathya Perla's avatar
Sathya Perla committed
972
973
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
974
	int status = 0;
975

976
977
978
979
	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

980
981
982
983
984
985
	if (adapter->vlans_added > adapter->max_vlans)
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
Sathya Perla's avatar
Sathya Perla committed
986
			vids[num++] = cpu_to_le16(i);
987
988

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla's avatar
Sathya Perla committed
989
				    vids, num, 1, 0);
990
991
992
993
994
995

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
Sathya Perla's avatar
Sathya Perla committed
996
	}
997

998
	return status;
999
1000
1001
1002
1003

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
Sathya Perla's avatar
Sathya Perla committed
1004
1005
}

1006
/* ndo_vlan_rx_add_vid handler: records @vid in the driver's VLAN table
 * and pushes the updated table to HW. Rolls the table entry back if the
 * HW update fails. Returns 0 on success or a negative errno.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or Lancer) may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* roll back on HW failure */
ret:
	return status;
}

1032
/* ndo_vlan_rx_kill_vid handler: clears @vid from the driver's VLAN table
 * and pushes the updated table to HW. Restores the table entry if the
 * HW update fails. Returns 0 on success or a negative errno.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF (or Lancer) may program VLAN filters */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added--;
	else
		adapter->vlan_tag[vid] = 1;	/* restore on HW failure */
ret:
	return status;
}

1058
static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla's avatar
Sathya Perla committed
1059
1060
{
	struct be_adapter *adapter = netdev_priv(netdev);
1061
	int status;
Sathya Perla's avatar
Sathya Perla committed
1062

1063
	if (netdev->flags & IFF_PROMISC) {
1064
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1065
1066
		adapter->promiscuous = true;
		goto done;
Sathya Perla's avatar
Sathya Perla committed
1067
1068
	}

Lucas De Marchi's avatar
Lucas De Marchi committed
1069
	/* BE was previously in promiscuous mode; disable it */
1070
1071
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
1072
		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1073
1074

		if (adapter->vlans_added)
Sathya Perla's avatar
Sathya Perla committed
1075
			be_vid_config(adapter);
Sathya Perla's avatar