be_main.c 118 KB
Newer Older
Sathya Perla's avatar
Sathya Perla committed
1
/*
2
 * Copyright (C) 2005 - 2013 Emulex
Sathya Perla's avatar
Sathya Perla committed
3
4
5
6
7
8
9
10
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
11
 * linux-drivers@emulex.com
Sathya Perla's avatar
Sathya Perla committed
12
 *
13
14
15
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
Sathya Perla's avatar
Sathya Perla committed
16
17
 */

18
#include <linux/prefetch.h>
19
#include <linux/module.h>
Sathya Perla's avatar
Sathya Perla committed
20
#include "be.h"
21
#include "be_cmds.h"
22
#include <asm/div64.h>
Sathya Perla's avatar
Sathya Perla committed
23
#include <linux/aer.h>
24
#include <linux/if_bridge.h>
Sathya Perla's avatar
Sathya Perla committed
25
26
27
28

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
29
MODULE_AUTHOR("Emulex Corporation");
Sathya Perla's avatar
Sathya Perla committed
30
31
MODULE_LICENSE("GPL");

32
33
34
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla's avatar
Sathya Perla committed
35

36
37
38
39
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

Sathya Perla's avatar
Sathya Perla committed
40
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
41
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
42
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
43
44
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
45
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
46
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
47
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
48
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla's avatar
Sathya Perla committed
49
50
51
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
52
/* UE Status Low CSR */
/* Names of the HW blocks reported by the UE (Unrecoverable Error)
 * status-low register; presumably indexed by bit position within that
 * register — TODO confirm against the UE decode code elsewhere in this file.
 * Trailing spaces in some entries are intentional (kept for log formatting).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Names of the HW blocks reported by the UE status-high register;
 * presumably indexed by bit position — TODO confirm against the UE
 * decode code.  Upper bits have no named block ("Unknown").
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla's avatar
Sathya Perla committed
122

123
124
125
126
127
128
129
/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
	return (adapter->function_mode & FLEX10_MODE ||
		adapter->function_mode & VNIC_MODE ||
		adapter->function_mode & UMC_ENABLED);
}

Sathya Perla's avatar
Sathya Perla committed
130
131
132
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
133
	if (mem->va) {
Ivan Vecera's avatar
Ivan Vecera committed
134
135
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
136
137
		mem->va = NULL;
	}
Sathya Perla's avatar
Sathya Perla committed
138
139
140
141
142
143
144
145
146
147
148
}

/* Allocate a zeroed DMA-coherent ring of @len entries of @entry_size bytes
 * for @q.  Returns 0 on success, -ENOMEM on allocation failure.
 */
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *ring_mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	ring_mem->size = len * entry_size;
	ring_mem->va = dma_zalloc_coherent(&adapter->pdev->dev, ring_mem->size,
					   &ring_mem->dma, GFP_KERNEL);
	return ring_mem->va ? 0 : -ENOMEM;
}

156
/* Enable/disable the host-interrupt bit in the PCI config-space
 * MEMBAR control register.  Writes the register only when the bit
 * actually needs to change.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, hostintr;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
				&reg);
	hostintr = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	/* Already in the requested state: avoid a config-space write */
	if (!!hostintr == enable)
		return;

	if (enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	pci_write_config_dword(adapter->pdev,
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
/* Enable/disable adapter interrupts.  Tries the FW command first and
 * falls back to the PCI config-space register toggle if the cmd fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* Skip all HW access once an EEH error has been flagged */
	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

191
/* Ring the RX-queue doorbell to tell HW that @posted new receive
 * buffers are available on queue @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 doorbell = (qid & DB_RQ_RING_ID_MASK) |
		       (posted << DB_RQ_NUM_POSTED_SHIFT);

	/* Descriptor writes must reach memory before the doorbell rings */
	wmb();
	iowrite32(doorbell, adapter->db + DB_RQ_OFFSET);
}

201
202
/* Ring the TX-queue doorbell to hand @posted newly-filled WRBs on
 * @txo's queue to the HW.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 doorbell = (txo->q.id & DB_TXULP_RING_ID_MASK) |
		       ((posted & DB_TXULP_NUM_POSTED_MASK) <<
			DB_TXULP_NUM_POSTED_SHIFT);

	/* WRB writes must be visible in memory before the doorbell rings */
	wmb();
	iowrite32(doorbell, adapter->db + txo->db_offset);
}

212
/* Ring the event-queue doorbell: optionally re-arm the EQ and clear the
 * interrupt, and acknowledge @num_popped consumed event entries.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	/* extended ring-id bits live in a separate field of the doorbell */
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write once an EEH error has been flagged */
	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

232
/* Ring the completion-queue doorbell: acknowledge @num_popped consumed
 * CQ entries and optionally re-arm the CQ.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 doorbell = (qid & DB_CQ_RING_ID_MASK) |
		       ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the MMIO write once an EEH error has been flagged */
	if (adapter->eeh_error)
		return;

	if (arm)
		doorbell |= 1 << DB_CQ_REARM_SHIFT;
	doorbell |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(doorbell, adapter->db + DB_CQ_OFFSET);
}

/* ndo_set_mac_address handler: program a new MAC via the FW PMAC_ADD
 * cmd, delete the old PMAC, and only commit the change to netdev after
 * the FW confirms the new MAC is active.  Returns 0 on success or a
 * negative errno (-EADDRNOTAVAIL, -EPERM, or a FW cmd status).
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
		status = -EPERM;
		goto err;
	}

	/* FW confirmed the new MAC is active: commit it to the netdev */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
/* BE2 supports only v0 cmd */
/* Return a pointer to the hw_stats section inside the stats cmd
 * response buffer; the response layout differs between BE2 (v0) and
 * later chips (v1).
 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	void *resp = adapter->stats_cmd.va;

	if (BE2_chip(adapter))
		return &((struct be_cmd_resp_get_stats_v0 *)resp)->hw_stats;

	return &((struct be_cmd_resp_get_stats_v1 *)resp)->hw_stats;
}

/* BE2 supports only v0 cmd */
/* Return a pointer to the erx section of the hw stats, accounting for
 * the v0 (BE2) vs v1 layout difference.
 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	void *hw_stats = hw_stats_from_cmd(adapter);

	if (BE2_chip(adapter))
		return &((struct be_hw_stats_v0 *)hw_stats)->erx;

	return &((struct be_hw_stats_v1 *)hw_stats)->erx;
}

/* Copy the v0 (BE2) FW stats response into the driver's generic
 * drv_stats block for this adapter's port.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats in little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 keeps address- and vlan-filter drops in separate counters;
	 * the driver reports their sum
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 jabber counters are per-port fields of the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

379
/* Copy the v1 (BE3/Skyhawk) FW stats response into the driver's
 * generic drv_stats block for this adapter's port.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* FW returns the stats in little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 already reports a single combined filter counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

Selvin Xavier's avatar
Selvin Xavier committed
425
426
/* Copy the Lancer physical-port stats response into the driver's
 * generic drv_stats block.  Lancer counters are 64-bit; only the low
 * 32 bits (the *_lo fields) are folded into the 32-bit driver stats.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	/* FW returns the stats in little-endian; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* driver reports the sum of address- and vlan-filter drops */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
463

464
465
466
467
468
469
470
471
472
473
474
475
/* Fold a 16-bit HW counter reading @val (which wraps at 65535) into the
 * 32-bit accumulator *acc.  The low 16 bits of *acc hold the previous
 * raw reading; the upper bits count completed wraps.
 *
 * NOTE: the original used file-scope function-like macros lo()/hi()
 * without #undef, leaking two very common identifiers into the rest of
 * the translation unit; plain locals are used instead (same behavior).
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
	u32 lo_acc = *acc & 0xFFFF;		/* previous raw HW reading */
	u32 hi_acc = *acc & 0xFFFF0000;		/* accumulated wrap count */
	u32 newacc = hi_acc + val;

	/* A reading smaller than last time means the HW counter wrapped */
	if (val < lo_acc)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

Jingoo Han's avatar
Jingoo Han committed
476
/* Record the per-RX-queue erx drop counter read from FW stats. */
static void populate_erx_stats(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			u32 erx_stat)
{
	if (BEx_chip(adapter)) {
		/* On BEx this HW counter wraps after 65535; fold it into
		 * the driver's 32-bit accumulator.
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
	} else {
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	}
}

490
491
/* Parse the FW stats response into driver counters, dispatching on the
 * chip family (Lancer pport stats vs BE2 v0 vs BE3/Skyhawk v1), and
 * update the per-RX-queue erx drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else
			/* for BE3 and Skyhawk */
			populate_be_v1_stats(adapter);

		/* as erx_v1 is longer than v0, ok to use v1 for v0 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

514
515
/* ndo_get_stats64 handler: aggregate per-queue RX/TX counters (read
 * consistently via the u64_stats seqcount) plus driver error counters
 * into @stats.  Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
					struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		/* retry loop: re-read if the writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		/* retry loop: re-read if the writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

580
/* Propagate a FW link-status notification to the netdev carrier state. */
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;
	bool link_up = (link_status & LINK_STATUS_MASK) == LINK_UP;

	/* On the very first notification, start from a known carrier-off
	 * state before applying the reported status.
	 */
	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_up)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

595
/* Account one TX request on @txo's stats under the u64_stats seqcount. */
static void be_tx_stats_update(struct be_tx_obj *txo,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 pkt_cnt = gso_segs ? gso_segs : 1;	/* non-GSO counts as 1 pkt */

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += pkt_cnt;
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
611
612
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
Sathya Perla's avatar
Sathya Perla committed
613
{
614
615
616
617
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

Sathya Perla's avatar
Sathya Perla committed
618
619
	/* to account for hdr wrb */
	cnt++;
620
621
622
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
Sathya Perla's avatar
Sathya Perla committed
623
624
625
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
626
	}
Sathya Perla's avatar
Sathya Perla committed
627
628
629
630
631
632
633
634
635
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

/* Fill one data WRB with the DMA address and length of a fragment. */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = (u32)addr;		/* low 32 bits of the address */
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
/* Return the skb's VLAN tag, overriding the priority bits with the
 * adapter's recommended priority when the OS-supplied priority is not
 * in the available priority bitmap.
 */
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
					struct sk_buff *skb)
{
	u16 vlan_tag = vlan_tx_tag_get(skb);
	u8 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

655
/* Fill the per-packet header WRB: CRC/checksum/LSO offload flags, VLAN
 * tag, event/completion bits, the WRB count and total payload length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
	u16 vlan_tag;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		/* lancer does not take the lso6 bit for IPv6 TSO */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request L4 checksum offload matching the packet type */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

Ivan Vecera's avatar
Ivan Vecera committed
690
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
691
692
693
694
695
696
697
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
698
	if (wrb->frag_len) {
699
		if (unmap_single)
Ivan Vecera's avatar
Ivan Vecera committed
700
701
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
702
		else
Ivan Vecera's avatar
Ivan Vecera committed
703
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
704
705
	}
}
Sathya Perla's avatar
Sathya Perla committed
706

707
/* DMA-map the skb's linear part and page frags and fill data WRBs (plus
 * an optional dummy WRB and the header WRB) into @txq.
 * Returns the number of payload bytes mapped, or 0 if a DMA mapping
 * failed (after unwinding all mappings made so far).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
		bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* unwind point for the error path */

	if (skb->len > skb->data_len) {
		/* skb has a linear (non-paged) part: map it first */
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length dummy WRB to pad the WRB count to an even number */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind the queue head and unmap everything mapped so far;
	 * only the first mapped WRB (if any) was dma_map_single'd
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

774
/* Manually insert the VLAN tag (and the outer QnQ VLAN, if configured)
 * into the packet data instead of relying on HW tagging.  May set
 * *skip_hw_vlan to tell the caller/FW to skip HW VLAN insertion.
 * Returns the (possibly reallocated) skb, or NULL on allocation failure.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* skb may be shared; get a private copy before modifying it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* untagged pkts get the pvid when a QnQ async event was received */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inline in the pkt data; clear the meta field */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
/* Does this packet carry an IPv6 extension header whose 2nd byte is
 * 0xff (the pattern that triggers the HW TX stall)?
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ehdr;

	if (eh->h_proto != htons(ETH_P_IPV6))
		return false;

	ip6h = (struct ipv6hdr *)(skb->data + offset);
	offset += sizeof(struct ipv6hdr);

	/* TCP/UDP directly after the IPv6 hdr means no extension header */
	if (ip6h->nexthdr == NEXTHDR_TCP || ip6h->nexthdr == NEXTHDR_UDP)
		return false;

	ehdr = (struct ipv6_opt_hdr *)(skb->data + offset);
	/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
	return ehdr->hdrlen == 0xff;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

844
845
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
				struct sk_buff *skb)
846
{
847
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
848
849
}

850
851
852
/* Apply all HW/FW TX errata workarounds to an outgoing skb.
 * May pad, trim or re-tag the pkt; sets *skip_hw_vlan when the WRB must
 * tell the HW not to insert a VLAN tag. Returns the (possibly new) skb,
 * or NULL when the pkt had to be dropped (skb already freed).
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data;
	unsigned int hdr_len;
	struct iphdr *iph;

	/* Lancer ASIC has a bug wherein packets that are 32 bytes or less
	 * may cause a transmit stall on that port. So the work-around is to
	 * pad such packets to a 36-byte length.
	 */
	if (unlikely(lancer_chip(adapter) && skb->len <= 32)) {
		if (skb_padto(skb, 36))
			goto tx_drop;
		skb->len = 36;
	}

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the pkt back down to the length the IP header claims.
	 */
	hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		iph = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, hdr_len + ntohs(iph->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in UMC mode
	 */
	if ((adapter->function_mode & UMC_ENABLED) &&
	    veth->h_vlan_proto == htons(ETH_P_8021Q))
			*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto tx_drop;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
	return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb)
		return NETDEV_TX_OK;

941
	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla's avatar
Sathya Perla committed
942

943
944
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
945
	if (copied) {
Eric Dumazet's avatar
Eric Dumazet committed
946
947
		int gso_segs = skb_shinfo(skb)->gso_segs;

948
		/* record the sent skb in the sent_skb table */
949
950
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;
951
952
953
954
955

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
956
		atomic_add(wrb_cnt, &txq->used);
957
958
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
959
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
960
961
			stopped = true;
		}
Sathya Perla's avatar
Sathya Perla committed
962

963
		be_txq_notify(adapter, txo, wrb_cnt);
Sathya Perla's avatar
Sathya Perla committed
964

Eric Dumazet's avatar
Eric Dumazet committed
965
		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
966
967
968
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
Sathya Perla's avatar
Sathya Perla committed
969
970
971
972
973
974
975
976
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
977
978
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla's avatar
Sathya Perla committed
979
980
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
981
982
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla's avatar
Sathya Perla committed
983
984
985
986
987
988
989
990
991
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
Sathya Perla's avatar
Sathya Perla committed
996
{
Sathya Perla's avatar
Sathya Perla committed
997
998
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i;
999
	int status = 0;
1000

1001
1002
1003
1004
	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

1005
	if (adapter->vlans_added > be_max_vlans(adapter))
1006
1007
1008
1009
1010
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for (i = 0; i < VLAN_N_VID; i++)
		if (adapter->vlan_tag[i])
Sathya Perla's avatar
Sathya Perla committed
1011
			vids[num++] = cpu_to_le16(i);
1012
1013

	status = be_cmd_vlan_config(adapter, adapter->if_handle,
Sathya Perla's avatar
Sathya Perla committed
1014
				    vids, num, 1, 0);
1015
1016
1017
1018
1019
1020

	/* Set to VLAN promisc mode as setting VLAN filter failed */
	if (status) {
		dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
		dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
		goto set_vlan_promisc;
Sathya Perla's avatar
Sathya Perla committed
1021
	}
1022

1023
	return status;
1024
1025
1026
1027
1028

set_vlan_promisc:
	status = be_cmd_vlan_config(adapter, adapter->if_handle,
				    NULL, 0, 1, 1);
	return status;
Sathya Perla's avatar
Sathya Perla committed
1029
1030
}

1031
/* ndo_vlan_rx_add_vid handler: marks @vid in the driver's VLAN table and
 * reprograms the HW filter via be_vid_config(). Rolls the table entry back
 * on failure. Returns 0 on success or a negative error code.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Only the PF may configure VLANs on BE-class HW */
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
		status = -EINVAL;
		goto ret;
	}

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
		status = be_vid_config(adapter);

	if (!status)
		adapter->vlans_added++;
	else
		adapter->vlan_tag[vid] = 0;	/* undo on HW config failure */
ret:
	return status;
}

1057
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla's avatar
Sathya Perla committed
1058
1059
{
	struct be_adapter *adapter = netdev_priv(netdev);
1060
	int status = 0;
Sathya Perla's avatar
Sathya Perla committed
1061

1062
	if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1063
1064
1065
		status = -EINVAL;
		goto ret;
	}
1066

1067
1068
1069
1070
	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		goto ret;

Sathya Perla's avatar
Sathya Perla committed
1071
	adapter->vlan_tag[vid] = 0;
1072
	if (adapter->vlans_added <= be_max_vlans(adapter))
Sathya Perla's avatar
Sathya Perla committed
1073
		status = be_vid_config(adapter);
1074

1075
1076
1077
1078