/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

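/* Allocate and zero a DMA-coherent ring of @len entries, each @entry_size
 * bytes, for the given queue.
 */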
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

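/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register in PCI config space; be_intr_set()
 * below falls back to this when the FW cmd fails.
 */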
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (adapter->eeh_error)
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

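/* The be_*q_notify() routines below ring a doorbell to tell the chip how
 * many ring entries were posted (RQ/TXQ) or processed (EQ/CQ). The wmb()
 * ensures descriptor writes reach memory before the doorbell write.
 */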
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;
	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_error)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide whether the new MAC was successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

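/* The populate_be_vX_stats() routines copy the FW-reported port/rxf/pmem
 * counters into adapter->drv_stats; one variant per stats-cmd version
 * (v0 for BE2, v1 for BE3, v2 for later chips).
 */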
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}

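/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * counter; detects at most one wrap since the last reading.
 */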
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

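/* Aggregate SW stats from all RX/TX queues; the u64_stats fetch/retry
 * loops give a consistent 64-bit snapshot even on 32-bit hosts.
 */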
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
			       u32 wrb_cnt, u32 copied, u32 gso_segs,
			       bool stopped)
{
	struct be_tx_stats *stats = tx_stats(txo);

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_wrbs += wrb_cnt;
	stats->tx_bytes += copied;
	stats->tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->tx_stops++;
	u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
			   bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

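/* Fill a WRB with the DMA address and length of one Tx buffer fragment */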
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
	wrb->rsvd0 = 0;
}

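/* Pick the VLAN tag to use on Tx: if the skb's priority is not in the
 * available priority bmap, substitute the FW-recommended priority.
 */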
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = vlan_tx_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
				adapter->recommended_prio;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

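/* DMA-map the skb head and frags and post one WRB each, preceded by the
 * hdr WRB (and followed by a dummy WRB if needed to even out the count).
 * Returns the number of data bytes posted; on a mapping error, unwinds
 * the WRBs posted so far and returns 0.
 */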
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}

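/* An ipv6 pkt can stall BE3 HW VLAN tagging (see be_ipv6_tx_stall_chk())
 * when it carries an extension header whose length byte is 0xff; detect
 * such pkts here.
 */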
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *) (skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

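/* Apply Tx-path workarounds for HW quirks: trim padded pkts whose tot_len/
 * csum the HW mangles, skip HW tagging for pre-tagged pkts in pvid mode,
 * and insert VLAN tags in SW where HW tagging is buggy. Returns the
 * (possibly modified) skb, or NULL if it was dropped.
 */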
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
	    (adapter->pvid || adapter->qnq_vid) &&
	    !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
