/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/busy_poll.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
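	/* BD consumer index to return once all BDs of this packet are freed */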
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
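		/* only the TCP hash types count as L4 hashes */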
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
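		/* largest multiple of gro_size that fits in the SGE pages */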
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
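				/* extra frags sharing this page need their own page reference */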
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
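	/* rx_frag_size != 0 means the buffer came from the page-frag allocator */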
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}

static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
{
	skb_set_network_header(skb, 0);
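	/* gro_func primes the TCP pseudo-header checksum for tcp_gro_complete() */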
	gro_func(bp, skb);
	tcp_gro_complete(skb);
}
#endif

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
			break;
		default:
			BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}

		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	comp_ring_cons = RCQ_BD(sw_comp_cons);
	cqe = &fp->rx_comp_ring[comp_ring_cons];
	cqe_fp = &cqe->fast_path_cqe;

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);

	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;
			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(cqe_fp->vlan_tag));

		skb_mark_ll(skb, &fp->napi);

		if (bnx2x_fp_ll_polling(fp))
			netif_receive_skb(skb);
		else
			napi_gro_receive(&fp->napi, skb);
next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		/* mark CQE as free */
		BNX2X_SEED_CQE(cqe_fp);

		if (rx_pkt == budget)
			break;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
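	/* the IGU interrupt stays disabled until the NAPI poll re-enables it */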

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
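			/* in SD mode the max config is given in 100 Mbps units */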
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))