/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overridden
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
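/* Worked example for bnx2x_free_tx_pkt() above (editorial illustration, not
 * part of the original driver): assuming tx_start_bd->nbd counts every BD of
 * the packet, a packet queued as a start BD, a parsing BD and two data BDs
 * carries nbd == 4.  After the start BD is unmapped, nbd drops to 3; the
 * parsing BD (and, for TSO, the split header BD) is skipped without
 * unmapping since it has no DMA mapping of its own, and the remaining data
 * BDs are unmapped one by one in the while (nbd > 0) loop.
 */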

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
					    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Get Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Get Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_gro_params - compute GRO values
 *
 * @skb:		packet skb
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 * @pkt_len:		length of all segments
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 * Compute number of aggregated segments, and gso_type.
 */
static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
				 u16 len_on_bd, unsigned int pkt_len,
				 u16 num_of_coalesced_segs)
{
	/* TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6) {
		hdrs_len += sizeof(struct ipv6hdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	} else {
		hdrs_len += sizeof(struct iphdr);
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	}

	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;

	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
	 * to skb_shinfo(skb)->gso_segs
	 */
	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
}
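/* Worked example for bnx2x_set_gro_params() above (editorial note, not part
 * of the original driver): for an IPv4 aggregation whose first packet
 * carried a TCP timestamp, hdrs_len = ETH_HLEN (14) + sizeof(struct tcphdr)
 * (20) + sizeof(struct iphdr) (20) + TPA_TSTAMP_OPT_LEN (12) = 66, so a
 * len_on_bd of 1514 yields gso_size = 1514 - 66 = 1448.
 */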

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGES, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
				     le16_to_cpu(cqe->pkt_len),
				     le16_to_cpu(cqe->num_of_coalesced_segs));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGES, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGES;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
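/* Editorial note on bnx2x_frag_alloc()/bnx2x_frag_free() above: when
 * fp->rx_frag_size is non-zero the rx buffer comes from the page-fragment
 * allocator (netdev_alloc_frag()) and is released with put_page() on its
 * head page; when it is zero (presumably the larger rx_buf_size
 * configurations set up elsewhere in the driver) plain kmalloc()/kfree()
 * is used instead.
 */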

#ifdef CONFIG_INET
static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
}

static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
}
#endif
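/* Editorial note on the two helpers above: they seed th->check with the
 * complemented TCP pseudo-header checksum (tcp_v4_check()/tcp_v6_check()
 * over the TCP length and addresses with a zero payload sum), mirroring
 * what the software GRO completion path expects before tcp_gro_complete()
 * is called from bnx2x_gro_receive() below.
 */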

static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb)
{
#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_set_network_header(skb, 0);
		switch (be16_to_cpu(skb->protocol)) {
		case ETH_P_IP:
			bnx2x_gro_ip_csum(bp, skb);
			break;
		case ETH_P_IPV6:
			bnx2x_gro_ipv6_csum(bp, skb);
			break;
		default:
			BNX2X_ERR("FW GRO supports only IPv4/IPv6, not 0x%04x\n",
				  be16_to_cpu(skb->protocol));
		}
		tcp_gro_complete(skb);
	}
#endif
	napi_gro_receive(&fp->napi, skb);
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			bnx2x_gro_receive(bp, fp, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
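/* Editorial example for bnx2x_csum_validate() above: the skb arrives with
 * CHECKSUM_NONE (asserted by the caller via skb_checksum_none_assert()).
 * A CQE with the L4 no-validation flag keeps it that way, a CQE flagged
 * with an IP or L4 checksum error only bumps qstats->hw_csum_err, and only
 * a cleanly validated CQE promotes the skb to CHECKSUM_UNNECESSARY.
 */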

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
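/* Editorial example for bnx2x_get_mf_speed() above: with a 10000 Mbps
 * physical link and maxCfg == 40, an SI-mode function reports
 * 10000 * 40 / 100 = 4000 Mbps, while an SD-mode function is limited to
 * vn_max_rate = 40 * 100 = 4000 Mbps only when that value is below the
 * physical line speed.
 */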

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex,