/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;
	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

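	/* Relocate the txdata block itself into its new slot; to_fp will
	 * point at the new location below.
	 */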
	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

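	/* nbd counts every BD of the packet, including the start BD that was
	 * just unmapped; drop it from the count before walking the rest.
	 */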
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);

	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
		    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

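/* Advance last_max_sge when idx is newer; SUB_S16() handles 16-bit wrap */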
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}
#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);
	/* Check if there was a TCP timestamp; if there was, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise the FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

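	/* e.g. for IPv4 with a timestamp option, hdrs_len is
	 * 14 (eth) + 20 (ip) + 20 (tcp) + 12 (tstamp) = 66 bytes
	 */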
	return len_on_bd - hdrs_len;
}

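/* Allocate a page chunk for an SGE ring slot and DMA-map it for the NIC */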
static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}


#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
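				/* every extra frag that points into the
				 * same page needs its own page reference
				 */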
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

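/* rx_frag_size != 0 means rx buffers come from the page-fragment
 * allocator; otherwise they are plain kmalloc() buffers.
 */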
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}


static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);
				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);
				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

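			/* GRO packs gro_size-sized chunks into each page, so
			 * round up by full_page; LRO fills whole SGE pages
			 */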
			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}
		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);

next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
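		/* In SI mode maxCfg is a percentage of the link speed;
		 * in SD mode it is in units of 100 Mbps
		 */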
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;
	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

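/* The tail of each SGE ring page is reserved as a "next page" pointer;
 * chain every page to the following one (wrapping around at the end).
 */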
static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
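		/* only aggregations caught mid-flight (TPA_START) still
		 * hold a DMA mapping for the first buffer
		 */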
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;