/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"
#include "bnx2x_sriov.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[old_txdata_index],
	       &bp->bnx2x_txq[new_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_fill_fw_str - Fill buffer with FW version string.
 *
 * @bp:        driver handle
 * @buf:       character buffer to fill with the fw name
 * @buf_len:   length of the above buffer
 *
 */
void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
{
	if (IS_PF(bp)) {
		u8 phy_fw_ver[PHY_FW_VER_LEN];

		phy_fw_ver[0] = '\0';
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     phy_fw_ver, PHY_FW_VER_LEN);
		strlcpy(buf, bp->fw_ver, buf_len);
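		/* Append the bootcode (and, if present, PHY) FW version;
		 * the hard-coded 32 here presumably matches the size of
		 * the ethtool fw_version buffer this string ends up in.
		 */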
		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
			 "bc %d.%d.%d%s%s",
			 (bp->common.bc_ver & 0xff0000) >> 16,
			 (bp->common.bc_ver & 0xff00) >> 8,
			 (bp->common.bc_ver & 0xff),
			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	} else {
		strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);


	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
		    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

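		/* A fully cleared 64-bit mask element means every SGE it
		 * covers has been consumed, so the producer may advance
		 * past the whole element; re-arm it for reuse.
		 */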
		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);


	/* Check if there was a TCP timestamp; if there is one, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;
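	/* e.g. an IPv4 aggregation without the timestamp option ends up
	 * with hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) = 54 bytes.
	 */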

	return len_on_bd - hdrs_len;
}

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		/* set for GRO */
		if (fp->mode == TPA_MODE_GRO)
			skb_shinfo(skb)->gso_type =
			    (GET_FLAG(tpa_info->parsing_flags,
				      PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
						PRS_FLAG_OVERETH_IPV6) ?
				SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}


#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
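				/* The first frag consumes the reference taken
				 * when the page was allocated; each additional
				 * frag sharing this page needs its own.
				 */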
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}


static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
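	/* build_skb() below wraps the old (now unmapped) buffer without
	 * copying; if it succeeds, new_data takes its place in the bin.
	 */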
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */

	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* The CQ "next element" (the next-page pointer) has the same size
	   as a regular element, that's why it's safe to simply step over it */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (IS_PF(bp) && !CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    MAX_AGG_QS(bp));
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}