/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of the bp->fp[to].napi are kept
 * intact. This is done by first copying the napi struct from
 * the target to the source, and then mem copying the entire
 * source onto the target. Update txdata pointers and related
 * content.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
	int old_max_eth_txqs, new_max_eth_txqs;
	int old_txdata_index = 0, new_txdata_index = 0;

	/* Copy the NAPI object as it has been already initialized */
	from_fp->napi = to_fp->napi;

	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* move sp_objs contents as well, as their indices match fp ones */
	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));

	/* move fp_stats contents as well, as their indices match fp ones */
	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));

	/* Update txdata pointers in fp and move txdata content accordingly:
	 * Each fp consumes 'max_cos' txdata structures, so the index should be
	 * decremented by max_cos x delta.
	 */

	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
				(bp)->max_cos;
	if (from == FCOE_IDX(bp)) {
		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
	}

	memcpy(&bp->bnx2x_txq[new_txdata_index],
	       &bp->bnx2x_txq[old_txdata_index],
	       sizeof(struct bnx2x_fp_txdata));
	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}

/**
 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
 *
 * @bp:	driver handle
 * @delta:	number of eth queues which were not allocated
 */
static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
{
	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);

	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
	 * backward along the array could cause memory to be overwritten
	 */
	for (cos = 1; cos < bp->max_cos; cos++) {
		for (i = 0; i < old_eth_num - delta; i++) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			int new_idx = cos * (old_eth_num - delta) + i;

			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
			       sizeof(struct bnx2x_fp_txdata));
			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
		}
	}
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx, unsigned int *pkts_compl,
			     unsigned int *bytes_compl)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}
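
	/* Worked example (illustrative, not from the original code): a
	 * two-frag TSO packet with a split header is built as start BD +
	 * parse BD + split-header BD + two data BDs, so tx_start_bd->nbd is
	 * 5; after the "- 1" and the two decrements above, nbd == 2, i.e.
	 * only the two data BDs still hold DMA mappings to be released by
	 * the loop below.
	 */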

	/* now free frags */
	while (nbd > 0) {

		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	if (likely(skb)) {
		(*pkts_compl)++;
		(*bytes_compl) += skb->len;
	}

	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
	unsigned int pkts_compl = 0, bytes_compl = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE,
		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
		    &pkts_compl, &bytes_compl);

		sw_cons++;
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
					 u16 sge_len,
					 struct eth_end_agg_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
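
	/* Illustrative note (inferred from the BIT_VEC64_* macros): each
	 * sge_mask element tracks 64 SGEs, so last_elem and first_elem
	 * index 64-bit mask words rather than individual SGE entries.
	 */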

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
			    const struct eth_fast_path_rx_cqe *cqe,
			    bool *l4_rxhash)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
		enum eth_rss_hash_type htype;

		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
		*l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
			     (htype == TCP_IPV6_HASH_TYPE);
		return le32_to_cpu(cqe->rss_hash_result);
	}
	*l4_rxhash = false;
	return 0;
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty data buffer from the aggregation info  */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->data + NET_SKB_PAD,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 *  ...if it fails - move the skb from the consumer to the producer
	 *  and set the current aggregation state as ERROR to drop it
	 *  when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_data(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty data from pool to prod */
	prod_rx_buf->data = first_buf->data;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new data */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;
	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
	if (fp->mode == TPA_MODE_GRO) {
		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
		tpa_info->full_page =
			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
		tpa_info->gro_size = gro_size;
	}

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
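
/* TPA flow summary (informal, inferred from this file): TPA_START parks the
 * first buffer in tpa_info->first_buf and leaves the bin in BNX2X_TPA_START;
 * the matching END CQE makes bnx2x_tpa_stop() rebuild the skb from that
 * buffer plus the SGE pages and return the bin to BNX2X_TPA_STOP.
 */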

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
			     u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);


	/* Check if there was a TCP timestamp; if there is one it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
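
/* Worked example (illustrative): for an IPv4 aggregation whose first packet
 * carries TCP timestamps, hdrs_len = 14 (ETH_HLEN) + 20 (iphdr) + 20 (tcphdr)
 * + 12 (TPA_TSTAMP_OPT_LEN) = 66, so len_on_bd = 1514 gives an MSS of 1448.
 */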

static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
			      struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL)) {
		BNX2X_ERR("Can't alloc sge\n");
		return -ENOMEM;
	}

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
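
/* Sizing note (inferred from the code above): one SGE entry spans
 * SGE_PAGE_SIZE * PAGES_PER_SGE bytes, so both the DMA mapping and the
 * error-path __free_pages() operate on the whole compound allocation.
 */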

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct bnx2x_agg_info *tpa_info,
			       u16 pages,
			       struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size;
	int err, j, frag_id = 0;
	u16 len_on_bd = tpa_info->len_on_bd;
	u16 full_page = 0, gro_size = 0;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;

	if (fp->mode == TPA_MODE_GRO) {
		gro_size = tpa_info->gro_size;
		full_page = tpa_info->full_page;
	}

	/* This is needed in order to enable forwarding support */
	if (frag_size) {
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

		skb_shinfo(skb)->gso_type =
			(GET_FLAG(tpa_info->parsing_flags,
				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
			 PRS_FLAG_OVERETH_IPV6) ?
			SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
	}


#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		if (fp->mode == TPA_MODE_GRO)
			frag_len = min_t(u32, frag_size, (u32)full_page);
		else /* LRO */
			frag_len = min_t(u32, frag_size,
					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));

		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
		if (fp->mode == TPA_MODE_LRO)
			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
		else { /* GRO */
			int rem;
			int offset = 0;
			for (rem = frag_len; rem > 0; rem -= gro_size) {
				int len = rem > gro_size ? gro_size : rem;
				skb_fill_page_desc(skb, frag_id++,
						   old_rx_pg.page, offset, len);
				if (offset)
					get_page(old_rx_pg.page);
				offset += len;
			}
		}

		skb->data_len += frag_len;
		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
	if (fp->rx_frag_size)
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}

static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
{
	if (fp->rx_frag_size)
		return netdev_alloc_frag(fp->rx_frag_size);

	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}
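
/* Design note (inferred from the two helpers above): rx_frag_size selects
 * between the page-fragment allocator and plain kmalloc() for buffers too
 * large for a frag; bnx2x_frag_free() must mirror whichever path allocated
 * the buffer.
 */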


static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   struct bnx2x_agg_info *tpa_info,
			   u16 pages,
			   struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = NULL;
	u8 *new_data, *data = rx_buf->data;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new data */
	new_data = bnx2x_frag_alloc(fp);
	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);
	if (likely(new_data))
		skb = build_skb(data, fp->rx_frag_size);

	if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad + NET_SKB_PAD);
		skb_put(skb, len);
		skb->rxhash = tpa_info->rxhash;
		skb->l4_rxhash = tpa_info->l4_rxhash;

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
					 skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS,
			   "Failed to allocate new pages - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new data in bin */
		rx_buf->data = new_data;

		return;
	}
	bnx2x_frag_free(fp, new_data);
drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}

static int bnx2x_alloc_rx_data(struct bnx2x *bp,
			       struct bnx2x_fastpath *fp, u16 index)
{
	u8 *data;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	data = bnx2x_frag_alloc(fp);
	if (unlikely(data == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
				 fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		bnx2x_frag_free(fp, data);
		BNX2X_ERR("Can't map rx data\n");
		return -ENOMEM;
	}

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static
void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
				 struct bnx2x_fastpath *fp,
				 struct bnx2x_eth_q_stats *qstats)
{
	/* Do nothing if no L4 csum validation was done.
	 * We do not check whether IP csum was validated. For IPv4 we assume
	 * that if the card got as far as validating the L4 csum, it also
	 * validated the IP csum. IPv6 has no IP csum.
	 */
	if (cqe->fast_path_cqe.status_flags &
	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
		return;

	/* If L4 validation was done, check if an error was found. */
	if (cqe->fast_path_cqe.type_error_flags &
	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
		qstats->hw_csum_err++;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}

int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad, queue;
		u8 *data;
		bool l4_rxhash;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS,
		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
		   CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag),
		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		rx_buf = &fp->rx_buf_ring[bd_cons];
		data = rx_buf->data;

		if (!CQE_TYPE_FAST(cqe_fp_type)) {
			struct bnx2x_agg_info *tpa_info;
			u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
			/* sanity check */
			if (fp->disable_tpa &&
			    (CQE_TYPE_START(cqe_fp_type) ||
			     CQE_TYPE_STOP(cqe_fp_type)))
				BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
					  CQE_TYPE(cqe_fp_type));
#endif

			if (CQE_TYPE_START(cqe_fp_type)) {
				u16 queue = cqe_fp->queue_index;
				DP(NETIF_MSG_RX_STATUS,
				   "calling tpa_start on queue %d\n",
				   queue);

				bnx2x_tpa_start(fp, queue,
						bd_cons, bd_prod,
						cqe_fp);

				goto next_rx;

			}
			queue = cqe->end_agg_cqe.queue_index;
			tpa_info = &fp->tpa_info[queue];
			DP(NETIF_MSG_RX_STATUS,
			   "calling tpa_stop on queue %d\n",
			   queue);

			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
				    tpa_info->len_on_bd;

			if (fp->mode == TPA_MODE_GRO)
				pages = (frag_size + tpa_info->full_page - 1) /
					 tpa_info->full_page;
			else
				pages = SGE_PAGE_ALIGN(frag_size) >>
					SGE_PAGE_SHIFT;

			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
				       &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
			if (bp->panic)
				return 0;
#endif

			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
			goto next_cqe;
		}
		/* non TPA */
		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
		pad = cqe_fp->placement_offset;
		dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					DMA_FROM_DEVICE);
		pad += NET_SKB_PAD;
		prefetch(data + pad); /* speedup eth_type_trans() */
		/* is this an error packet? */
		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
			   "ERROR  flags %x  rx packet %u\n",
			   cqe_fp_flags, sw_comp_cons);
			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			skb = netdev_alloc_skb_ip_align(bp->dev, len);
			if (skb == NULL) {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
				goto reuse_rx;
			}
			memcpy(skb->data, data + pad, len);
			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
		} else {
			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
						 dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb = build_skb(data, fp->rx_frag_size);
				if (unlikely(!skb)) {
					bnx2x_frag_free(fp, data);
					bnx2x_fp_qstats(bp, fp)->
							rx_skb_alloc_failed++;
					goto next_rx;
				}
				skb_reserve(skb, pad);
			} else {
				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
				   "ERROR  packet dropped because of alloc failure\n");
				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
				goto next_rx;
			}
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Set Toeplitz hash for a non-LRO skb */
		skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
		skb->l4_rxhash = l4_rxhash;

		skb_checksum_none_assert(skb);

		if (bp->dev->features & NETIF_F_RXCSUM)
			bnx2x_csum_validate(skb, cqe, fp,
					    bnx2x_fp_qstats(bp, fp));

		skb_record_rx_queue(skb, fp->rx_queue);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->data = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(NETIF_MSG_INTR,
	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}
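
/* Worked example (illustrative): with maxCfg = 40 on a 20000 Mbps link,
 * SI mode scales the line rate to 20000 * 40 / 100 = 8000 Mbps, while SD
 * mode caps it at vn_max_rate = 40 * 100 = 4000 Mbps.
 */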

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static void bnx2x_fill_report_data(struct bnx2x *bp,
				   struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report a new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		const char *duplex;
		const char *flow;

		netif_carrier_on(bp->dev);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			duplex = "full";
		else
			duplex = "half";

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				     &cur_data.link_report_flags))
					flow = "ON - receive & transmit";
				else
					flow = "ON - receive";
			} else {
				flow = "ON - transmit";
			}
		} else {
			flow = "none";
		}
		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
			    cur_data.line_speed, duplex, flow);
	}
}

static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));

		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static void bnx2x_free_tpa_pool(struct bnx2x *bp,
				struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
		u8 *data = first_buf->data;

		if (data == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}
		if (tpa_info->tpa_state == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(first_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);
		bnx2x_frag_free(fp, data);
		first_buf->data = NULL;
	}
}

void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue_cnic(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
	}
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_eth_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < MAX_AGG_QS(bp); i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->data = bnx2x_frag_alloc(fp);
				if (!first_buf->data) {
					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
						  j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate %d rx sges\n",
						  i);
					BNX2X_ERR("disabling TPA for queue[%d]\n",
						  j);