/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1		"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H		"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

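/* Addresses of the per-channel DMAE "go" registers; writing 1 to
 * dmae_reg_go_c[idx] kicks off the command that bnx2x_post_dmae() has
 * loaded into that channel's command memory.
 */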
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

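/* Program the HC to deliver attentions plus the interrupt mode in use
 * (MSI-X, MSI or INTx), and set the leading/trailing edge masks on E1H.
 */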
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

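/* Acknowledge a status block index to the IGU through the HC command
 * register; 'op' selects the interrupt mode and 'update' whether the
 * status block index should be updated.
 */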
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

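/* Refresh the cached fastpath status block indices written by the chip;
 * the return value flags which index changed: bit 0 for the CSTORM index,
 * bit 1 for the USTORM index.
 */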
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

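/* Tx BDs still available to start_xmit(): ring size minus BDs in flight,
 * with the NUM_TX_RINGS "next page" entries counted as used so they act
 * as a safety threshold.
 */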
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

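/* Slowpath (ramrod) completion handler: advance the per-fastpath or
 * per-function state machine according to which command completed in
 * which state.
 */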
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

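/* SGE page helpers: each SGE ring entry maps PAGES_PER_SGE pages that
 * hold the paged (scatter-gather) part of received packets.
 */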
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

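/* Allocate and DMA-map a fresh skb for RX BD 'index'; on mapping failure
 * the skb is freed and -ENOMEM is returned so the caller can keep the
 * old buffer instead.
 */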
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

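/* SGE mask handling: fp->sge_mask is a bitmap of SGE ring entries that
 * still hold pages; bnx2x_update_sge_prod() advances the SGE producer
 * only across fully consumed u64-sized mask elements.
 */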
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

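/* TPA completion helper: walk the SGL reported in the fast path CQE,
 * attach the corresponding SGE pages to the aggregated skb as page
 * fragments and replenish each SGE slot on the way.
 */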
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

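/* TPA end: take the aggregated skb out of the per-queue bin, fix up its
 * IP checksum, attach the SGL pages and pass it to the stack; the bin is
 * restocked with a newly allocated skb, or the packet is dropped.
 */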
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

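/* Publish new RX BD/CQE/SGE producer values to USTORM internal memory so
 * the FW can post further completions; see the barrier comments in the
 * body for the required ordering.
 */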
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

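/* RX completion loop: drain the RCQ up to the completion consumer from
 * the status block, dispatching slowpath CQEs to bnx2x_sp_event() and
 * handling TPA start/end and regular packets inline.
 */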
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);