/* bnx2x.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 */

#ifndef BNX2X_H
#define BNX2X_H
16
17

#include <linux/pci.h>
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
18
#include <linux/netdevice.h>
19
#include <linux/dma-mapping.h>
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
20
#include <linux/types.h>
21
#include <linux/pci_regs.h>
Eliezer Tamir's avatar
Eliezer Tamir committed
22

23
24
25
26
27
28
/* compilation time flags */

/* define this to make the driver freeze on error to allow getting debug info
 * (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */

29
30
#define DRV_MODULE_VERSION      "1.78.01-0"
#define DRV_MODULE_RELDATE      "2012/10/30"
31
32
#define BNX2X_BC_VER            0x040200

Shmulik Ravid's avatar
Shmulik Ravid committed
33
#if defined(CONFIG_DCB)
34
#define BCM_DCBNL
Shmulik Ravid's avatar
Shmulik Ravid committed
35
#endif
36
37
38
39


#include "bnx2x_hsi.h"

40
#include "../cnic_if.h"
41

42
43

#define BNX2X_MIN_MSIX_VEC_CNT(bp)		((bp)->min_msix_vec_cnt)
Eilon Greenstein's avatar
Eilon Greenstein committed
44
45

#include <linux/mdio.h>
46

Eilon Greenstein's avatar
Eilon Greenstein committed
47
48
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
49
#include "bnx2x_mfw_req.h"
Eilon Greenstein's avatar
Eilon Greenstein committed
50
#include "bnx2x_link.h"
51
#include "bnx2x_sp.h"
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
52
#include "bnx2x_dcb.h"
53
#include "bnx2x_stats.h"
54
#include "bnx2x_vfpf.h"
Eilon Greenstein's avatar
Eilon Greenstein committed
55

56
57
58
59
60
61
/* Interrupt delivery mode used by the device: MSI-X, legacy INTx, or
 * single-vector MSI. */
enum bnx2x_int_mode {
	BNX2X_INT_MODE_MSIX,
	BNX2X_INT_MODE_INTX,
	BNX2X_INT_MODE_MSI
};

Eliezer Tamir's avatar
Eliezer Tamir committed
62
63
/* error/debug prints */

64
#define DRV_MODULE_NAME		"bnx2x"
Eliezer Tamir's avatar
Eliezer Tamir committed
65
66

/* for messages that are currently off */
Merav Sicron's avatar
Merav Sicron committed
67
68
69
70
71
72
73
74
75
76
77
#define BNX2X_MSG_OFF			0x0
#define BNX2X_MSG_MCP			0x0010000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_STATS			0x0020000 /* was: NETIF_MSG_TIMER */
#define BNX2X_MSG_NVM			0x0040000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_DMAE			0x0080000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_SP			0x0100000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_FP			0x0200000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_IOV			0x0800000
#define BNX2X_MSG_IDLE			0x2000000 /* used for idle check*/
#define BNX2X_MSG_ETHTOOL		0x4000000
#define BNX2X_MSG_DCB			0x8000000
Eliezer Tamir's avatar
Eliezer Tamir committed
78
79

/* regular debug print */
80
/* Regular debug print: emitted only when the corresponding bit of
 * bp->msg_enable is set. Requires a local variable "bp" in scope at
 * the call site; prefixes the message with function, line and netdev
 * name (or "?" before the netdev exists). */
#define DP(__mask, fmt, ...)					\
do {								\
	if (unlikely(bp->msg_enable & (__mask)))		\
		pr_notice("[%s:%d(%s)]" fmt,			\
			  __func__, __LINE__,			\
			  bp->dev ? (bp->dev->name) : "?",	\
			  ##__VA_ARGS__);			\
} while (0)
Eliezer Tamir's avatar
Eliezer Tamir committed
88

89
/* Continuation of a previous DP() line: same mask gating, but uses
 * pr_cont() so no new prefix is printed. */
#define DP_CONT(__mask, fmt, ...)				\
do {								\
	if (unlikely(bp->msg_enable & (__mask)))		\
		pr_cont(fmt, ##__VA_ARGS__);			\
} while (0)

95
/* errors debug print */
96
/* Error print gated on the probe message level (netif_msg_probe);
 * same [func:line(netdev)] prefix as DP(). Requires "bp" in scope. */
#define BNX2X_DBG_ERR(fmt, ...)					\
do {								\
	if (unlikely(netif_msg_probe(bp)))			\
		pr_err("[%s:%d(%s)]" fmt,			\
		       __func__, __LINE__,			\
		       bp->dev ? (bp->dev->name) : "?",		\
		       ##__VA_ARGS__);				\
} while (0)
Eliezer Tamir's avatar
Eliezer Tamir committed
104

105
/* for errors (never masked) */
106
/* Unconditional error print (never masked by msg_enable).
 * Requires "bp" in scope for the netdev-name prefix. */
#define BNX2X_ERR(fmt, ...)					\
do {								\
	pr_err("[%s:%d(%s)]" fmt,				\
	       __func__, __LINE__,				\
	       bp->dev ? (bp->dev->name) : "?",			\
	       ##__VA_ARGS__);					\
} while (0)
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
113

114
115
/* Unconditional error print that does not dereference "bp", so it is
 * usable in contexts where no bp/netdev is available. */
#define BNX2X_ERROR(fmt, ...)					\
	pr_err("[%s:%d]" fmt, __func__, __LINE__, ##__VA_ARGS__)
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
116

117

Eliezer Tamir's avatar
Eliezer Tamir committed
118
/* before we have a dev->name use dev_info() */
119
/* Probe-time info print: before dev->name is set, log against the PCI
 * device via dev_info(). Gated on netif_msg_probe(bp). */
#define BNX2X_DEV_INFO(fmt, ...)				 \
do {								 \
	if (unlikely(netif_msg_probe(bp)))			 \
		dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__);	 \
} while (0)
Eliezer Tamir's avatar
Eliezer Tamir committed
124
125

#ifdef BNX2X_STOP_ON_ERROR
void bnx2x_int_disable(struct bnx2x *bp);
/* Driver assert with BNX2X_STOP_ON_ERROR: mark the panic flag, log,
 * disable interrupts so state is frozen for debugging, then dump. */
#define bnx2x_panic()				\
do {						\
	bp->panic = 1;				\
	BNX2X_ERR("driver assert\n");		\
	bnx2x_int_disable(bp);			\
	bnx2x_panic_dump(bp);			\
} while (0)
#else
/* Driver assert without freeze: mark the panic flag, log and dump,
 * but leave interrupts running. */
#define bnx2x_panic()				\
do {						\
	bp->panic = 1;				\
	BNX2X_ERR("driver assert\n");		\
	bnx2x_panic_dump(bp);			\
} while (0)
#endif

143
#define bnx2x_mc_addr(ha)      ((ha)->addr)
144
#define bnx2x_uc_addr(ha)      ((ha)->addr)
Eliezer Tamir's avatar
Eliezer Tamir committed
145

146
147
148
/* Split a 64-bit value into its 32-bit halves and recombine them. */
#define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x)			(u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
Eliezer Tamir's avatar
Eliezer Tamir committed
149
150


151
/* Direct register access through the BAR0 mapping (bp->regview). */
#define REG_ADDR(bp, offset)		((bp->regview) + (offset))

#define REG_RD(bp, offset)		readl(REG_ADDR(bp, offset))
#define REG_RD8(bp, offset)		readb(REG_ADDR(bp, offset))
#define REG_RD16(bp, offset)		readw(REG_ADDR(bp, offset))

#define REG_WR(bp, offset, val)		writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val)	writeb((u8)val, REG_ADDR(bp, offset))
#define REG_WR16(bp, offset, val)	writew((u16)val, REG_ADDR(bp, offset))

/* Indirect register access (via the bnx2x_reg_*_ind helpers). */
#define REG_RD_IND(bp, offset)		bnx2x_reg_rd_ind(bp, offset)
#define REG_WR_IND(bp, offset, val)	bnx2x_reg_wr_ind(bp, offset, val)
Eliezer Tamir's avatar
Eliezer Tamir committed
163

Yaniv Rosner's avatar
Yaniv Rosner committed
164
165
166
/* DMAE-based block register access. len32 is a count of 32-bit words;
 * data is staged through the slow-path wb_data scratch buffer. */
#define REG_RD_DMAE(bp, offset, valp, len32) \
	do { \
		bnx2x_read_dmae(bp, offset, len32);\
		memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
	} while (0)

#define REG_WR_DMAE(bp, offset, valp, len32) \
	do { \
		memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
		bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
				 offset, len32); \
	} while (0)

#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
	REG_WR_DMAE(bp, offset, valp, len32)

/* Write a virtual buffer via the big (GUNZIP) staging buffer. */
#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
	do { \
		memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
		bnx2x_write_big_buf_wb(bp, addr, len32); \
	} while (0)

186
187
188
189
/* Accessors for the shared-memory (shmem/shmem2) and multi-function
 * configuration regions: compute the register address of a struct
 * field from the region base, then read/write it with REG_RD/REG_WR. */
#define SHMEM_ADDR(bp, field)		(bp->common.shmem_base + \
					 offsetof(struct shmem_region, field))
#define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
#define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)

#define SHMEM2_ADDR(bp, field)		(bp->common.shmem2_base + \
					 offsetof(struct shmem2_region, field))
#define SHMEM2_RD(bp, field)		REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)

#define MF_CFG_ADDR(bp, field)		(bp->common.mf_cfg_base + \
					 offsetof(struct mf_cfg, field))
#define MF2_CFG_ADDR(bp, field)		(bp->common.mf2_cfg_base + \
					 offsetof(struct mf2_cfg, field))

#define MF_CFG_RD(bp, field)		REG_RD(bp, MF_CFG_ADDR(bp, field))
#define MF_CFG_WR(bp, field, val)	REG_WR(bp,\
					       MF_CFG_ADDR(bp, field), (val))
#define MF2_CFG_RD(bp, field)		REG_RD(bp, MF2_CFG_ADDR(bp, field))

/* True when shmem2 exists and is large enough to contain "field". */
#define SHMEM2_HAS(bp, field)		((bp)->common.shmem2_base &&	\
					 (SHMEM2_RD((bp), size) >	\
					 offsetof(struct shmem2_region, field)))

/* EMAC register access relative to a local "emac_base" variable. */
#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)
Eliezer Tamir's avatar
Eliezer Tamir committed
211

212
213
214
215
216
217
218
219
/* SP SB indices */

/* General SP events - stats query, cfc delete, etc  */
#define HC_SP_INDEX_ETH_DEF_CONS		3

/* EQ completions */
#define HC_SP_INDEX_EQ_CONS			7

Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
220
221
222
/* FCoE L2 connection completions */
#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS		6
#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS		4
223
224
225
226
/* iSCSI L2 */
#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1

Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
227
228
229
230
231
232
233
234
235
236
237
238
/* Special clients parameters */

/* SB indices */
/* FCoE L2 */
#define BNX2X_FCOE_L2_RX_INDEX \
	(&bp->def_status_blk->sp_sb.\
	index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])

#define BNX2X_FCOE_L2_TX_INDEX \
	(&bp->def_status_blk->sp_sb.\
	index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])

239
240
241
242
243
244
245
246
/**
 *  CIDs and CLIDs:
 *  CLIDs below is a CLID for func 0, then the CLID for other
 *  functions will be calculated by the formula:
 *
 *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
 *
 */
247
248
249
250
251
252
enum {
	BNX2X_ISCSI_ETH_CL_ID_IDX,
	BNX2X_FCOE_ETH_CL_ID_IDX,
	BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};

253
254
#define BNX2X_CNIC_START_ETH_CID(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
					 (bp)->max_cos)
255
	/* iSCSI L2 */
256
#define	BNX2X_ISCSI_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp))
257
	/* FCoE L2 */
258
#define	BNX2X_FCOE_ETH_CID(bp)		(BNX2X_CNIC_START_ETH_CID(bp) + 1)
Vladislav Zolotarov's avatar
Vladislav Zolotarov committed
259

260
261
262
263
#define CNIC_SUPPORT(bp)		((bp)->cnic_support)
#define CNIC_ENABLED(bp)		((bp)->cnic_enabled)
#define CNIC_LOADED(bp)			((bp)->cnic_loaded)
#define FCOE_INIT(bp)			((bp)->fcoe_init)
264

265
266
267
#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR

268
269
#define SM_RX_ID			0
#define SM_TX_ID			1
Eliezer Tamir's avatar
Eliezer Tamir committed
270

271
272
273
274
275
/* defines for multiple tx priority indices */
#define FIRST_TX_ONLY_COS_INDEX		1
#define FIRST_TX_COS_INDEX		0

/* rules for calculating the cids of tx-only connections */
276
277
278
#define CID_TO_FP(cid, bp)		((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
				(cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
279
280

/* fp index inside class of service range */
281
282
283
284
285
286
287
288
#define FP_COS_TO_TXQ(fp, cos, bp) \
			((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))

/* Indexes for transmission queues array:
 * txdata for RSS i CoS j is at location i + (j * num of RSS)
 * txdata for FCoE (if exist) is at location max cos * num of RSS
 * txdata for FWD (if exist) is one location after FCoE
 * txdata for OOO (if exist) is one location after FWD
289
 */
290
291
292
293
294
295
296
enum {
	FCOE_TXQ_IDX_OFFSET,
	FWD_TXQ_IDX_OFFSET,
	OOO_TXQ_IDX_OFFSET,
};
#define MAX_ETH_TXQ_IDX(bp)	(BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
Eliezer Tamir's avatar
Eliezer Tamir committed
297

298
/* fast path */
299
300
301
302
303
/*
 * This driver uses new build_skb() API :
 * RX ring buffer contains pointer to kmalloc() data only,
 * skb are built only after Hardware filled the frame.
 */
Eliezer Tamir's avatar
Eliezer Tamir committed
304
struct sw_rx_bd {
305
	u8		*data;
306
	DEFINE_DMA_UNMAP_ADDR(mapping);
Eliezer Tamir's avatar
Eliezer Tamir committed
307
308
309
};

struct sw_tx_bd {
310
311
	struct sk_buff	*skb;
	u16		first_bd;
Eilon Greenstein's avatar
Eilon Greenstein committed
312
313
314
	u8		flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD		(1<<0)
Eliezer Tamir's avatar
Eliezer Tamir committed
315
316
};

317
318
struct sw_rx_page {
	struct page	*page;
319
	DEFINE_DMA_UNMAP_ADDR(mapping);
320
321
};

Eilon Greenstein's avatar
Eilon Greenstein committed
322
323
324
325
326
/* Doorbell producer update, writable either as a structured
 * doorbell_set_prod or as a single raw 32-bit store. */
union db_prod {
	struct doorbell_set_prod data;
	u32		raw;
};

327
328
329
330
331
332
333
334
/* dropless fc FW/HW related params */
#define BRB_SIZE(bp)		(CHIP_IS_E3(bp) ? 1024 : 512)
#define MAX_AGG_QS(bp)		(CHIP_IS_E1(bp) ? \
					ETH_MAX_AGGREGATION_QUEUES_E1 :\
					ETH_MAX_AGGREGATION_QUEUES_E1H_E2)
#define FW_DROP_LEVEL(bp)	(3 + MAX_SPQ_PENDING + MAX_AGG_QS(bp))
#define FW_PREFETCH_CNT		16
#define DROPLESS_FC_HEADROOM	100
335
336

/* MC hsi */
337
338
339
#define BCM_PAGE_SHIFT		12
#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
340
341
#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)

342
343
344
345
346
#define PAGES_PER_SGE_SHIFT	0
#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
#define SGE_PAGE_SIZE		PAGE_SIZE
#define SGE_PAGE_SHIFT		PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
347
348
349
#define SGE_PAGES		(SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE		min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
					    SGE_PAGES), 0xffff)
350
351

/* SGE ring related macros */
352
#define NUM_RX_SGE_PAGES	2
353
#define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
354
355
#define NEXT_PAGE_SGE_DESC_CNT	2
#define MAX_RX_SGE_CNT		(RX_SGE_CNT - NEXT_PAGE_SGE_DESC_CNT)
Eilon Greenstein's avatar
Eilon Greenstein committed
356
/* RX_SGE_CNT is promised to be a power of 2 */
357
358
359
#define RX_SGE_MASK		(RX_SGE_CNT - 1)
#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE		(NUM_RX_SGE - 1)
360
#define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
361
362
363
				  (MAX_RX_SGE_CNT - 1)) ? \
					(x) + 1 + NEXT_PAGE_SGE_DESC_CNT : \
					(x) + 1)
364
365
#define RX_SGE(x)		((x) & MAX_RX_SGE)

366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
/*
 * Number of required  SGEs is the sum of two:
 * 1. Number of possible opened aggregations (next packet for
 *    these aggregations will probably consume SGE immidiatelly)
 * 2. Rest of BRB blocks divided by 2 (block will consume new SGE only
 *    after placement on BD for new TPA aggregation)
 *
 * Takes into account NEXT_PAGE_SGE_DESC_CNT "next" elements on each page
 */
#define NUM_SGE_REQ		(MAX_AGG_QS(bp) + \
					(BRB_SIZE(bp) - MAX_AGG_QS(bp)) / 2)
#define NUM_SGE_PG_REQ		((NUM_SGE_REQ + MAX_RX_SGE_CNT - 1) / \
						MAX_RX_SGE_CNT)
#define SGE_TH_LO(bp)		(NUM_SGE_REQ + \
				 NUM_SGE_PG_REQ * NEXT_PAGE_SGE_DESC_CNT)
#define SGE_TH_HI(bp)		(SGE_TH_LO(bp) + DROPLESS_FC_HEADROOM)

383
/* Manipulate a bit vector defined as an array of u64 */
384
385

/* Number of bits in one sge_mask array element */
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
/* Manipulate a bit vector defined as an array of u64 elements.
 * idx addresses a bit across the whole vector: the element is
 * idx >> BIT_VEC64_ELEM_SHIFT, the bit within it is
 * idx & BIT_VEC64_ELEM_MASK. */
#define BIT_VEC64_ELEM_SZ		64
#define BIT_VEC64_ELEM_SHIFT		6
#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)

#define __BIT_VEC64_SET_BIT(el, bit) \
	do { \
		el = ((el) | ((u64)0x1 << (bit))); \
	} while (0)

#define __BIT_VEC64_CLEAR_BIT(el, bit) \
	do { \
		el = ((el) & (~((u64)0x1 << (bit)))); \
	} while (0)

#define BIT_VEC64_SET_BIT(vec64, idx) \
	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
			   (idx) & BIT_VEC64_ELEM_MASK)

#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
			     (idx) & BIT_VEC64_ELEM_MASK)

#define BIT_VEC64_TEST_BIT(vec64, idx) \
	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
	((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)

/* Creates a bitmask of all ones in less significant bits.
   idx - index of the most significant bit in the created mask */
#define BIT_VEC64_ONES_MASK(idx) \
		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))

/*******************************************************/


423
424

/* Number of u64 elements in SGE mask array */
425
#define RX_SGE_MASK_LEN			(NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
426
427
428
#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)

429
430
431
/* Fastpath status block pointer: the layout differs between the E1x
 * and E2 chip families, so keep one pointer of each flavor. */
union host_hc_status_block {
	/* pointer to fp status block e1x */
	struct host_hc_status_block_e1x *e1x_sb;
	/* pointer to fp status block e2 */
	struct host_hc_status_block_e2  *e2_sb;
};
435

436
437
struct bnx2x_agg_info {
	/*
438
439
	 * First aggregation buffer is a data buffer, the following - are pages.
	 * We will preallocate the data buffer for each aggregation when
440
441
442
443
444
445
446
447
448
449
450
451
452
	 * we open the interface and will replace the BD at the consumer
	 * with this one when we receive the TPA_START CQE in order to
	 * keep the Rx BD ring consistent.
	 */
	struct sw_rx_bd		first_buf;
	u8			tpa_state;
#define BNX2X_TPA_START			1
#define BNX2X_TPA_STOP			2
#define BNX2X_TPA_ERROR			3
	u8			placement_offset;
	u16			parsing_flags;
	u16			vlan_tag;
	u16			len_on_bd;
453
	u32			rxhash;
Eric Dumazet's avatar
Eric Dumazet committed
454
	bool			l4_rxhash;
Dmitry Kravkov's avatar
Dmitry Kravkov committed
455
456
	u16			gro_size;
	u16			full_page;
457
458
459
460
461
};

#define Q_STATS_OFFSET32(stat_name) \
			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)

462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
/* Per-CoS transmission queue state belonging to one fastpath. */
struct bnx2x_fp_txdata {

	struct sw_tx_bd		*tx_buf_ring;

	union eth_tx_bd_types	*tx_desc_ring;
	dma_addr_t		tx_desc_mapping;

	u32			cid;

	union db_prod		tx_db;

	/* producer/consumer indices for packets and BDs */
	u16			tx_pkt_prod;
	u16			tx_pkt_cons;
	u16			tx_bd_prod;
	u16			tx_bd_cons;

	unsigned long		tx_pkt;

	/* shortcut into the status block's tx consumer index */
	__le16			*tx_cons_sb;

	int			txq_index;
	struct bnx2x_fastpath	*parent_fp;
	int			tx_ring_size;
};

Dmitry Kravkov's avatar
Dmitry Kravkov committed
487
488
489
490
491
/* TPA (transparent packet aggregation) flavor for a fastpath. */
enum bnx2x_tpa_mode_t {
	TPA_MODE_LRO,
	TPA_MODE_GRO
};

Eliezer Tamir's avatar
Eliezer Tamir committed
492
struct bnx2x_fastpath {
493
	struct bnx2x		*bp; /* parent */
Eliezer Tamir's avatar
Eliezer Tamir committed
494

495
#define BNX2X_NAPI_WEIGHT       128
496
	struct napi_struct	napi;
Dmitry Kravkov's avatar
Dmitry Kravkov committed
497
	union host_hc_status_block	status_blk;
498
499
500
501
502
503
	/* chip independed shortcuts into sb structure */
	__le16			*sb_index_values;
	__le16			*sb_running_index;
	/* chip independed shortcut into rx_prods_offset memory */
	u32			ustorm_rx_prods_offset;

504
	u32			rx_buf_size;
Eric Dumazet's avatar
Eric Dumazet committed
505
	u32			rx_frag_size; /* 0 if kmalloced(), or rx_buf_size + NET_SKB_PAD */
506
	dma_addr_t		status_blk_mapping;
Eliezer Tamir's avatar
Eliezer Tamir committed
507

Dmitry Kravkov's avatar
Dmitry Kravkov committed
508
509
	enum bnx2x_tpa_mode_t	mode;

510
	u8			max_cos; /* actual number of active tx coses */
511
	struct bnx2x_fp_txdata	*txdata_ptr[BNX2X_MULTI_TX_COS];
Eliezer Tamir's avatar
Eliezer Tamir committed
512

513
514
	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
Eliezer Tamir's avatar
Eliezer Tamir committed
515
516

	struct eth_rx_bd	*rx_desc_ring;
517
	dma_addr_t		rx_desc_mapping;
Eliezer Tamir's avatar
Eliezer Tamir committed
518
519

	union eth_rx_cqe	*rx_comp_ring;
520
521
	dma_addr_t		rx_comp_mapping;

522
523
524
525
526
527
	/* SGE ring */
	struct eth_rx_sge	*rx_sge_ring;
	dma_addr_t		rx_sge_mapping;

	u64			sge_mask[RX_SGE_MASK_LEN];

528
	u32			cid;
529

530
531
	__le16			fp_hc_idx;

Dmitry Kravkov's avatar
Dmitry Kravkov committed
532
	u8			index;		/* number in fp array */
533
	u8			rx_queue;	/* index for skb_record */
Dmitry Kravkov's avatar
Dmitry Kravkov committed
534
	u8			cl_id;		/* eth client id */
535
536
537
	u8			cl_qzone_id;
	u8			fw_sb_id;	/* status block number in FW */
	u8			igu_sb_id;	/* status block number in HW */
538
539
540
541
542

	u16			rx_bd_prod;
	u16			rx_bd_cons;
	u16			rx_comp_prod;
	u16			rx_comp_cons;
543
544
545
	u16			rx_sge_prod;
	/* The last maximal completed SGE */
	u16			last_max_sge;
546
	__le16			*rx_cons_sb;
547
	unsigned long		rx_pkt,
Yitchak Gertner's avatar
Yitchak Gertner committed
548
				rx_calls;
549

550
	/* TPA related */
551
	struct bnx2x_agg_info	*tpa_info;
552
553
554
555
	u8			disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
	u64			tpa_queue_used;
#endif
Eilon Greenstein's avatar
Eilon Greenstein committed
556
557
558
559
560
561
	/* The size is calculated using the following:
	     sizeof name field from netdev structure +
	     4 ('-Xx-' string) +
	     4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
	char			name[FP_NAME_SIZE];
Eliezer Tamir's avatar
Eliezer Tamir committed
562
563
};

564
565
566
567
#define bnx2x_fp(bp, nr, var)	((bp)->fp[(nr)].var)
#define bnx2x_sp_obj(bp, fp)	((bp)->sp_objs[(fp)->index])
#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
568
569
570
571

/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU	2500

572
573
574
575
576
577
#define	FCOE_IDX_OFFSET		0

#define FCOE_IDX(bp)		(BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
				 FCOE_IDX_OFFSET)
#define bnx2x_fcoe_fp(bp)	(&bp->fp[FCOE_IDX(bp)])
#define bnx2x_fcoe(bp, var)	(bnx2x_fcoe_fp(bp)->var)
578
579
#define bnx2x_fcoe_inner_sp_obj(bp)	(&bp->sp_objs[FCOE_IDX(bp)])
#define bnx2x_fcoe_sp_obj(bp, var)	(bnx2x_fcoe_inner_sp_obj(bp)->var)
580
581
582
#define bnx2x_fcoe_tx(bp, var)	(bnx2x_fcoe_fp(bp)-> \
						txdata_ptr[FIRST_TX_COS_INDEX] \
						->var)
583
584


585
586
587
#define IS_ETH_FP(fp)		((fp)->index < BNX2X_NUM_ETH_QUEUES((fp)->bp))
#define IS_FCOE_FP(fp)		((fp)->index == FCOE_IDX((fp)->bp))
#define IS_FCOE_IDX(idx)	((idx) == FCOE_IDX(bp))
588
589
590


/* MC hsi */
591
592
#define MAX_FETCH_BD		13	/* HW max BDs per packet */
#define RX_COPY_THRESH		92
593

594
#define NUM_TX_RINGS		16
Eilon Greenstein's avatar
Eilon Greenstein committed
595
#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
596
597
#define NEXT_PAGE_TX_DESC_CNT	1
#define MAX_TX_DESC_CNT		(TX_DESC_CNT - NEXT_PAGE_TX_DESC_CNT)
598
599
600
#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD		(NUM_TX_BD - 1)
#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
601
#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
602
603
604
				  (MAX_TX_DESC_CNT - 1)) ? \
					(x) + 1 + NEXT_PAGE_TX_DESC_CNT : \
					(x) + 1)
605
606
#define TX_BD(x)		((x) & MAX_TX_BD)
#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
607

608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
/* number of NEXT_PAGE descriptors may be required during placement */
#define NEXT_CNT_PER_TX_PKT(bds)	\
				(((bds) + MAX_TX_DESC_CNT - 1) / \
				 MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
/* max BDs per tx packet w/o next_pages:
 * START_BD		- describes packed
 * START_BD(splitted)	- includes unpaged data segment for GSO
 * PARSING_BD		- for TSO and CSUM data
 * Frag BDs		- decribes pages for frags
 */
#define BDS_PER_TX_PKT		3
#define MAX_BDS_PER_TX_PKT	(MAX_SKB_FRAGS + BDS_PER_TX_PKT)
/* max BDs per tx packet including next pages */
#define MAX_DESC_PER_TX_PKT	(MAX_BDS_PER_TX_PKT + \
				 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))

624
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
625
#define NUM_RX_RINGS		8
626
#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
627
628
#define NEXT_PAGE_RX_DESC_CNT	2
#define MAX_RX_DESC_CNT		(RX_DESC_CNT - NEXT_PAGE_RX_DESC_CNT)
629
630
631
632
#define RX_DESC_MASK		(RX_DESC_CNT - 1)
#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD		(NUM_RX_BD - 1)
#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648

/* dropless fc calculations for BDs
 *
 * Number of BDs should as number of buffers in BRB:
 * Low threshold takes into account NEXT_PAGE_RX_DESC_CNT
 * "next" elements on each page
 */
#define NUM_BD_REQ		BRB_SIZE(bp)
#define NUM_BD_PG_REQ		((NUM_BD_REQ + MAX_RX_DESC_CNT - 1) / \
					      MAX_RX_DESC_CNT)
#define BD_TH_LO(bp)		(NUM_BD_REQ + \
				 NUM_BD_PG_REQ * NEXT_PAGE_RX_DESC_CNT + \
				 FW_DROP_LEVEL(bp))
#define BD_TH_HI(bp)		(BD_TH_LO(bp) + DROPLESS_FC_HEADROOM)

#define MIN_RX_AVAIL		((bp)->dropless_fc ? BD_TH_HI(bp) + 128 : 128)
649
650
651
652
653
654
655
656
657

#define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
					ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
#define MIN_RX_SIZE_TPA		(max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
#define MIN_RX_SIZE_NONTPA	(max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
								MIN_RX_AVAIL))

658
#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
659
660
661
				  (MAX_RX_DESC_CNT - 1)) ? \
					(x) + 1 + NEXT_PAGE_RX_DESC_CNT : \
					(x) + 1)
662
#define RX_BD(x)		((x) & MAX_RX_BD)
663

664
665
666
667
668
669
/*
 * As long as CQE is X times bigger than BD entry we have to allocate X times
 * more pages for CQ ring in order to keep it balanced with BD ring
 */
#define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
#define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
670
#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
671
672
#define NEXT_PAGE_RCQ_DESC_CNT	1
#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - NEXT_PAGE_RCQ_DESC_CNT)
673
674
675
#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
676
#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
677
678
679
				  (MAX_RCQ_DESC_CNT - 1)) ? \
					(x) + 1 + NEXT_PAGE_RCQ_DESC_CNT : \
					(x) + 1)
680
#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
681

682
683
684
685
686
687
688
689
690
691
692
693
694
695
/* dropless fc calculations for RCQs
 *
 * Number of RCQs should be as number of buffers in BRB:
 * Low threshold takes into account NEXT_PAGE_RCQ_DESC_CNT
 * "next" elements on each page
 */
#define NUM_RCQ_REQ		BRB_SIZE(bp)
#define NUM_RCQ_PG_REQ		((NUM_BD_REQ + MAX_RCQ_DESC_CNT - 1) / \
					      MAX_RCQ_DESC_CNT)
#define RCQ_TH_LO(bp)		(NUM_RCQ_REQ + \
				 NUM_RCQ_PG_REQ * NEXT_PAGE_RCQ_DESC_CNT + \
				 FW_DROP_LEVEL(bp))
#define RCQ_TH_HI(bp)		(RCQ_TH_LO(bp) + DROPLESS_FC_HEADROOM)

696

Eilon Greenstein's avatar
Eilon Greenstein committed
697
/* This is needed for determining of last_max */
698
699
#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))
700
701


702
703
#define BNX2X_SWCID_SHIFT	17
#define BNX2X_SWCID_MASK	((0x1 << BNX2X_SWCID_SHIFT) - 1)
704
705

/* used on a CID received from the HW */
706
#define SW_CID(x)			(le32_to_cpu(x) & BNX2X_SWCID_MASK)
707
708
709
#define CQE_CMD(x)			(le32_to_cpu(x) >> \
					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)

Yitchak Gertner's avatar
Yitchak Gertner committed
710
711
712
713
#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr_hi), \
						 le32_to_cpu((bd)->addr_lo))
#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))

714
715
#define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
#define BNX2X_DB_SHIFT			7	/* 128 bytes*/
716
717
718
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
719
720
721
#define DPM_TRIGER_TYPE			0x40
#define DOORBELL(bp, cid, val) \
	do { \
722
		writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
		       DPM_TRIGER_TYPE); \
	} while (0)


/* TX CSUM helpers */
#define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
				 skb->csum_offset)
#define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
					  skb->csum_offset))

#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)

#define XMIT_PLAIN			0
#define XMIT_CSUM_V4			0x1
#define XMIT_CSUM_V6			0x2
#define XMIT_CSUM_TCP			0x4
#define XMIT_GSO_V4			0x8
#define XMIT_GSO_V6			0x10

#define XMIT_CSUM			(XMIT_CSUM_V4 | XMIT_CSUM_V6)
#define XMIT_GSO			(XMIT_GSO_V4 | XMIT_GSO_V6)


746
/* stuff added to make the code fit 80Col */
747
748
749
750
751
#define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
752

753
754
#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG

Eilon Greenstein's avatar
Eilon Greenstein committed
755
756
757
758
759
#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
				(((le16_to_cpu(flags) & \
				   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
				 == PRS_FLAG_OVERETH_IPV4)
760
#define BNX2X_RX_SUM_FIX(cqe) \
Eilon Greenstein's avatar
Eilon Greenstein committed
761
	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
762

763
764
765
766
767
768

#define FP_USB_FUNC_OFF	\
			offsetof(struct cstorm_status_block_u, func)
#define FP_CSB_FUNC_OFF	\
			offsetof(struct cstorm_status_block_c, func)

769
#define HC_INDEX_ETH_RX_CQ_CONS		1
770

771
#define HC_INDEX_OOO_TX_CQ_CONS		4
772

773
774
775
#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5

#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6
776

777
778
779
#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7

#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
Eliezer Tamir's avatar
Eliezer Tamir committed
780

781
#define BNX2X_RX_SB_INDEX \
782
	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
Eliezer Tamir's avatar
Eliezer Tamir committed
783

784
785
786
787
#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0

#define BNX2X_TX_SB_INDEX_COS0 \
	(&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
788
789
790

/* end of fast path */

791
/* common */
Eliezer Tamir's avatar
Eliezer Tamir committed
792

793
struct bnx2x_common {
Eliezer Tamir's avatar
Eliezer Tamir committed
794

795
	u32			chip_id;
Eliezer Tamir's avatar
Eliezer Tamir committed
796
/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
797
#define CHIP_ID(bp)			(bp->common.chip_id & 0xfffffff0)
798

799
#define CHIP_NUM(bp)			(bp->common.chip_id >> 16)
800
801
802
#define CHIP_NUM_57710			0x164e
#define CHIP_NUM_57711			0x164f
#define CHIP_NUM_57711E			0x1650
Dmitry Kravkov's avatar
Dmitry Kravkov committed
803
#define CHIP_NUM_57712			0x1662
804
#define CHIP_NUM_57712_MF		0x1663
805
#define CHIP_NUM_57712_VF		0x166f
806
807
808
809
#define CHIP_NUM_57713			0x1651
#define CHIP_NUM_57713E			0x1652
#define CHIP_NUM_57800			0x168a
#define CHIP_NUM_57800_MF		0x16a5
810
#define CHIP_NUM_57800_VF		0x16a9
811
812
#define CHIP_NUM_57810			0x168e
#define CHIP_NUM_57810_MF		0x16ae
813
#define CHIP_NUM_57810_VF		0x16af
814
815
#define CHIP_NUM_57811			0x163d
#define CHIP_NUM_57811_MF		0x163e
816
#define CHIP_NUM_57811_VF		0x163f
Yuval Mintz's avatar
Yuval Mintz committed
817
818
819
820
821
#define CHIP_NUM_57840_OBSOLETE	0x168d
#define CHIP_NUM_57840_MF_OBSOLETE	0x16ab
#define CHIP_NUM_57840_4_10		0x16a1
#define CHIP_NUM_57840_2_20		0x16a2
#define CHIP_NUM_57840_MF		0x16a4
822
#define CHIP_NUM_57840_VF		0x16ad
823
824
825
#define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
Dmitry Kravkov's avatar
Dmitry Kravkov committed
826
#define CHIP_IS_57712(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712)
827
#define CHIP_IS_57712_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_VF)
828
829
830
#define CHIP_IS_57712_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_MF)
#define CHIP_IS_57800(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800)
#define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
831
#define CHIP_IS_57800_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_VF)
832
833
#define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
#define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
834
#define CHIP_IS_57810_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_VF)
835
836
#define CHIP_IS_57811(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_MF)
837
#define CHIP_IS_57811_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57811_VF)
Yuval Mintz's avatar
Yuval Mintz committed
838
839
840
841
842
843
#define CHIP_IS_57840(bp)		\
		((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
		 (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
		 (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
#define CHIP_IS_57840_MF(bp)	((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
				 (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
844
#define CHIP_IS_57840_VF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_VF)
845
846
#define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
					 CHIP_IS_57711E(bp))
Dmitry Kravkov's avatar
Dmitry Kravkov committed
847
#define CHIP_IS_E2(bp)			(CHIP_IS_57712(bp) || \
848
849
850
851
852
					 CHIP_IS_57712_MF(bp))
#define CHIP_IS_E3(bp)			(CHIP_IS_57800(bp) || \
					 CHIP_IS_57800_MF(bp) || \
					 CHIP_IS_57810(bp) || \
					 CHIP_IS_57810_MF(bp) || \
853
					 CHIP_IS_57810_VF(bp) || \
854
855
					 CHIP_IS_57811(bp) || \
					 CHIP_IS_57811_MF(bp) || \
856
					 CHIP_IS_57811_VF(bp) || \
857
					 CHIP_IS_57840(bp) || \
858
859
					 CHIP_IS_57840_MF(bp) || \
					 CHIP_IS_57840_VF(bp))
Dmitry Kravkov's avatar
Dmitry Kravkov committed
860
#define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
861
862
863
864
865
866
867
868
#define USES_WARPCORE(bp)		(CHIP_IS_E3(bp))
#define IS_E1H_OFFSET			(!CHIP_IS_E1(bp))

#define CHIP_REV_SHIFT			12
#define CHIP_REV_MASK			(0xF << CHIP_REV_SHIFT)
#define CHIP_REV_VAL(bp)		(bp->common.chip_id & CHIP_REV_MASK)
#define CHIP_REV_Ax			(0x0 << CHIP_REV_SHIFT)
#define CHIP_REV_Bx			(0x1 << CHIP_REV_SHIFT)
869
/* assume maximum 5 revisions */
870
#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV_VAL(bp) > 0x00005000)
871
872
/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
#define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
873
					 !(CHIP_REV_VAL(bp) & 0x00001000))
874
875
/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
#define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
876
					 (CHIP_REV_VAL(bp) & 0x00001000))
877
878
879
880

#define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))

881
882
#define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
883
884
885
886
887
888
889
890
891
892
#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
					   (CHIP_REV_SHIFT + 1)) \
						<< CHIP_REV_SHIFT)
#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
						CHIP_REV_SIM(bp) :\
						CHIP_REV_VAL(bp))
#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
					 (CHIP_REV(bp) == CHIP_REV_Bx))
#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
					 (CHIP_REV(bp) == CHIP_REV_Ax))
893
894
895
896
897
898
899
900
901
902
903
904
/* This define is used in two main places:
 * 1. In the early stages of nic_load, to know if to configrue Parser / Searcher
 * to nic-only mode or to offload mode. Offload mode is configured if either the
 * chip is E1x (where MIC_MODE register is not applicable), or if cnic already
 * registered for this port (which means that the user wants storage services).
 * 2. During cnic-related load, to know if offload mode is already configured in
 * the HW or needs to be configrued.
 * Since the transition from nic-mode to offload-mode in HW causes traffic
 * coruption, nic-mode is configured only in ports on which storage services
 * where never requested.
 */
#define CONFIGURE_NIC_MODE(bp)		(!CHIP_IS_E1x(bp) && !CNIC_ENABLED(bp))
Eliezer Tamir's avatar
Eliezer Tamir committed
905

906
	int			flash_size;
Dmitry Kravkov's avatar
Dmitry Kravkov committed
907
908
909
#define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
#define BNX2X_NVRAM_TIMEOUT_COUNT		30000
#define BNX2X_NVRAM_PAGE_SIZE			256
Eliezer Tamir's avatar
Eliezer Tamir committed
910

911
	u32			shmem_base;
912
	u32			shmem2_base;
913
	u32			mf_cfg_base;
Dmitry Kravkov's avatar
Dmitry Kravkov committed
914
	u32			mf2_cfg_base;
915
916

	u32			hw_config;
Yaniv Rosner's avatar
Yaniv Rosner committed
917

918
	u32			bc_ver;
919
920
921

	u8			int_block;
#define INT_BLOCK_HC			0
Dmitry Kravkov's avatar
Dmitry Kravkov committed
922
923
924
925
#define INT_BLOCK_IGU			1
#define INT_BLOCK_MODE_NORMAL		0
#define INT_BLOCK_MODE_BW_COMP		2
#define CHIP_INT_MODE_IS_NBC(bp)		\
926
			(!CHIP_IS_E1x(bp) &&	\
Dmitry Kravkov's avatar
Dmitry Kravkov committed
927
928
929
			!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))

930
	u8			chip_port_mode;
Dmitry Kravkov's avatar
Dmitry Kravkov committed
931
932
#define CHIP_4_PORT_MODE			0x0
#define CHIP_2_PORT_MODE			0x1
933
#define CHIP_PORT_MODE_NONE			0x2
Dmitry Kravkov's avatar
Dmitry Kravkov committed
934
935
#define CHIP_MODE(bp)			(bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
936
937

	u32			boot_mode;
938
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
#define BNX2X_IGU_STAS_MSG_VF_CNT 64
#define BNX2X_IGU_STAS_MSG_PF_CNT 4

/* Maximum wait (iterations) for an IGU attention acknowledge */
#define MAX_IGU_ATTN_ACK_TO       100

/* end of common */

/* port */
struct bnx2x_port {
	u32			pmf;
Yaniv Rosner's avatar
Yaniv Rosner committed
951

Yaniv Rosner's avatar
Yaniv Rosner committed
952
	u32			link_config[LINK_CONFIG_SIZE];
Eliezer Tamir's avatar
Eliezer Tamir committed
953

Yaniv Rosner's avatar
Yaniv Rosner committed
954
	u32			supported[LINK_CONFIG_SIZE];
955
956
957
/* link settings - missing defines */
#define SUPPORTED_2500baseX_Full	(1 << 15)

Yaniv Rosner's avatar
Yaniv Rosner committed
958
	u32			advertising[LINK_CONFIG_SIZE];
Eliezer Tamir's avatar
Eliezer Tamir committed
959
/* link settings - missing defines */
960
#define ADVERTISED_2500baseX_Full	(1 << 15)
Eliezer Tamir's avatar
Eliezer Tamir committed
961

962
	u32			phy_addr;
Yaniv Rosner's avatar
Yaniv Rosner committed
963
964
965
966

	/* used to synchronize phy accesses */
	struct mutex		phy_mutex;

967
	u32			port_stx;
Eliezer Tamir's avatar
Eliezer Tamir committed
968

969
970
	struct nig_stats	old_nig_stats;
};
Eliezer Tamir's avatar
Eliezer Tamir committed
971

972
973
/* end of port */

974
975
/* 32-bit-word offset of a statistic inside struct bnx2x_eth_stats */
#define STATS_OFFSET32(stat_name) \
			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)

/* slow path */

/* slow path work-queue */
extern struct workqueue_struct *bnx2x_wq;

/* SR-IOV VF connection-ID (CID) layout */
#define BNX2X_MAX_NUM_OF_VFS	64
#define BNX2X_VF_CID_WND	0
#define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
#define BNX2X_CLIENTS_PER_VF	1
#define BNX2X_FIRST_VF_CID	256
#define BNX2X_VF_CIDS		(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
#define BNX2X_VF_ID_INVALID	0xFF
/*
 * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
 * control by the number of fast-path status blocks supported by the
 * device (HW/FW). Each fast-path status block (FP-SB) aka non-default
 * status block represents an independent interrupts context that can
 * serve a regular L2 networking queue. However special L2 queues such
 * as the FCoE queue do not require a FP-SB and other components like
 * the CNIC may consume FP-SB reducing the number of possible L2 queues
 *
 * If the maximum number of FP-SB available is X then:
 * a. If CNIC is supported it consumes 1 FP-SB thus the max number of
For faster browsing, not all history is shown. View entire blame