/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/u64_stats_sync.h>

#include "be_hw.h"
#include "be_roce.h"

#define DRV_VER			"4.6.62.0u"
#define DRV_NAME		"be2net"
#define BE_NAME			"Emulex BladeEngine2"
#define BE3_NAME		"Emulex BladeEngine3"
#define OC_NAME			"Emulex OneConnect"
#define OC_NAME_BE		OC_NAME	"(be3)"
#define OC_NAME_LANCER		OC_NAME "(Lancer)"
#define OC_NAME_SH		OC_NAME "(Skyhawk)"
#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"

#define BE_VENDOR_ID 		0x19a2
#define EMULEX_VENDOR_ID	0x10df
#define BE_DEVICE_ID1		0x211
#define BE_DEVICE_ID2		0x221
#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4           0xe228   /* Device id for VF in Lancer */
#define OC_DEVICE_ID5		0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6		0x728   /* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1	0xE602
#define OC_SUBSYS_DEVICE_ID2	0xE642
#define OC_SUBSYS_DEVICE_ID3	0xE612
#define OC_SUBSYS_DEVICE_ID4	0xE652

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head reallocation */
#define BE_RX_SKB_ALLOC_SIZE (BE_HDR_LEN + 64)

#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96u
#define	BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE3_MAX_RSS_QS		8
#define BE2_MAX_RSS_QS		4
#define MAX_RSS_QS		BE3_MAX_RSS_QS
#define MAX_RX_QS		(MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */

#define MAX_TX_QS		8
#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	(MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
#define BE_TX_BUDGET		256
#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}
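
/* With power-of-two ring sizes the mask replaces a '%': for example,
 * MODULO(1025, RX_Q_LEN) is 1025 & 1023 == 1, so an index that runs past
 * the end of a 1024-entry ring wraps back to the start. The BUG_ON above
 * enforces the power-of-two requirement.
 */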

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
	return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void *queue_index_node(struct be_queue_info *q, u16 index)
{
	return q->dma_mem.va + index * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
	index_inc(&q->head, q->len);
}

static inline void index_dec(u16 *index, u16 limit)
{
	*index = MODULO((*index - 1), limit);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
	index_inc(&q->tail, q->len);
}
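
/* Typical producer/consumer pattern with the helpers above (a sketch of how
 * be_main.c is expected to use them, not a contract): the producer fills the
 * entry at queue_head_node(q), calls queue_head_inc(q) and bumps q->used;
 * the consumer reads queue_tail_node(q), calls queue_tail_inc(q) and
 * decrements q->used once the entry has been processed.
 */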

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 eqd;		/* configured val when aic is off */
	u32 cur_eqd;		/* in usecs */

	u8 idx;			/* array index */
	u16 tx_budget;
	u16 spurious_intr;
	struct napi_struct napi;
	struct be_adapter *adapter;
} ____cacheline_aligned_in_smp;

struct be_mcc_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	bool rearm_cq;
};

struct be_tx_stats {
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_reqs;
	u64 tx_wrbs;
	u64 tx_compl;
	ulong tx_jiffies;
	u32 tx_stops;
	struct u64_stats_sync sync;
	struct u64_stats_sync sync_compl;
};

struct be_tx_obj {
	u32 db_offset;
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
} ____cacheline_aligned_in_smp;

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;
	bool last_page_user;
};

struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_pkts_prev;
	ulong rx_jiffies;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW had no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	u32 rx_pps;		/* pkts per second */
	struct u64_stats_sync sync;
};

struct be_rx_compl_info {
	u32 rss_hash;
	u16 vlan_tag;
	u16 pkt_size;
	u16 rxq_idx;
	u16 port;
	u8 vlanf;
	u8 num_rcvd;
	u8 err;
	u8 ipf;
	u8 tcpf;
	u8 udpf;
	u8 ip_csum;
	u8 l4_csum;
	u8 ipv6;
	u8 vtm;
	u8 pkt_type;
};

struct be_rx_obj {
	struct be_adapter *adapter;
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_compl_info rxcp;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct be_rx_stats stats;
	u8 rss_id;
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
} ____cacheline_aligned_in_smp;

struct be_drv_stats {
	u32 be_on_die_temperature;
	u32 eth_red_drops;
	u32 rx_drops_no_pbuf;
	u32 rx_drops_no_txpb;
	u32 rx_drops_no_erx_descr;
	u32 rx_drops_no_tpre_descr;
	u32 rx_drops_too_many_frags;
	u32 forwarded_packets;
	u32 rx_drops_mtu;
	u32 rx_crc_errors;
	u32 rx_alignment_symbol_errors;
	u32 rx_pause_frames;
	u32 rx_priority_pause_frames;
	u32 rx_control_frames;
	u32 rx_in_range_errors;
	u32 rx_out_range_errors;
	u32 rx_frame_too_long;
	u32 rx_address_filtered;
	u32 rx_dropped_too_small;
	u32 rx_dropped_too_short;
	u32 rx_dropped_header_too_small;
	u32 rx_dropped_tcp_length;
	u32 rx_dropped_runt;
	u32 rx_ip_checksum_errs;
	u32 rx_tcp_checksum_errs;
	u32 rx_udp_checksum_errs;
	u32 tx_pauseframes;
	u32 tx_priority_pauseframes;
	u32 tx_controlframes;
	u32 rxpp_fifo_overflow_drop;
	u32 rx_input_fifo_overflow_drop;
	u32 pmem_fifo_overflow_drop;
	u32 jabber_events;
};

struct be_vf_cfg {
	unsigned char mac_addr[ETH_ALEN];
	int if_handle;
	int pmac_id;
	u16 def_vid;
	u16 vlan_tag;
	u32 tx_rate;
};

enum vf_state {
	ENABLED = 0,
	ASSIGNED = 1
};

#define BE_FLAGS_LINK_STATUS_INIT		1
#define BE_FLAGS_WORKER_SCHEDULED		(1 << 3)
#define BE_FLAGS_NAPI_ENABLED			(1 << 9)
#define BE_UC_PMAC_COUNT		30
#define BE_VF_UC_PMAC_COUNT		2
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)

struct phy_info {
	u8 transceiver;
	u8 autoneg;
	u8 fc_autoneg;
	u8 port_type;
	u16 phy_type;
	u16 interface_type;
	u32 misc_params;
	u16 auto_speeds_supported;
	u16 fixed_speeds_supported;
	int link_speed;
	u32 dac_cable_len;
	u32 advertising;
	u32 supported;
};

struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
	u8 __iomem *db;		/* Door Bell */

	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purposes */
	struct be_dma_mem mbox_mem_alloced;

	struct be_mcc_obj mcc_obj;
	spinlock_t mcc_lock;	/* For serializing mcc cmds to BE card */
	spinlock_t mcc_cq_lock;

	u32 num_msix_vec;
	u32 num_evt_qs;
	struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
	struct msix_entry msix_entries[MAX_MSIX_VECTORS];
	bool isr_registered;

	/* TX Rings */
	u32 num_tx_qs;
	struct be_tx_obj tx_obj[MAX_TX_QS];

	/* Rx rings */
	u32 num_rx_qs;
	struct be_rx_obj rx_obj[MAX_RX_QS];
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */

	struct be_drv_stats drv_stats;
	u16 vlans_added;
	u8 vlan_tag[VLAN_N_VID];
	u8 vlan_prio_bmap;	/* Available Priority BitMap */
	u16 recommended_prio;	/* Recommended Priority */
	struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */

	struct be_dma_mem stats_cmd;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;
	u16 work_counter;

	struct delayed_work func_recovery_work;
	u32 flags;
	u32 cmd_privileges;
	/* Ethtool knobs and info */
	char fw_ver[FW_VER_LEN];
	int if_handle;		/* Used to configure filtering */
	u32 *pmac_id;		/* MAC addr handle used by BE card */
	u32 beacon_state;	/* for set_phys_id */

	bool eeh_error;
	bool fw_timeout;
	bool hw_error;

	u32 port_num;
	bool promiscuous;
	u32 function_mode;
	u32 function_caps;
	u32 rx_fc;		/* Rx flow control */
	u32 tx_fc;		/* Tx flow control */
	bool stats_cmd_sent;
	u32 if_type;
	struct {
		u32 size;
		u32 total_size;
		u64 io_addr;
	} roce_db;
	u32 num_msix_roce_vec;
	struct ocrdma_dev *ocrdma_dev;
	struct list_head entry;

	u32 flash_status;
	struct completion flash_compl;

	u32 num_vfs;		/* Number of VFs provisioned by PF driver */
	u32 dev_num_vfs;	/* Number of VFs supported by HW */
	u8 virtfn;
	struct be_vf_cfg *vf_cfg;
	bool be3_native;
	u32 sli_family;
	u8 hba_port_num;
	u16 pvid;
	struct phy_info phy;
	u8 wol_cap;
	bool wol;
	u32 uc_macs;		/* Count of secondary UC MAC programmed */
	u16 asic_rev;
	u16 qnq_vid;
	u32 msg_enable;
	int be_get_temp_freq;
	u16 max_mcast_mac;
	u16 max_tx_queues;
	u16 max_rss_queues;
	u16 max_rx_queues;
	u16 max_pmac_cnt;
	u16 max_vlans;
	u16 max_event_queues;
	u32 if_cap_flags;
	u8 pf_number;
	u64 rss_flags;
};

#define be_physfn(adapter)		(!adapter->virtfn)
#define	sriov_enabled(adapter)		(adapter->num_vfs > 0)
#define	sriov_want(adapter)		(adapter->dev_num_vfs && num_vfs && \
					 be_physfn(adapter))
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
		i++, vf_cfg++)

#define ON				1
#define OFF				0

#define lancer_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID3 || \
				 adapter->pdev->device == OC_DEVICE_ID4)

#define skyhawk_chip(adapter)	(adapter->pdev->device == OC_DEVICE_ID5 || \
				 adapter->pdev->device == OC_DEVICE_ID6)

#define BE3_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID2 || \
				 adapter->pdev->device == OC_DEVICE_ID2)

#define BE2_chip(adapter)	(adapter->pdev->device == BE_DEVICE_ID1 || \
				 adapter->pdev->device == OC_DEVICE_ID1)

#define BEx_chip(adapter)	(BE3_chip(adapter) || BE2_chip(adapter))

#define be_roce_supported(adapter)	(skyhawk_chip(adapter) && \
					(adapter->function_mode & RDMA_ENABLED))

extern const struct ethtool_ops be_ethtool_ops;

#define msix_enabled(adapter)		(adapter->num_msix_vec > 0)
#define num_irqs(adapter)		(msix_enabled(adapter) ?	\
						adapter->num_msix_vec : 1)
#define tx_stats(txo)			(&(txo)->stats)
#define rx_stats(rxo)			(&(rxo)->stats)

/* The default RXQ is the last RXQ */
#define default_rxo(adpt)		(&adpt->rx_obj[adpt->num_rx_qs - 1])

#define for_all_rx_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < adapter->num_rx_qs;	\
		i++, rxo++)

/* Skip the default non-rss queue (last one) */
#define for_all_rss_queues(adapter, rxo, i)				\
	for (i = 0, rxo = &adapter->rx_obj[i]; i < (adapter->num_rx_qs - 1);\
		i++, rxo++)

#define for_all_tx_queues(adapter, txo, i)				\
	for (i = 0, txo = &adapter->tx_obj[i]; i < adapter->num_tx_qs;	\
		i++, txo++)

#define for_all_evt_queues(adapter, eqo, i)				\
	for (i = 0, eqo = &adapter->eq_obj[i]; i < adapter->num_evt_qs; \
		i++, eqo++)
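
/* Usage sketch for the for_all_* iterators above; process_rxq() is a
 * hypothetical per-queue helper used only for illustration:
 *
 *	struct be_rx_obj *rxo;
 *	int i;
 *
 *	for_all_rx_queues(adapter, rxo, i)
 *		process_rxq(rxo);
 */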

#define is_mcc_eqo(eqo)			(eqo->idx == 0)
#define mcc_eqo(adapter)		(&adapter->eq_obj[0])

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) 				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + 	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
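
/* For example, a buffer of 8192 bytes starting 0x100 bytes into a 4K page
 * spans three 4K pages: (0x100 + 8192 + 4095) >> 12 == 3.
 */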

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)  				\
		(((size_t)&(((_struct *)0)->field))%32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
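
/* Usage sketch: the amap layouts in be_hw.h describe each field as a u8
 * array whose length is the field's width in bits, so for a hypothetical
 * 'struct amap_foo { u8 rsvd0[5]; u8 vlan[1]; ... };'
 *
 *	AMAP_SET_BITS(struct amap_foo, vlan, dword_ptr, 1);
 *	vlan = AMAP_GET_BITS(struct amap_foo, vlan, dword_ptr);
 *
 * set and read bit 5 of the first dword without hand-computing offsets or
 * masks. 'struct amap_foo' is illustrative only.
 */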

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}
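
/* On little-endian builds swap_dws() compiles to an empty function: the
 * hardware descriptors are little-endian, so the be_dws_*() wrappers only
 * need to byte-swap on big-endian hosts.
 */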

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
	u32 addr;

	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);

	mac[5] = (u8)(addr & 0xFF);
	mac[4] = (u8)((addr >> 8) & 0xFF);
	mac[3] = (u8)((addr >> 16) & 0xFF);
	/* Use the OUI from the current MAC address */
	memcpy(mac, adapter->netdev->dev_addr, 3);
}

static inline bool be_multi_rxq(const struct be_adapter *adapter)
{
	return adapter->num_rx_qs > 1;
}

static inline bool be_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
}

static inline bool be_hw_error(struct be_adapter *adapter)
{
	return adapter->eeh_error || adapter->hw_error;
}

static inline void  be_clear_all_error(struct be_adapter *adapter)
{
	adapter->eeh_error = false;
	adapter->hw_error = false;
	adapter->fw_timeout = false;
}

static inline bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (!be_physfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
		u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);

/*
 * Internal functions to initialize and clean up the roce device.
 */
extern void be_roce_dev_add(struct be_adapter *);
extern void be_roce_dev_remove(struct be_adapter *);

/*
 * Internal functions to open and close the roce device during ifup/ifdown.
 */
extern void be_roce_dev_open(struct be_adapter *);
extern void be_roce_dev_close(struct be_adapter *);

#endif				/* BE_H */