/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#else
static char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
#endif
#define MAJ 3
#define MIN 9
#define BUILD 15
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
	__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
				"Copyright (c) 1999-2012 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
	[board_X540] = &ixgbe_X540_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
			    void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs,
		 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
#endif /* CONFIG_PCI_IOV */

static unsigned int allow_unsupported_sfp;
module_param(allow_unsupported_sfp, uint, 0);
MODULE_PARM_DESC(allow_unsupported_sfp,
		 "Allow unsupported and untested SFP+ modules on 82599-based adapters");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
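/* Usage note (illustrative, not from this file): the module parameters
 * above are given at load time, e.g.
 *   modprobe ixgbe max_vfs=8 allow_unsupported_sfp=1 debug=3
 */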

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}
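/* The __IXGBE_SERVICE_SCHED bit acts as a gate so that at most one
 * service task is queued at a time; it is cleared again in
 * ixgbe_service_event_complete() below.
 */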

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_clear_bit();
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}

struct ixgbe_reg_info {
	u32 ofs;
	char *name;
};

static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {

	/* General Registers */
	{IXGBE_CTRL, "CTRL"},
	{IXGBE_STATUS, "STATUS"},
	{IXGBE_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{IXGBE_EICR, "EICR"},

	/* RX Registers */
	{IXGBE_SRRCTL(0), "SRRCTL"},
	{IXGBE_DCA_RXCTRL(0), "DRXCTL"},
	{IXGBE_RDLEN(0), "RDLEN"},
	{IXGBE_RDH(0), "RDH"},
	{IXGBE_RDT(0), "RDT"},
	{IXGBE_RXDCTL(0), "RXDCTL"},
	{IXGBE_RDBAL(0), "RDBAL"},
	{IXGBE_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{IXGBE_TDBAL(0), "TDBAL"},
	{IXGBE_TDBAH(0), "TDBAH"},
	{IXGBE_TDLEN(0), "TDLEN"},
	{IXGBE_TDH(0), "TDH"},
	{IXGBE_TDT(0), "TDT"},
	{IXGBE_TXDCTL(0), "TXDCTL"},

	/* List Terminator */
	{}
};


/*
 * ixgbe_regdump - register printout routine
 */
static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
{
	int i = 0, j = 0;
	char rname[16];
	u32 regs[64];

	switch (reginfo->ofs) {
	case IXGBE_SRRCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
		break;
	case IXGBE_DCA_RXCTRL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		break;
	case IXGBE_RDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
		break;
	case IXGBE_RDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
		break;
	case IXGBE_RDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
		break;
	case IXGBE_RXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		break;
	case IXGBE_RDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
		break;
	case IXGBE_RDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
		break;
	case IXGBE_TDBAL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
		break;
	case IXGBE_TDBAH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
		break;
	case IXGBE_TDLEN(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
		break;
	case IXGBE_TDH(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
		break;
	case IXGBE_TDT(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
		break;
	case IXGBE_TXDCTL(0):
		for (i = 0; i < 64; i++)
			regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name,
			IXGBE_READ_REG(hw, reginfo->ofs));
		return;
	}

	for (i = 0; i < 8; i++) {
		snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
		pr_err("%-15s", rname);
		for (j = 0; j < 8; j++)
			pr_cont(" %08x", regs[i*8+j]);
		pr_cont("\n");
	}

}

/*
 * ixgbe_dump - Print registers, tx-rings and rx-rings
 */
static void ixgbe_dump(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_reg_info *reginfo;
	int n = 0;
	struct ixgbe_ring *tx_ring;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct ixgbe_ring *rx_ring;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            "
			"trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name,
			netdev->state,
			netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
	     reginfo->name; reginfo++) {
		ixgbe_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			   n, tx_ring->next_to_use, tx_ring->next_to_clean,
			   (u64)dma_unmap_addr(tx_buffer, dma),
			   dma_unmap_len(tx_buffer, len),
			   tx_buffer->next_to_watch,
			   (u64)tx_buffer->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 |  PAYLEN  | PORTS  | IDX | STA | DCMD  |DTYP |  RSV |  DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63       46 45    40 39 36 35 32 31   24 23 20 19              0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] "
			"[PlPOIdStDDt Ln] [bi->dma       ] "
			"leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			tx_desc = IXGBE_TX_DESC(tx_ring, i);
			tx_buffer = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(tx_buffer, dma),
				dma_unmap_len(tx_buffer, len),
				tx_buffer->next_to_watch,
				(u64)tx_buffer->time_stamp,
				tx_buffer->skb);
			if (i == tx_ring->next_to_use &&
				i == tx_ring->next_to_clean)
				pr_cont(" NTC/U\n");
			else if (i == tx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == tx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

			if (netif_msg_pktdata(adapter) &&
			    tx_buffer->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS, 16, 1,
					tx_buffer->skb->data,
					dma_unmap_len(tx_buffer, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("%5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 16 15   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] "
			"[  HeadBuf   DD] [bi->dma       ] [bi->skb] "
			"<-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] "
			"[vl er S cks ln] ---------------- [bi->skb] "
			"<-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			rx_buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IXGBE_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
			if (staterr & IXGBE_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("RWB[0x%03X]     %016llX "
					"%016llX ---------------- %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					rx_buffer_info->skb);
			} else {
				pr_info("R  [0x%03X]     %016llX "
					"%016llX %016llX %p", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)rx_buffer_info->dma,
					rx_buffer_info->skb);

				if (netif_msg_pktdata(adapter) &&
				    rx_buffer_info->dma) {
					print_hex_dump(KERN_INFO, "",
					   DUMP_PREFIX_ADDRESS, 16, 1,
					   page_address(rx_buffer_info->page) +
						    rx_buffer_info->page_offset,
					   ixgbe_rx_bufsz(rx_ring), true);
				}
			}

			if (i == rx_ring->next_to_use)
				pr_cont(" NTU\n");
			else if (i == rx_ring->next_to_clean)
				pr_cont(" NTC\n");
			else
				pr_cont("\n");

		}
	}

exit:
	return;
}

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
			ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/**
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
			   u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}
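/*
 * Illustrative example (not driver code): on 82598, mapping Rx queue 5
 * (direction 0) to MSI-X vector 2 computes
 *   index = (((0 * 64) + 5) >> 2) & 0x1F = 1
 *   bit offset = 8 * (5 & 0x3) = 8
 * so bits 15:8 of IVAR(1) are set to (2 | IXGBE_IVAR_ALLOC_VAL).
 */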

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
					  u64 qmask)
{
	u32 mask;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
		break;
	default:
		break;
	}
}

void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
				      struct ixgbe_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	int i;
	u32 data;

	if ((hw->fc.current_mode != ixgbe_fc_full) &&
	    (hw->fc.current_mode != ixgbe_fc_rx_pause))
		return;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		break;
	default:
		data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	}
	hwstats->lxoffrxc += data;

	/* refill credits (no tx hang) if we received xoff */
	if (!data)
		return;

	for (i = 0; i < adapter->num_tx_queues; i++)
		clear_bit(__IXGBE_HANG_CHECK_ARMED,
			  &adapter->tx_ring[i]->state);
}

static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_hw_stats *hwstats = &adapter->stats;
	u32 xoff[8] = {0};
	int i;
	bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;

	if (adapter->ixgbe_ieee_pfc)
		pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);

	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) {
		ixgbe_update_xoff_rx_lfc(adapter);
		return;
	}

	/* update stats for each tc, only valid with PFC enabled */
	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
			break;
		default:
			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		}
		hwstats->pxoffrxc[i] += xoff[i];
	}

	/* disarm tx queues that have received xoff frames */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
		u8 tc = tx_ring->dcb_tc;

		if (xoff[tc])
			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}
}

static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
{
	return ring->stats.packets;
}

static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
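/*
 * Illustrative example (not driver code): with ring->count = 512,
 * head (TDH) = 500 and tail (TDT) = 10, the ring has wrapped, so the
 * pending count is tail + count - head = 10 + 512 - 500 = 22 descriptors.
 */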

static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
{
	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
	bool ret = false;

	clear_check_for_tx_hang(tx_ring);

	/*
	 * Check for a hung queue, but be thorough. This verifies
	 * that a transmit has been completed since the previous
	 * check AND there is at least one packet pending. The
	 * ARMED bit is set to indicate a potential hang. The
	 * bit is cleared if a pause frame is received to remove
	 * false hang detection due to PFC or 802.3x frames. By
	 * requiring this to fail twice we avoid races with
	 * pfc clearing the ARMED bit and conditions where we
	 * run the check_tx_hang logic with a transmit completion
	 * pending but without time to complete it yet.
	 */
	if ((tx_done_old == tx_done) && tx_pending) {
		/* make sure it is true for two checks in a row */
		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
				       &tx_ring->state);
	} else {
		/* update completed stats and continue */
		tx_ring->tx_stats.tx_done_old = tx_done;
		/* reset the countdown */
		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
	}

	return ret;
}
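/*
 * Illustrative timeline (not driver code): if watchdog check N sees no new
 * completions while descriptors are pending, test_and_set_bit() arms the
 * ARMED bit and returns false; if check N+1 still sees no progress, the
 * bit is already set and the function reports a hang. A pause frame
 * received in between clears ARMED (see ixgbe_update_xoff_received) and
 * restarts the two-check countdown.
 */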

/**
 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
 * @adapter: driver private struct
 **/
static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
{

	/* Do the reset outside of interrupt context */
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
		ixgbe_service_event_schedule(adapter);
	}
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
			       struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

#ifdef CONFIG_IXGBE_PTP
		if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
			ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
#endif

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
		/* schedule immediate reset if we believe we hung */
		struct ixgbe_hw *hw = &adapter->hw;
		e_err(drv, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
			tx_ring->next_to_use, i,
			tx_ring->tx_buffer_info[i].time_stamp, jiffies);

		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		e_info(probe,
		       "tx hang %d detected on queue %d, resetting adapter\n",
			adapter->tx_timeout_count + 1, tx_ring->queue_index);

		/* schedule immediate reset if we believe we hung */
		ixgbe_tx_timeout_reset(adapter);

		/* the adapter is about to reset, no point in enabling stuff */
		return true;
	}

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index)
		    && !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
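/*
 * Note (assumption, not stated in this file): the !!budget return value
 * above signals whether this ring was fully cleaned before the work limit
 * ran out; the NAPI poll routine is expected to keep polling while it is
 * false.
 */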

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
	u16 reg_offset;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx);
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_offset = IXGBE_DCA_TXCTRL_82599(tx_ring->reg_idx);
		txctrl <<= IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		/* for unknown hardware do not write register */
		return;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= IXGBE_DCA_TXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_TXCTRL_DATA_RRO_EN |
		  IXGBE_DCA_TXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, reg_offset, txctrl);
}

static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring,
				int cpu)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu);
	u8 reg_idx = rx_ring->reg_idx;

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		rxctrl <<= IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599;
		break;
	default:
		break;
	}

	/*
	 * We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
		  IXGBE_DCA_RXCTRL_DATA_DCA_EN |
		  IXGBE_DCA_RXCTRL_DESC_DCA_EN;

	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}

static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	ixgbe_for_each_ring(ring, q_vector->tx)
		ixgbe_update_tx_dca(adapter, ring, cpu);

	ixgbe_for_each_ring(ring, q_vector->rx)
		ixgbe_update_rx_dca(adapter, ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;
