/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};
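
/* Stat "offsets" below are indices into struct vnic_tx_stats/vnic_rx_stats
 * viewed as an array of u64 counters, hence the divide by 8 (sizeof(u64)).
 */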

#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

int enic_sriov_enabled(struct enic *enic)
{
	return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}

static int enic_is_sriov_vf(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
}

int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
	return vf >= 0 && vf < enic->num_vfs;
#else
	return 0;
#endif
}
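
/* Queue/interrupt layout: RQ completions use CQs [0..rq_count-1] and WQ
 * completions use CQs [rq_count..rq_count+wq_count-1].  In MSI-X mode each
 * CQ has its own interrupt, followed by one error and one notify interrupt;
 * legacy INTx uses the three fixed "vectors" 0 (I/O), 1 (error), 2 (notify).
 */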

static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}
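
/* INTx and MSI use a single interrupt for both TX and RX, so the two
 * coalescing values must match; MSI-X programs each WQ and RQ interrupt
 * independently.  Values are clamped to the device's advertised maximum.
 */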

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				tx_coalesce_usecs);
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				rx_coalesce_usecs);
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}
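
/* Service one send completion: free the transmitted buffers for this WQ
 * and restart the netdev queue once enough descriptors are free again.
 */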

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))
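
/* Legacy INTx handler: read the PBA (pending bit array) to find out which
 * of the notify, error or I/O "vectors" fired and handle each in turn.
 */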

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}
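
/* The enic_queue_wq_skb_*() helpers below post an skb to the work queue as
 * one or more DMA-mapped descriptors: a plain/VLAN path, an L4 checksum
 * offload path, and a TSO path that splits buffers into pieces no larger
 * than WQ_ENET_MAX_DESC_LEN.
 */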

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	const skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		enic_queue_wq_desc_cont(wq, skb,
			skb_frag_dma_map(&enic->pdev->dev,
					 frag, 0, skb_frag_size(frag),
					 DMA_TO_DEVICE),
			skb_frag_size(frag),
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment. The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
				len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= skb_frag_size(frag);
		frag_len_left = skb_frag_size(frag);
		offset = 0;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
						    offset, len,
						    DMA_TO_DEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),		/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}
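
/* Pick the TSO, checksum-offload or plain descriptor path for this skb and
 * apply VLAN or loopback tagging as needed.
 */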

static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
						struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);
	netdev->addr_assign_type &= ~NET_ADDR_RANDOM;

	return 0;
}
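
/* Change the MAC of a dynamic (port profile) vNIC: if the interface is
 * running, the old station address is removed from the adapter first and
 * the new one registered afterwards.
 */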

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}
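
/* Program the RX packet filter.  When the unicast or multicast address
 * count exceeds the perfect filter limits, promiscuous or all-multicast
 * mode is turned on instead of programming individual addresses.
 */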

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (is_valid_ether_addr(mac) || is_zero_ether_addr(mac)) {
		if (vf == PORT_SELF_VF) {
			memcpy(pp->vf_mac, mac, ETH_ALEN);
			return 0;
		} else {
			/*
			 * For sriov vf's set the mac in hw
			 */
			ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
				vnic_dev_set_mac_addr, mac);
			return enic_dev_status_to_errno(err);
		}
	} else
		return -EINVAL;
}
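
/* Handle an IFLA_VF_PORT request: record the requested port profile
 * attributes, work out which MAC address to use, and hand the associate or
 * disassociate request to the firmware, restoring the old profile if the
 * request fails.
 */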

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	struct enic_port_profile *pp;
	int err = 0, restore_pp = 1;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, pp, sizeof(*enic->pp));
	memset(pp, 0, sizeof(*enic->pp));

	pp->set |= ENIC_SET_REQUEST;
	pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		pp->set |= ENIC_SET_NAME;
		memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		pp->set |= ENIC_SET_INSTANCE;
		memcpy(pp->instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		pp->set |= ENIC_SET_HOST;
		memcpy(pp->host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	if (vf == PORT_SELF_VF) {
		/* Special case handling: mac came from IFLA_VF_MAC */
		if (!is_zero_ether_addr(prev_pp.vf_mac))
			memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN);

		if (is_zero_ether_addr(netdev->dev_addr))
			eth_hw_addr_random(netdev);
	} else {
		/* SR-IOV VF: get mac from adapter */
		ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic,
			vnic_dev_get_mac_addr, pp->mac_addr);
		if (err) {
			netdev_err(netdev, "Error getting mac for vf %d\n", vf);
			memcpy(pp, &prev_pp, sizeof(*pp));
			return enic_dev_status_to_errno(err);
		}
	}

	err = enic_process_set_pp_request(enic, vf, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(pp, &prev_pp, sizeof(*pp));
		} else {
			memset(pp, 0, sizeof(*pp));
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		pp->set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (pp->request == PORT_REQUEST_DISASSOCIATE) {
			memset(pp->mac_addr, 0, ETH_ALEN);
			if (vf == PORT_SELF_VF)
				memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	if (vf == PORT_SELF_VF)
		memset(pp->vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	struct enic_port_profile *pp;
	int err;

	ENIC_PP_BY_INDEX(enic, vf, pp, &err);
	if (err)
		return err;

	if (!(pp->set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, vf, pp->request, &response);
	if (err)
		return err;

	if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
	    nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
	    ((pp->set & ENIC_SET_NAME) &&
	     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
	    ((pp->set & ENIC_SET_INSTANCE) &&
	     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
		     pp->instance_uuid)) ||
	    ((pp->set & ENIC_SET_HOST) &&
	     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}
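
/* Allocate a receive buffer sized for the current MTU plus the VLAN
 * Ethernet header, map it for DMA and post it to the receive queue.
 */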

static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}
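
/* Per-descriptor receive completion handler: decode the CQ descriptor,
 * count CRC/truncation errors, and hand good frames to the stack via GRO
 * or netif_receive_skb() with checksum and VLAN-strip results applied.
 */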

static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);