/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>
#include <linux/crash_dump.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter, "
		 "deprecated parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
		 "deprecated parameter");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
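/* For example, "modprobe cxgb4 msi=1" restricts the driver to MSI/INTx; the
 * current value is also exposed under /sys/module/cxgb4/parameters/msi.
 */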

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
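/* With the default 2-byte offset a 14-byte Ethernet header ends at byte 16,
 * so the IP header begins on a 4-byte boundary; with rx_dma_offset = 0 it
 * would begin at byte 14 and be misaligned.
 */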
static int rx_dma_offset = 2;

#ifdef CONFIG_PCI_IOV
/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *const uld_str[] = { "RDMA", "iSCSI", "iSCSIT" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_state_init(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
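/* Each unit of dbfifo_int_thresh corresponds to 64 doorbell FIFO entries, so
 * the default of 10 arms the interrupt at 640 entries.
 */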

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
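	/* Each address sets the bit chosen by hash_mac_addr() in the 64-bit
	 * vector; if any entry is unicast, the unicast flag is passed to
	 * t4_set_addr_hash() as well.
	 */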
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to hash addr list
	 * so on the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;
	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev =
				q->adap->port[q->adap->chan_map[port]];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	if (ulds[q->uld].lro_flush)
		ulds[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = ulds[q->uld].lro_rx_handler(q->adap->uld_handle[q->uld],
						  rsp, gl, &q->lro_mgr,
						  &q->napi);
	else
		ret = ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld],
					      rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_iscsirxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iscsi%d",
			 adap->port[0]->name, i);

	for_each_iscsitrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-iSCSIT%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}
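
/* MSI-X vector usage: vector 0 is the non-data interrupt, vector 1 serves the
 * firmware event queue, and vectors 2 and up are handed out to the Ethernet,
 * iSCSI, iSCSI-T, RDMA and RDMA CIQ response queues in that order, which is
 * why msi_index below starts at 2.
 */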

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, iscsiqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int iscsitqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsirxq(s, iscsiqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsirxq[iscsiqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_iscsitrxq(s, iscsitqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->iscsitrxq[iscsitqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--iscsitqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsitrxq[iscsitqidx].rspq);
	while (--iscsiqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->iscsirxq[iscsiqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_iscsirxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsirxq[i].rspq);
	for_each_iscsitrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec,
			 &s->iscsitrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *	cxgb4_write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 *	Should never be called before setting up sge eth rx queues
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}

	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids, bool lro)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
	 * don't forget to update the following, which need to be
	 * kept synchronized with any changes here.
	 *
	 * 1. The calculations of MAX_INGQ in cxgb4.h.
	 *
	 * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
	 *    to accommodate any new/deleted Ingress Queues
	 *    which need MSI-X Vectors.
	 *
	 * 3. Update sge_qinfo_show() to include information on the
	 *    new/deleted queues.
	 */
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],