	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		/* ask the core to blink at 2 on/off cycles per second */
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}

static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;

	/* only valid if in constant ITR mode */
	switch (adapter->rx_itr_setting) {
	case 0:
		/* throttling disabled */
		ec->rx_coalesce_usecs = 0;
		break;
	case 1:
		/* dynamic ITR mode */
		ec->rx_coalesce_usecs = 1;
		break;
	default:
		/* fixed interrupt rate mode */
		ec->rx_coalesce_usecs = 1000000/adapter->rx_eitr_param;
		break;
	}

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
		return 0;

	/* only valid if in constant ITR mode */
	switch (adapter->tx_itr_setting) {
	case 0:
		/* throttling disabled */
		ec->tx_coalesce_usecs = 0;
		break;
	case 1:
		/* dynamic ITR mode */
		ec->tx_coalesce_usecs = 1;
		break;
	default:
		ec->tx_coalesce_usecs = 1000000/adapter->tx_eitr_param;
		break;
	}

	return 0;
}

/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
			     struct ethtool_coalesce *ec)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		return false;

	/* if interrupt rate is too high then disable RSC */
	if (ec->rx_coalesce_usecs != 1 &&
	    ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
			e_info(probe, "rx-usecs set too low, "
				      "disabling RSC\n");
			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
			return true;
		}
	} else {
		/* check the feature flag value and enable RSC if necessary */
		if ((netdev->features & NETIF_F_LRO) &&
		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			e_info(probe, "rx-usecs set to %d, "
				      "re-enabling RSC\n",
			       ec->rx_coalesce_usecs);
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			return true;
		}
	}
	return false;
}
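
/*
 * Illustrative note (not part of the original source): with a static
 * rx-usecs value of N the resulting rate is 1000000/N interrupts/sec, so
 * any N other than 1 that is at or below 1000000/IXGBE_MAX_RSC_INT_RATE
 * drives the rate to IXGBE_MAX_RSC_INT_RATE or above and the check above
 * turns RSC off; larger values (or 1 for dynamic mode) let RSC be
 * re-enabled when NETIF_F_LRO is set.
 */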

static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
	   && ec->tx_coalesce_usecs)
		return -EINVAL;

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;

	if (ec->rx_coalesce_usecs > 1) {
		/* check the limits */
		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);

		/* store the value in ints/second */
		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
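		/*
		 * Illustrative arithmetic (not in the original source):
		 * rx-usecs = 125 stores 1000000/125 = 8000 interrupts/sec.
		 */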

		/* static value of interrupt rate */
		adapter->rx_itr_setting = adapter->rx_eitr_param;
		/* clear the lower bit as it's used for dynamic state */
		adapter->rx_itr_setting &= ~1;
	} else if (ec->rx_coalesce_usecs == 1) {
		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);

		/* 1 means dynamic mode */
		adapter->rx_eitr_param = 20000;
		adapter->rx_itr_setting = 1;
	} else {
		/* check the old value and enable RSC if necessary */
		need_reset = ixgbe_update_rsc(adapter, ec);

		/*
		 * any other value means disable eitr, which is best
		 * served by setting the interrupt rate very high
		 */
		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->rx_itr_setting = 0;
	}

	if (ec->tx_coalesce_usecs > 1) {
		/*
		 * don't have to worry about max_int as above because
		 * tx vectors don't do hardware RSC (an rx function)
		 */
		/* check the limits */
		if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
		    (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
			return -EINVAL;

		/* store the value in ints/second */
		adapter->tx_eitr_param = 1000000/ec->tx_coalesce_usecs;

		/* static value of interrupt rate */
		adapter->tx_itr_setting = adapter->tx_eitr_param;

		/* clear the lower bit as it's used for dynamic state */
		adapter->tx_itr_setting &= ~1;
	} else if (ec->tx_coalesce_usecs == 1) {
		/* 1 means dynamic mode */
		adapter->tx_eitr_param = 10000;
		adapter->tx_itr_setting = 1;
	} else {
		adapter->tx_eitr_param = IXGBE_MAX_INT_RATE;
		adapter->tx_itr_setting = 0;
	}

	/* MSI/MSIx Interrupt Mode */
	if (adapter->flags &
	    (IXGBE_FLAG_MSIX_ENABLED | IXGBE_FLAG_MSI_ENABLED)) {
		int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_vectors; i++) {
			q_vector = adapter->q_vector[i];
			if (q_vector->txr_count && !q_vector->rxr_count)
				/* tx only */
				q_vector->eitr = adapter->tx_eitr_param;
			else
				/* rx only or mixed */
				q_vector->eitr = adapter->rx_eitr_param;
			ixgbe_write_eitr(q_vector);
		}
	/* Legacy Interrupt Mode */
	} else {
		q_vector = adapter->q_vector[0];
		q_vector->eitr = adapter->rx_eitr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset) {
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}

static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool need_reset = false;
	int rc;

#ifdef CONFIG_IXGBE_DCB
	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
	    !(data & ETH_FLAG_RXVLAN))
		return -EINVAL;
#endif

	need_reset = (data & ETH_FLAG_RXVLAN) !=
		     (netdev->features & NETIF_F_HW_VLAN_RX);

	if ((data & ETH_FLAG_RXHASH) &&
	    !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
		return -EOPNOTSUPP;

	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
				  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
				  ETH_FLAG_RXHASH);
	if (rc)
		return rc;

	/* if state changes we need to update adapter->flags and reset */
	if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
	    (!!(data & ETH_FLAG_LRO) !=
	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
		if ((data & ETH_FLAG_LRO) &&
		    (!adapter->rx_itr_setting ||
		     (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
			e_info(probe, "rx-usecs set too low, "
				      "not enabling RSC.\n");
		} else {
			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
			switch (adapter->hw.mac.type) {
			case ixgbe_mac_82599EB:
				need_reset = true;
				break;
			case ixgbe_mac_X540: {
				int i;
				for (i = 0; i < adapter->num_rx_queues; i++) {
					struct ixgbe_ring *ring =
					                  adapter->rx_ring[i];
					if (adapter->flags2 &
					    IXGBE_FLAG2_RSC_ENABLED) {
						ixgbe_configure_rscctl(adapter,
						                       ring);
					} else {
						ixgbe_clear_rscctl(adapter,
						                   ring);
					}
				}
			}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
	    (!(data & ETH_FLAG_NTUPLE))) {
		/* turn off Flow Director perfect, set hash and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		need_reset = true;
	} else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
	           (data & ETH_FLAG_NTUPLE)) {
		/* turn off Flow Director hash, enable perfect and reset */
		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
		need_reset = true;
	} else {
		/* no state change */
	}

	if (need_reset) {
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}
	return 0;
}

static int ixgbe_set_rx_ntuple(struct net_device *dev,
                               struct ethtool_rx_ntuple *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
	union ixgbe_atr_input input_struct;
	struct ixgbe_atr_input_masks input_masks;
	int target_queue;
	int err;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Tx queues.
	 */
	if ((fs->action >= adapter->num_tx_queues) ||
	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
		return -EINVAL;

	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));

	/* record flow type */
	switch (fs->flow_type) {
	case IPV4_FLOW:
		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
		break;
	case TCP_V4_FLOW:
		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	default:
		return -1;
	}

	/* copy vlan tag minus the CFI bit */
	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
		if (!fs->vlan_tag_mask) {
			input_masks.vlan_id_mask = htons(0xEFFF);
		} else {
			switch (~fs->vlan_tag_mask & 0xEFFF) {
			/* all of these are valid vlan-mask values */
			case 0xEFFF:
			case 0xE000:
			case 0x0FFF:
			case 0x0000:
				input_masks.vlan_id_mask =
					htons(~fs->vlan_tag_mask);
				break;
			/* exit with error if vlan-mask is invalid */
			default:
				e_err(drv, "Partial VLAN ID or "
				      "priority mask in vlan-mask is not "
				      "supported by hardware\n");
				return -1;
			}
		}
	}
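
	/*
	 * Illustrative arithmetic (not part of the original source): a
	 * user-supplied vlan-mask of 0x1FFF inverts to ~0x1FFF & 0xEFFF ==
	 * 0xE000, the priority-only value accepted above, whereas e.g.
	 * 0x00F0 would hit the default case and be rejected.
	 */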

	/* make sure we only use the first 2 bytes of user data */
	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
		if (!(fs->data_mask & 0xFFFF)) {
			input_masks.flex_mask = 0xFFFF;
		} else if (~fs->data_mask & 0xFFFF) {
			e_err(drv, "Partial user-def-mask is not "
			      "supported by hardware\n");
			return -1;
		}
	}

	/*
	 * Copy input into formatted structures
	 *
	 * These assignments are based on the following logic
	 * If neither input or mask are set assume value is masked out.
	 * If input is set, but mask is not mask should default to accept all.
	 * If input is not set, but mask is set then mask likely results in 0.
	 * If input is set and mask is set then assign both.
	 */
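	/*
	 * Illustrative example (not part of the original source): a rule
	 * giving ip4src = 192.168.10.1 with an all-zero mask takes the
	 * "input set, mask not set" case below, so src_ip_mask[0] is set to
	 * 0xFFFFFFFF and the filter keys on that exact source address.
	 */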
	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
		if (!fs->m_u.tcp_ip4_spec.ip4src)
			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
		else
			input_masks.src_ip_mask[0] =
				~fs->m_u.tcp_ip4_spec.ip4src;
	}
	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
		if (!fs->m_u.tcp_ip4_spec.ip4dst)
			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
		else
			input_masks.dst_ip_mask[0] =
				~fs->m_u.tcp_ip4_spec.ip4dst;
	}
	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
		if (!fs->m_u.tcp_ip4_spec.psrc)
			input_masks.src_port_mask = 0xFFFF;
		else
			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
	}
	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
		if (!fs->m_u.tcp_ip4_spec.pdst)
			input_masks.dst_port_mask = 0xFFFF;
		else
			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
	}

	/* determine if we need to drop or route the packet */
	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
		target_queue = MAX_RX_QUEUES;
	else
		target_queue = fs->action;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
						  &input_struct,
						  &input_masks, 0,
						  target_queue);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err ? -1 : 0;
}

static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_rx_csum            = ixgbe_get_rx_csum,
	.set_rx_csum            = ixgbe_set_rx_csum,
	.get_tx_csum            = ixgbe_get_tx_csum,
	.set_tx_csum            = ixgbe_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = ixgbe_set_tso,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_flags              = ethtool_op_get_flags,
	.set_flags              = ixgbe_set_flags,
	.set_rx_ntuple          = ixgbe_set_rx_ntuple,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}