	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __netxen_nic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

/* with rtnl_lock */
static void
__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
		return;
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	if (adapter->stop_port)
		adapter->stop_port(adapter);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_p3_free_mac_list(adapter);

	adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);

	netxen_napi_disable(adapter);

	netxen_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Used during suspend and firmware recovery */

static inline void
netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__netxen_nic_down(adapter, netdev);
	rtnl_unlock();

}

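/*
 * netxen_nic_attach - bring up the firmware context, allocate sw/hw
 * resources, post rx buffers and request the irq. On success the adapter
 * is marked NETXEN_ADAPTER_UP_MAGIC.
 */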
static int
netxen_nic_attach(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_tx_ring *tx_ring;
	int err, ring;

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
		return 0;

	err = netxen_init_firmware(adapter);
	if (err)
		return err;

	err = netxen_napi_add(adapter, netdev);
	if (err)
		return err;

	err = netxen_alloc_sw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting sw resources\n",
				netdev->name);
		return err;
	}

	err = netxen_alloc_hw_resources(adapter);
	if (err) {
		printk(KERN_ERR "%s: Error in setting hw resources\n",
				netdev->name);
		goto err_out_free_sw;
	}

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		tx_ring = adapter->tx_ring;
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				crb_cmd_producer[adapter->portnum]);
		tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
				crb_cmd_consumer[adapter->portnum]);
		tx_ring->producer = 0;
		tx_ring->sw_consumer = 0;

		netxen_nic_update_cmd_producer(adapter, tx_ring);
		netxen_nic_update_cmd_consumer(adapter, tx_ring);
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		netxen_post_rx_buffers(adapter, ring, rds_ring);
	}

	err = netxen_nic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
				netdev->name);
		goto err_out_free_rxbuf;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		netxen_nic_init_coalesce_defaults(adapter);

	netxen_create_sysfs_entries(adapter);

	adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_rxbuf:
	netxen_release_rx_buffers(adapter);
	netxen_free_hw_resources(adapter);
err_out_free_sw:
	netxen_free_sw_resources(adapter);
	return err;
}

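/*
 * netxen_nic_detach - release the irq, rings and buffers acquired by
 * netxen_nic_attach() and clear the UP state.
 */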
static void
netxen_nic_detach(struct netxen_adapter *adapter)
{
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
		return;

	netxen_remove_sysfs_entries(adapter);

	netxen_free_hw_resources(adapter);
	netxen_release_rx_buffers(adapter);
	netxen_nic_free_irq(adapter);
	netxen_napi_del(adapter);
	netxen_free_sw_resources(adapter);

	adapter->is_up = 0;
}

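/*
 * netxen_nic_reset_context - tear down and rebuild the hardware context
 * under the __NX_RESETTING bit, re-attaching the netdev when done.
 */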
int
netxen_nic_reset_context(struct netxen_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__NX_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__netxen_nic_down(adapter, netdev);

		netxen_nic_detach(adapter);

		if (netif_running(netdev)) {
			err = netxen_nic_attach(adapter);
			if (!err)
				err = __netxen_nic_up(adapter, netdev);

			if (err)
				goto done;
		}

		netif_device_attach(netdev);
	}

done:
	clear_bit(__NX_RESETTING, &adapter->state);
	return err;
}

static int
netxen_setup_netdev(struct netxen_adapter *adapter,
		struct net_device *netdev)
{
	int err = 0;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
		adapter->max_mc_count = 38;
	else
		adapter->max_mc_count = 16;

	netdev->netdev_ops	   = &netxen_netdev_ops;
	netdev->watchdog_timeo     = 2*HZ;

	netxen_nic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
	netdev->features |= (NETIF_F_GRO);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
		netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
	}

	if (adapter->pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;

	netdev->irq = adapter->msix_entries[0].vector;

	INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);

	if (netxen_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

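/*
 * netxen_nic_probe - PCI probe entry point: map the device registers,
 * start the firmware, set up interrupts and register the net device.
 */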
static int __devinit
netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct netxen_adapter *adapter = NULL;
	int pci_func_id = PCI_FUNC(pdev->devfn);
	uint8_t revision_id;
	int err, i;

	if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
		pr_warning("%s: chip revisions between 0x%x-0x%x "
				"will not be enabled.\n",
				module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
		return -ENODEV;
	}

	if ((err = pci_enable_device(pdev)))
		return err;
		return err;
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
		goto err_out_disable_pdev;

	if (NX_IS_REVISION_P3(pdev->revision))
		pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct netxen_adapter));
	if(!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev  = netdev;
	adapter->pdev    = pdev;
	adapter->ahw.pci_func  = pci_func_id;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	spin_lock_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);
	err = netxen_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;
	adapter->portnum = pci_func_id;

	err = netxen_nic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}
	/* Mezz cards have PCI function 0,2,3 enabled */
	switch (adapter->ahw.board_type) {
	case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
	case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
		if (pci_func_id >= 2)
			adapter->portnum = pci_func_id - 2;
		break;
	}

	err = netxen_start_firmware(adapter);
	if (err)
		goto err_out_decr_ref;

	/* See if the firmware gave us a virtual-physical port mapping. */
	adapter->physical_port = adapter->portnum;
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		i = NXRD32(adapter, CRB_V2P(adapter->portnum));
		if (i != 0x55555555)
			adapter->physical_port = i;
	}
	netxen_nic_clear_stats(adapter);
	netxen_setup_intr(adapter);
	err = netxen_setup_netdev(adapter, netdev);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);
	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case NETXEN_NIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case NETXEN_NIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	netxen_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	netxen_teardown_intr(adapter);

	netxen_free_dummy_dma(adapter);

err_out_decr_ref:
	nx_decr_dev_ref_cnt(adapter);

err_out_iounmap:
	netxen_cleanup_pci_map(adapter);
err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

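/*
 * netxen_nic_remove - PCI remove entry point: undo everything done in
 * netxen_nic_probe() and free the net device.
 */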
static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter;
	struct net_device *netdev;
	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	netxen_cancel_fw_work(adapter);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_nic_detach(adapter);
	nx_decr_dev_ref_cnt(adapter);

	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);
	clear_bit(__NX_RESETTING, &adapter->state);

	netxen_teardown_intr(adapter);
	netxen_remove_diag_entries(adapter);

	netxen_cleanup_pci_map(adapter);
	netxen_release_firmware(adapter);

	if (NX_IS_REVISION_P3(pdev->revision))
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);
}

static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	netxen_cancel_fw_work(adapter);

	if (netif_running(netdev))
		netxen_nic_down(adapter, netdev);

	cancel_work_sync(&adapter->tx_timeout_task);

	netxen_nic_detach(adapter);
	if (adapter->portnum == 0)
		netxen_free_dummy_dma(adapter);

	nx_decr_dev_ref_cnt(adapter);

	clear_bit(__NX_RESETTING, &adapter->state);
}

static int netxen_nic_attach_func(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	adapter->ahw.crb_win = -1;
	adapter->ahw.ocm_win = -1;
	err = netxen_start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = netxen_nic_attach(adapter);
		if (err)
			goto err_out;

		err = netxen_nic_up(adapter, netdev);
		if (err)
			goto err_out_detach;
		netxen_config_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);

	netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);

	return 0;

err_out_detach:
	netxen_nic_detach(adapter);
err_out:
	nx_decr_dev_ref_cnt(adapter);
	return err;
}
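
/* PCI AER error handlers: detach/attach the interface around a slot reset. */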
static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (nx_dev_request_aer(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	netxen_nic_detach_func(adapter);

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
{
	int err = 0;

	err = netxen_nic_attach_func(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static void netxen_io_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);
}

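/*
 * netxen_nic_shutdown - detach the interface, arm wake-on-LAN if the
 * board supports it and disable the PCI device.
 */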
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);

	netxen_nic_detach_func(adapter);

	if (pci_save_state(pdev))
		return;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
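/* Power management: suspend saves PCI state and arms WoL; resume re-attaches. */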
static int
netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct netxen_adapter *adapter = pci_get_drvdata(pdev);
	int retval;

	netxen_nic_detach_func(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (netxen_nic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int
netxen_nic_resume(struct pci_dev *pdev)
{
	return netxen_nic_attach_func(pdev);
}
#endif
/*
 * netxen_nic_open - Enables a network interface entry point
 */
static int netxen_nic_open(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (adapter->driver_mismatch)
		return -EIO;

	err = netxen_nic_attach(adapter);
	if (err)
		return err;

	err = __netxen_nic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	netxen_nic_detach(adapter);
	return err;
}

/*
 * netxen_nic_close - Disables a network interface entry point
 */
static int netxen_nic_close(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	__netxen_nic_down(adapter, netdev);

	return 0;
}

static void
netxen_tso_check(struct net_device *netdev,
		struct nx_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	u8 opcode = TX_ETHER_PKT;
	__be16 protocol = skb->protocol;
	u16 flags = 0, vid = 0;
	u32 producer;
	int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
	struct cmd_desc_type0 *hwdesc;
	struct vlan_ethhdr *vh;
	if (protocol == cpu_to_be16(ETH_P_8021Q)) {

		vh = (struct vlan_ethhdr *)skb->data;
		protocol = vh->h_vlan_encapsulated_proto;
		flags = FLAGS_VLAN_TAGGED;

	} else if (vlan_tx_tag_present(skb)) {

		flags = FLAGS_VLAN_OOB;
		vid = vlan_tx_tag_get(skb);
		netxen_set_tx_vlan_tci(first_desc, vid);
		vlan_oob = 1;
	}

	if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
			skb_shinfo(skb)->gso_size > 0) {

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		first_desc->total_hdr_length = hdr_len;
		if (vlan_oob) {
			first_desc->total_hdr_length += VLAN_HLEN;
			first_desc->tcp_hdr_offset = VLAN_HLEN;
			first_desc->ip_hdr_offset = VLAN_HLEN;
			/* Only in case of TSO on vlan device */
			flags |= FLAGS_VLAN_TAGGED;
		}
		opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
				TX_TCP_LSO6 : TX_TCP_LSO;
		tso = 1;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4proto;

		if (protocol == cpu_to_be16(ETH_P_IP)) {
			l4proto = ip_hdr(skb)->protocol;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCP_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDP_PKT;
		} else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
			l4proto = ipv6_hdr(skb)->nexthdr;

			if (l4proto == IPPROTO_TCP)
				opcode = TX_TCPV6_PKT;
			else if (l4proto == IPPROTO_UDP)
				opcode = TX_UDPV6_PKT;
		}
	}

	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
	first_desc->ip_hdr_offset += skb_network_offset(skb);
	netxen_set_tx_flags_opcode(first_desc, flags, opcode);

	if (!tso)
		return;

	/* For LSO, we need to copy the MAC/IP/TCP headers into
	 * the descriptor ring
	 */
	producer = tx_ring->producer;
	copied = 0;
	offset = 2;

	if (vlan_oob) {
		/* Create a TSO vlan header template for firmware */

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				hdr_len + VLAN_HLEN);

		vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
		skb_copy_from_linear_data(skb, vh, 12);
		vh->h_vlan_proto = htons(ETH_P_8021Q);
		vh->h_vlan_TCI = htons(vid);
		skb_copy_from_linear_data_offset(skb, 12,
				(char *)vh + 16, copy_len - 16);

		copied = copy_len - VLAN_HLEN;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	while (copied < hdr_len) {

		copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
				(hdr_len - copied));

		hwdesc = &tx_ring->desc_head[producer];
		tx_ring->cmd_buf_arr[producer].skb = NULL;

		skb_copy_from_linear_data_offset(skb, copied,
				 (char *)hwdesc + offset, copy_len);

		copied += copy_len;
		offset = 0;

		producer = get_next_index(producer, tx_ring->num_desc);
	}

	tx_ring->producer = producer;
	barrier();
}
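
/*
 * netxen_map_tx_skb - DMA-map the skb head and every fragment into
 * pbuf->frag_array, unwinding the mappings on failure.
 */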
static int
netxen_map_tx_skb(struct pci_dev *pdev,
		struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{
	struct netxen_skb_frag *nf;
	struct skb_frag_struct *frag;
	int i, nr_frags;
	dma_addr_t map;

	nr_frags = skb_shinfo(skb)->nr_frags;
	nf = &pbuf->frag_array[0];

	map = pci_map_single(pdev, skb->data,
			skb_headlen(skb), PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, map))
		goto out_err;
	nf->dma = map;
	nf->length = skb_headlen(skb);

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		nf = &pbuf->frag_array[i+1];

		map = pci_map_page(pdev, frag->page, frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, map))
			goto unwind;

		nf->dma = map;
		nf->length = frag->size;
	}

	return 0;

unwind:
	while (--i >= 0) {
		nf = &pbuf->frag_array[i+1];
		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
	}

	nf = &pbuf->frag_array[0];
	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);

out_err:
	return -ENOMEM;
}

static inline void
netxen_clear_cmddesc(u64 *desc)
{
	desc[0] = 0ULL;
	desc[2] = 0ULL;
}

static netdev_tx_t
netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_cmd_buffer *pbuf;
	struct netxen_skb_frag *buffrag;
	struct cmd_desc_type0 *hwdesc, *first_desc;
	struct pci_dev *pdev;
	u32 producer;
	int frag_count, no_of_desc;
	int i, k;
	u32 num_txd = tx_ring->num_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;

	/* 4 fragments per cmd desc */
	no_of_desc = (frag_count + 3) >> 2;
	if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) {
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	producer = tx_ring->producer;
	pbuf = &tx_ring->cmd_buf_arr[producer];
	pdev = adapter->pdev;
	if (netxen_map_tx_skb(pdev, skb, pbuf))
		goto drop_packet;
	pbuf->skb = skb;
	pbuf->frag_count = frag_count;
	first_desc = hwdesc = &tx_ring->desc_head[producer];
	netxen_clear_cmddesc((u64 *)hwdesc);
	netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
	netxen_set_tx_port(first_desc, adapter->portnum);
	for (i = 0; i < frag_count; i++) {
		k = i % 4;

		if ((k == 0) && (i > 0)) {
			/* move to next desc.*/
			producer = get_next_index(producer, num_txd);
			hwdesc = &tx_ring->desc_head[producer];
			netxen_clear_cmddesc((u64 *)hwdesc);
			tx_ring->cmd_buf_arr[producer].skb = NULL;
		}

		buffrag = &pbuf->frag_array[i];
		hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
		switch (k) {
		case 0:
			hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
			break;
		case 1:
			hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
			break;
		case 2:
			hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
			break;
		case 3:
			hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
			break;
		}
	}

	tx_ring->producer = get_next_index(producer, num_txd);
	netxen_tso_check(netdev, tx_ring, first_desc, skb);
	netxen_nic_update_cmd_producer(adapter, tx_ring);
	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;

	return NETDEV_TX_OK;

drop_packet:
	adapter->stats.txdropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
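
/*
 * netxen_nic_check_temp - read the on-board temperature state and log it;
 * returns 1 if the hardware shut itself down due to overheating.
 */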
static int netxen_nic_check_temp(struct netxen_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	uint32_t temp, temp_state, temp_val;
	int rv = 0;

	temp = NXRD32(adapter, CRB_TEMP_STATE);

	temp_state = nx_get_temp_state(temp);
	temp_val = nx_get_temp_val(temp);

	if (temp_state == NX_TEMP_PANIC) {
		printk(KERN_ALERT
		       "%s: Device temperature %d degrees C exceeds"
		       " maximum allowed. Hardware has been shut down.\n",
		       netdev->name, temp_val);
		rv = 1;
	} else if (temp_state == NX_TEMP_WARN) {
		if (adapter->temp == NX_TEMP_NORMAL) {
			printk(KERN_ALERT
			       "%s: Device temperature %d degrees C "
			       "exceeds operating range."
			       " Immediate action needed.\n",
			       netdev->name, temp_val);
		}
	} else {
		if (adapter->temp == NX_TEMP_WARN) {
			printk(KERN_INFO
			       "%s: Device temperature is now %d degrees C"
			       " in normal range.\n", netdev->name,
			       temp_val);
		}
	}
	adapter->temp = temp_state;
	return rv;
}

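/*
 * netxen_advert_link_change - propagate a link state transition to the
 * stack by updating carrier state and the tx queue.
 */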
void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->ahw.linkup && !linkup) {
		printk(KERN_INFO "%s: %s NIC Link is down\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 0;
		if (netif_running(netdev)) {
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);
		}
		adapter->link_changed = !adapter->has_link_events;
	} else if (!adapter->ahw.linkup && linkup) {
		printk(KERN_INFO "%s: %s NIC Link is up\n",
		       netxen_nic_driver_name, netdev->name);
		adapter->ahw.linkup = 1;
		if (netif_running(netdev)) {
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
		adapter->link_changed = !adapter->has_link_events;
	}
}

static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
{
	u32 val, port, linkup;

	port = adapter->physical_port;

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		val = NXRD32(adapter, CRB_XG_STATE_P3);
		val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
		linkup = (val == XG_LINK_UP_P3);
	} else {
		val = NXRD32(adapter, CRB_XG_STATE);
		if (adapter->ahw.port_type == NETXEN_NIC_GBE)
			linkup = (val >> port) & 1;
		else {
			val = (val >> port*8) & 0xff;
			linkup = (val == XG_LINK_UP);
		}
	}

	netxen_advert_link_change(adapter, linkup);
}

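/* Transmit timeout handler: defer the recovery work to tx_timeout_task. */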
static void netxen_tx_timeout(struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	if (test_bit(__NX_RESETTING, &adapter->state))
		return;

	dev_err(&netdev->dev, "transmit timeout, resetting.\n");
	schedule_work(&adapter->tx_timeout_task);
}

static void netxen_tx_timeout_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, tx_timeout_task);
	if (!netif_running(adapter->netdev))
		return;

	if (test_and_set_bit(__NX_RESETTING, &adapter->state))
		return;

	if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
		goto request_reset;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* try to scrub interrupt */
		netxen_napi_disable(adapter);
		adapter->netdev->trans_start = jiffies;
		netxen_napi_enable(adapter);

		netif_wake_queue(adapter->netdev);