Skip to content
Snippets Groups Projects
3c59x.c 101 KiB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			int bus_status = ioread32(ioaddr + PktStatus);
Linus Torvalds's avatar
Linus Torvalds committed
			/* 0x80000000 PCI master abort. */
			/* 0x40000000 PCI target abort. */
			if (vortex_debug)
				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
Linus Torvalds's avatar
Linus Torvalds committed

			/* In this case, blow the card away */
			/* Must not enter D3 or we can't legally issue the reset! */
			vortex_down(dev, 0);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			/* Reset Rx fifo and upload logic */
			issue_and_wait(dev, RxReset|0x07);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			/* enable 802.1q VLAN tagged frames */
			set_8021q_mode(dev, 1);
			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		}
	}

	if (do_tx_reset) {
		issue_and_wait(dev, TxReset|reset_mask);
		iowrite16(TxEnable, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}

static netdev_tx_t
Linus Torvalds's avatar
Linus Torvalds committed
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
Linus Torvalds's avatar
Linus Torvalds committed
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;
		iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
Linus Torvalds's avatar
Linus Torvalds committed
				ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
Linus Torvalds's avatar
Linus Torvalds committed
		vp->tx_skb = skb;
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
Linus Torvalds's avatar
Linus Torvalds committed
		dev_kfree_skb (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
Linus Torvalds's avatar
Linus Torvalds committed
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		}
	}

	dev->trans_start = jiffies;

	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;

		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
Linus Torvalds's avatar
Linus Torvalds committed
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
				  pr_debug("%s: Tx error, status %2.2x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
						 dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
Linus Torvalds's avatar
Linus Torvalds committed
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
Linus Torvalds's avatar
Linus Torvalds committed
		}
	}
static netdev_tx_t
Linus Torvalds's avatar
Linus Torvalds committed
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	/* Calculate the next Tx descriptor entry. */
	int entry = vp->cur_tx % TX_RING_SIZE;
	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	unsigned long flags;

	if (vortex_debug > 6) {
		pr_debug("boomerang_start_xmit()\n");
		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
Linus Torvalds's avatar
Linus Torvalds committed
	}

	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
		if (vortex_debug > 0)
			pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
Linus Torvalds's avatar
Linus Torvalds committed
				   dev->name);
		netif_stop_queue(dev);
Linus Torvalds's avatar
Linus Torvalds committed
	}

	vp->tx_skbuff[entry] = skb;

	vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
	if (skb->ip_summed != CHECKSUM_PARTIAL)
Linus Torvalds's avatar
Linus Torvalds committed
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
	else
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);

	if (!skb_shinfo(skb)->nr_frags) {
		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
										skb->len, PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
	} else {
		int i;

		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
										skb->len-skb->data_len, PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			vp->tx_ring[entry].frag[i+1].addr =
					cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
											   (void*)page_address(frag->page) + frag->page_offset,
											   frag->size, PCI_DMA_TODEVICE));

			if (i == skb_shinfo(skb)->nr_frags-1)
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
			else
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
		}
	}
#else
	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif

	spin_lock_irqsave(&vp->lock, flags);
	/* Wait for the stall to complete. */
	issue_and_wait(dev, DownStall);
	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
	if (ioread32(ioaddr + DownListPtr) == 0) {
		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
Linus Torvalds's avatar
Linus Torvalds committed
		vp->queued_packet++;
	}

	vp->cur_tx++;
	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
		netif_stop_queue (dev);
	} else {					/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		/* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
		 * were selected, this would corrupt DN_COMPLETE. No?
		 */
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
	}
	iowrite16(DownUnstall, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
	spin_unlock_irqrestore(&vp->lock, flags);
	dev->trans_start = jiffies;
Linus Torvalds's avatar
Linus Torvalds committed
}

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */

/*
 * This is the ISR for the vortex series chips.
 * full_bus_master_tx == 0 && full_bus_master_rx == 0
 */

static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
Linus Torvalds's avatar
Linus Torvalds committed
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int status;
	int work_done = max_interrupt_work;
	int handled = 0;

	ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	spin_lock(&vp->lock);

	status = ioread16(ioaddr + EL3_STATUS);
Linus Torvalds's avatar
Linus Torvalds committed

	if (vortex_debug > 6)
		pr_debug("vortex_interrupt(). status=0x%4x\n", status);
Linus Torvalds's avatar
Linus Torvalds committed

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs cause this */
	handled = 1;

	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
Linus Torvalds's avatar
Linus Torvalds committed

	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
					   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				pr_debug("	TX room bit was handled.\n");
Linus Torvalds's avatar
Linus Torvalds committed
			/* There's room in the FIFO for a full-sized packet. */
			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
			netif_wake_queue (dev);
		}

		if (status & DMADone) {
			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
Linus Torvalds's avatar
Linus Torvalds committed
				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (ioread16(ioaddr + TxFree) > 1536) {
Linus Torvalds's avatar
Linus Torvalds committed
					/*
					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
					netif_stop_queue(dev);
				}
			}
		}
		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			vortex_error(dev, status);
		}

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				dev->name, status);
Linus Torvalds's avatar
Linus Torvalds committed
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
Linus Torvalds's avatar
Linus Torvalds committed
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
Linus Torvalds's avatar
Linus Torvalds committed
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
Linus Torvalds's avatar
Linus Torvalds committed

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
			   dev->name, status);
handler_exit:
	spin_unlock(&vp->lock);
	return IRQ_RETVAL(handled);
}

/*
 * This is the ISR for the boomerang series chips.
 * full_bus_master_tx == 1 && full_bus_master_rx == 1
 */

static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
Linus Torvalds's avatar
Linus Torvalds committed
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int status;
	int work_done = max_interrupt_work;

	ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed

	/*
	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
	 * and boomerang_start_xmit
	 */
	spin_lock(&vp->lock);

	status = ioread16(ioaddr + EL3_STATUS);
Linus Torvalds's avatar
Linus Torvalds committed

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);
Linus Torvalds's avatar
Linus Torvalds committed

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */

	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
Linus Torvalds's avatar
Linus Torvalds committed
		goto handler_exit;
	}

	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
Linus Torvalds's avatar
Linus Torvalds committed
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
					   dev->name, status);
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
Linus Torvalds's avatar
Linus Torvalds committed
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				if (ioread32(ioaddr + DownListPtr) ==
Linus Torvalds's avatar
Linus Torvalds committed
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;			/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;			/* It still hasn't been processed. */
#endif
Linus Torvalds's avatar
Linus Torvalds committed
				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
Linus Torvalds's avatar
Linus Torvalds committed
					int i;
					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
							pci_unmap_single(VORTEX_PCI(vp),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
											 PCI_DMA_TODEVICE);
#else
					pci_unmap_single(VORTEX_PCI(vp),
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
Linus Torvalds's avatar
Linus Torvalds committed
				}
				/* dev->stats.tx_packets++;  Counted below. */
Linus Torvalds's avatar
Linus Torvalds committed
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
Linus Torvalds's avatar
Linus Torvalds committed
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				dev->name, status);
Linus Torvalds's avatar
Linus Torvalds committed
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
Linus Torvalds's avatar
Linus Torvalds committed
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
Linus Torvalds's avatar
Linus Torvalds committed
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);
Linus Torvalds's avatar
Linus Torvalds committed

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
Linus Torvalds's avatar
Linus Torvalds committed

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
			   dev->name, status);
handler_exit:
	spin_unlock(&vp->lock);
	return IRQ_HANDLED;
}

static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
Linus Torvalds's avatar
Linus Torvalds committed
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
Linus Torvalds's avatar
Linus Torvalds committed
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
Linus Torvalds's avatar
Linus Torvalds committed
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
Linus Torvalds's avatar
Linus Torvalds committed
					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
									   pkt_len, PCI_DMA_FROMDEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
Linus Torvalds's avatar
Linus Torvalds committed
						;
					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					ioread32_rep(ioaddr + RX_FIFO,
					             skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
Linus Torvalds's avatar
Linus Torvalds committed
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
Linus Torvalds's avatar
Linus Torvalds committed
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
Linus Torvalds's avatar
Linus Torvalds committed
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
Linus Torvalds's avatar
Linus Torvalds committed
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
Linus Torvalds's avatar
Linus Torvalds committed
		}
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}

static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int rx_status;
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
Linus Torvalds's avatar
Linus Torvalds committed

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
Linus Torvalds's avatar
Linus Torvalds committed
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
Linus Torvalds's avatar
Linus Torvalds committed
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
Linus Torvalds's avatar
Linus Torvalds committed
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
Linus Torvalds's avatar
Linus Torvalds committed
					   pkt_len);
				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_copy++;
			} else {
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
Linus Torvalds's avatar
Linus Torvalds committed
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
Linus Torvalds's avatar
Linus Torvalds committed
			if (skb == NULL) {
				static unsigned long last_jif;
				if (time_after(jiffies, last_jif + 10 * HZ)) {
					pr_warning("%s: memory shortage\n", dev->name);
Linus Torvalds's avatar
Linus Torvalds committed
					last_jif = jiffies;
				}
				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
				break;			/* Bad news!  */
			}
			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
Linus Torvalds's avatar
Linus Torvalds committed
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
	}
	return 0;
}

/*
 * If we've hit a total OOM refilling the Rx ring we poll once a second
 * for some memory.  Otherwise there is no way to restart the rx process.
 */
/* Timer callback: retry the Rx-ring refill after an OOM (arg is the netdev). */
static void
rx_oom_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct vortex_private *vp = netdev_priv(dev);

	spin_lock_irq(&vp->lock);
	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */
		boomerang_rx(dev);
	if (vortex_debug > 1) {
		pr_debug("%s: rx_oom_timer %s\n", dev->name,
			((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
	}
	spin_unlock_irq(&vp->lock);
}

static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed

	netif_stop_queue (dev);

	del_timer_sync(&vp->rx_oom_timer);
	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed

	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed

	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
Linus Torvalds's avatar
Linus Torvalds committed
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);
Linus Torvalds's avatar
Linus Torvalds committed

	if (final_down && VORTEX_PCI(vp)) {
Linus Torvalds's avatar
Linus Torvalds committed
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}

static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int i;

	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
Linus Torvalds's avatar
Linus Torvalds committed
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
Linus Torvalds's avatar
Linus Torvalds committed
	}
#endif
Linus Torvalds's avatar
Linus Torvalds committed
	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				pci_unmap_single(	VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
						pci_unmap_single(VORTEX_PCI(vp),
										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
										 PCI_DMA_TODEVICE);
#else
				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}

static void
dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
	struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
		if (vp->full_bus_master_tx) {
			int i;
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */
Linus Torvalds's avatar
Linus Torvalds committed

			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
Linus Torvalds's avatar
Linus Torvalds committed
					vp->full_bus_master_tx,
					vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
					vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err("  Transmit list %8.8x vs. %p.\n",
				   ioread32(ioaddr + DownListPtr),
Linus Torvalds's avatar
Linus Torvalds committed
				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;

Linus Torvalds's avatar
Linus Torvalds committed
#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
Linus Torvalds's avatar
Linus Torvalds committed
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
Linus Torvalds's avatar
Linus Torvalds committed
#endif
				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
					   i, &vp->tx_ring[i], length,
Linus Torvalds's avatar
Linus Torvalds committed
					   le32_to_cpu(vp->tx_ring[i].status));
			}
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
		}
	}
}

/* net_device get_stats: fold the hardware counters into dev->stats
 * (only while the device is present) and return them. */
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;

	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
		spin_lock_irqsave (&vp->lock, flags);
		update_stats(ioaddr, dev);
		spin_unlock_irqrestore (&vp->lock, flags);
	}
	return &dev->stats;
}

/*  Update statistics.
	Unlike with the EL3 we need not worry about interrupts changing
	the window setting from underneath us, but we must still guard
	against a race condition with a StatsUpdate interrupt updating the
	table.  This is done by checking that the ASM (!) code generated uses
	atomic updates with '+='.
	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int old_window = ioread16(ioaddr + EL3_CMD);	/* remember the window to restore */

	if (old_window == 0xffff)	/* Chip suspended or ejected. */
		return;
	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	EL3WINDOW(6);
	dev->stats.tx_carrier_errors		+= ioread8(ioaddr + 0);
	dev->stats.tx_heartbeat_errors		+= ioread8(ioaddr + 1);
	dev->stats.tx_window_errors		+= ioread8(ioaddr + 4);
	dev->stats.rx_fifo_errors		+= ioread8(ioaddr + 5);
	dev->stats.tx_packets			+= ioread8(ioaddr + 6);
	dev->stats.tx_packets			+= (ioread8(ioaddr + 9)&0x30) << 4;
	/* Rx packets	*/			ioread8(ioaddr + 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes 			+= ioread16(ioaddr + 10);
	dev->stats.tx_bytes 			+= ioread16(ioaddr + 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions	+= ioread8(ioaddr + 2);
	vp->xstats.tx_single_collisions         += ioread8(ioaddr + 3);
	vp->xstats.tx_deferred			+= ioread8(ioaddr + 8);
	EL3WINDOW(4);
	vp->xstats.rx_bad_ssd			+= ioread8(ioaddr + 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Upper nibbles of the byte counters live in register 13. */
		u8 up = ioread8(ioaddr + 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}

	EL3WINDOW(old_window >> 13);
	return;
}

/* ethtool: restart MII autonegotiation.  Window 4 must be selected
   while the MII registers are accessed; vp->lock serializes that. */
static int vortex_nway_reset(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vp->lock, flags);
	EL3WINDOW(4);
	ret = mii_nway_restart(&vp->mii);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

/* ethtool: report link settings via the generic MII helper.
   MII access requires register window 4 under vp->lock. */
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vp->lock, flags);
	EL3WINDOW(4);
	ret = mii_ethtool_gset(&vp->mii, cmd);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

/* ethtool: apply link settings via the generic MII helper.
   MII access requires register window 4 under vp->lock. */
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vp->lock, flags);
	EL3WINDOW(4);
	ret = mii_ethtool_sset(&vp->mii, cmd);
	spin_unlock_irqrestore(&vp->lock, flags);

	return ret;
}

/* ethtool: report the driver-wide debug verbosity (shared by all cards). */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}

/* ethtool: set the driver-wide debug verbosity (shared by all cards). */
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}

static int vortex_get_sset_count(struct net_device *dev, int sset)
Linus Torvalds's avatar
Linus Torvalds committed
{
	switch (sset) {
	case ETH_SS_STATS:
		return VORTEX_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
Linus Torvalds's avatar
Linus Torvalds committed
}

/* ethtool: export the extra driver statistics.
   The slot order must match ethtool_stats_keys[]. */
static void vortex_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	struct vortex_extra_stats *xs = &vp->xstats;
	unsigned long flags;

	/* Fold the hardware counters into vp->xstats first. */
	spin_lock_irqsave(&vp->lock, flags);
	update_stats(ioaddr, dev);
	spin_unlock_irqrestore(&vp->lock, flags);

	data[0] = xs->tx_deferred;
	data[1] = xs->tx_max_collisions;
	data[2] = xs->tx_multiple_collisions;
	data[3] = xs->tx_single_collisions;
	data[4] = xs->rx_bad_ssd;
}


/* ethtool: copy out the statistic names for ETH_SS_STATS. */
static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
	else
		WARN_ON(1);	/* unexpected string set */
}

static void vortex_get_drvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
{
	struct vortex_private *vp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	if (VORTEX_PCI(vp)) {
		strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
	} else {
		if (VORTEX_EISA(vp))
			strcpy(info->bus_info, dev_name(vp->gendev));
Linus Torvalds's avatar
Linus Torvalds committed
		else
			sprintf(info->bus_info, "EISA 0x%lx %d",
					dev->base_addr, dev->irq);
	}
}

/* ethtool entry points.  The MII-based handlers (settings, nway_reset)
   select register window 4 under vp->lock before touching the PHY. */
static const struct ethtool_ops vortex_ethtool_ops = {
	.get_drvinfo		= vortex_get_drvinfo,
	.get_strings            = vortex_get_strings,
	.get_msglevel           = vortex_get_msglevel,
	.set_msglevel           = vortex_set_msglevel,
	.get_ethtool_stats      = vortex_get_ethtool_stats,
	.get_sset_count		= vortex_get_sset_count,
	.get_settings           = vortex_get_settings,
	.set_settings           = vortex_set_settings,
	.get_link               = ethtool_op_get_link,
	.nway_reset             = vortex_nway_reset,
};

#ifdef CONFIG_PCI
/*
 *	Must power the device up to do MDIO operations
 */
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	unsigned long flags;
	pci_power_t state = 0;
Linus Torvalds's avatar
Linus Torvalds committed

	if(VORTEX_PCI(vp))
		state = VORTEX_PCI(vp)->current_state;

	/* The kernel core really should have pci_get_power_state() */

	if(state != 0)
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
	spin_lock_irqsave(&vp->lock, flags);
	EL3WINDOW(4);
	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
	spin_unlock_irqrestore(&vp->lock, flags);
	if(state != 0)
		pci_set_power_state(VORTEX_PCI(vp), state);

	return err;
}
#endif


/* Pre-Cyclone chips have no documented multicast filter, so the only
   multicast setting is to receive all multicast frames.  At least
   the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
Linus Torvalds's avatar
Linus Torvalds committed
	int new_mode;

	if (dev->flags & IFF_PROMISC) {
		if (vortex_debug > 3)
			pr_notice("%s: Setting promiscuous mode.\n", dev->name);
Linus Torvalds's avatar
Linus Torvalds committed
		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
	} else	if ((dev->mc_list)  ||  (dev->flags & IFF_ALLMULTI)) {
		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
	} else
		new_mode = SetRxFilter | RxStation | RxBroadcast;

	iowrite16(new_mode, ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
}

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
   Note that this must be done after each RxReset due to some backwards
   compatibility logic in the Cyclone and Tornado ASICs */

/* The Ethernet Type used for 802.1q tagged frames */
#define VLAN_ETHER_TYPE 0x8100

static void set_8021q_mode(struct net_device *dev, int enable)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int old_window = ioread16(ioaddr + EL3_CMD);
Linus Torvalds's avatar
Linus Torvalds committed
	int mac_ctrl;

	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
		/* cyclone and tornado chipsets can recognize 802.1q
		 * tagged frames and treat them correctly */

		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */