pr_debug("%s: Updating stats.\n", dev->name);
update_stats(ioaddr, dev);
/* HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 &&
ioread16(ioaddr + EL3_STATUS) & StatsFull) {
pr_warning("%s: Updating statistics failed, disabling "
iowrite16(SetIntrEnb |
(window_read16(vp, 5, 10) & ~StatsFull),
ioaddr + EL3_CMD);
vp->intr_enable &= ~StatsFull;
DoneDidThat++;
}
}
if (status & IntReq) { /* Restore all interrupt sources. */
iowrite16(vp->status_enable, ioaddr + EL3_CMD);
iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
}
if (status & HostError) {
u16 fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
if (vp->full_bus_master_tx) {
int bus_status = ioread32(ioaddr + PktStatus);
/* 0x80000000 PCI master abort. */
/* 0x40000000 PCI target abort. */
if (vortex_debug)
pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
/* In this case, blow the card away */
/* Must not enter D3 or we can't legally issue the reset! */
vortex_down(dev, 0);
issue_and_wait(dev, TotalReset | 0xff);
vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
} else if (fifo_diag & 0x0400)
do_tx_reset = 1;
if (fifo_diag & 0x3000) {
/* Reset Rx fifo and upload logic */
issue_and_wait(dev, RxReset|0x07);
/* Set the Rx filter to the current state. */
set_rx_mode(dev);
/* enable 802.1q VLAN tagged frames */
set_8021q_mode(dev, 1);
iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
}
}
if (do_tx_reset) {
issue_and_wait(dev, TxReset|reset_mask);
iowrite16(TxEnable, ioaddr + EL3_CMD);
if (!vp->full_bus_master_tx)
netif_wake_queue(dev);
}
}
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
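/* Write the doubleword Tx header (the packet length) into the FIFO first;
   both the bus-master and PIO paths below depend on it. */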
iowrite32(skb->len, ioaddr + TX_FIFO);
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
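/* The DMA length is rounded up to a whole number of doublewords. */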
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
iowrite16(len, ioaddr + Wn7_MasterLen);
spin_unlock_irq(&vp->window_lock);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
/* Interrupt us when the FIFO has room for max-sized packet. */
netif_stop_queue(dev);
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
}
}
/* Clear the Tx status stack. */
{
int tx_status;
int i = 32;
while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (vortex_debug > 2)
pr_debug("%s: Tx error, status %2.2x.\n",
if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
iowrite16(TxEnable, ioaddr + EL3_CMD);
}
iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
return NETDEV_TX_OK;
}
static netdev_tx_t
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
if (vortex_debug > 6) {
pr_debug("boomerang_start_xmit()\n");
pr_debug("%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
}
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
return NETDEV_TX_BUSY;
}
vp->tx_skbuff[entry] = skb;
vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
if (skb->ip_summed != CHECKSUM_PARTIAL)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
else
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
(void*)page_address(frag->page) + frag->page_offset,
frag->size, PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
else
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
}
}
#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
spin_lock_irqsave(&vp->lock, flags);
/* Wait for the stall to complete. */
issue_and_wait(dev, DownStall);
prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
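/* If the download engine is idle (its list pointer reads zero), point it
   at the freshly linked descriptor to restart it. */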
if (ioread32(ioaddr + DownListPtr) == 0) {
iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
vp->queued_packet++;
}
vp->cur_tx++;
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
/* Dubious. If in boomerang_interrupt "faster" cyclone ifdef
* were selected, this would corrupt DN_COMPLETE. No?
*/
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
/*
* This is the ISR for the vortex series chips.
* full_bus_master_tx == 0 && full_bus_master_rx == 0
*/
static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
int handled = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
pr_debug("vortex_interrupt(). status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs cause this */
handled = 1;
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (status == 0xffff) /* h/w no longer present (hotplug)? */
goto handler_exit;
if (vortex_debug > 4)
pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
spin_lock(&vp->window_lock);
window_set(vp, 7);
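/* Window 7 holds the bus-master registers (Wn7_*) used by the DMADone
   handling in the loop below. */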
pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
vortex_rx(dev);
if (status & TxAvailable) {
if (vortex_debug > 5)
pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
}
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
* AKPM: FIXME: I don't think we need this. If the queue was stopped due to
* insufficient FIFO room, the TxAvailable test will succeed and call
* netif_wake_queue()
*/
netif_wake_queue(dev);
} else { /* Interrupt when FIFO has room for max-sized packet. */
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
netif_stop_queue(dev);
}
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
vortex_error(dev, status);
}
if (--work_done < 0) {
pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
spin_unlock(&vp->window_lock);
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
/*
* This is the ISR for the boomerang series chips.
* full_bus_master_tx == 1 && full_bus_master_rx == 1
*/
static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
ioaddr = vp->ioaddr;
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
pr_debug("boomerang_interrupt. status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
pr_debug("boomerang_interrupt(1): status = 0xffff\n");
goto handler_exit;
}
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (vortex_debug > 4)
pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & UpComplete) {
iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
if (vortex_debug > 5)
pr_debug("boomerang_interrupt->boomerang_rx\n");
boomerang_rx(dev);
}
if (status & DownComplete) {
unsigned int dirty_tx = vp->dirty_tx;
iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
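/* Reap Tx descriptors that the download engine has finished with. */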
while (vp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
#if 1 /* AKPM: the latter is faster, but cyclone-only */
if (ioread32(ioaddr + DownListPtr) ==
vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
break; /* It still hasn't been processed. */
#else
if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
break; /* It still hasn't been processed. */
#endif
if (vp->tx_skbuff[entry]) {
struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
int i;
for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
pr_debug("boomerang_interrupt: no skb!\n");
}
/* dev->stats.tx_packets++; Counted below. */
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
if (vortex_debug > 6)
pr_debug("boomerang_interrupt: wake queue\n");
netif_wake_queue (dev);
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
vortex_error(dev, status);
if (--work_done < 0) {
pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
pr_debug("%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
short rx_status;
if (vortex_debug > 5)
pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
pr_debug(" Rx error: status %2.2x.\n", rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01) dev->stats.rx_over_errors++;
if (rx_error & 0x02) dev->stats.rx_length_errors++;
if (rx_error & 0x04) dev->stats.rx_frame_errors++;
if (rx_error & 0x08) dev->stats.rx_crc_errors++;
if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
skb = dev_alloc_skb(pkt_len + 5);
if (vortex_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
pkt_len, PCI_DMA_FROMDEVICE);
iowrite32(dma, ioaddr + Wn7_MasterAddr);
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
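/* Busy-wait for the bus-master upload to finish before unmapping the buffer. */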
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
} else {
ioread32_rep(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
(pkt_len + 3) >> 2);
}
iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
break;
continue;
} else if (vortex_debug > 0)
pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
dev->name, pkt_len);
dev->stats.rx_dropped++;
}
issue_and_wait(dev, RxDiscard);
}
return 0;
}
static int
boomerang_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
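/* Never process more packets per call than there are posted Rx buffers. */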
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
if (--rx_work_limit < 0)
break;
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
pr_debug(" Rx error: status %2.2x.\n", rx_error);
dev->stats.rx_errors++;
if (rx_error & 0x01) dev->stats.rx_over_errors++;
if (rx_error & 0x02) dev->stats.rx_length_errors++;
if (rx_error & 0x04) dev->stats.rx_frame_errors++;
if (rx_error & 0x08) dev->stats.rx_crc_errors++;
if (rx_error & 0x10) dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
vp->rx_skbuff[entry]->data,
pkt_len);
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
{ /* Use hardware checksum info. */
int csum_bits = rx_status & 0xee000000;
if (csum_bits &&
(csum_bits == (IPChksumValid | TCPChksumValid) ||
csum_bits == (IPChksumValid | UDPChksumValid))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
vp->rx_csumhits++;
}
}
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
pr_warning("%s: memory shortage\n", dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
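/* Restart the upload engine in case it stalled while no buffers were available. */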
iowrite16(UpUnstall, ioaddr + EL3_CMD);
}
return 0;
}
/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
rx_oom_timer(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
struct vortex_private *vp = netdev_priv(dev);
spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
pr_debug("%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
}
static void
vortex_down(struct net_device *dev, int final_down)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
netif_stop_queue (dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update dev->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
iowrite16(RxDisable, ioaddr + EL3_CMD);
iowrite16(TxDisable, ioaddr + EL3_CMD);
/* Disable receiving 802.1q tagged frames */
set_8021q_mode(dev, 0);
if (dev->if_port == XCVR_10base2)
/* Turn off thinnet power. Green! */
iowrite16(StopCoax, ioaddr + EL3_CMD);
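/* SetIntrEnb with a zero mask disables every interrupt source before the final stats read. */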
iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(ioaddr, dev);
if (vp->full_bus_master_rx)
iowrite32(0, ioaddr + UpListPtr);
if (vp->full_bus_master_tx)
iowrite32(0, ioaddr + DownListPtr);
if (final_down && VORTEX_PCI(vp)) {
vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
acpi_set_WOL(dev);
}
}
static int
vortex_close(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
if (netif_device_present(dev))
vortex_down(dev, 1);
if (vortex_debug > 1) {
pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
}
#if DO_ZEROCOPY
if (vp->rx_csumhits &&
(vp->drv_flags & HAS_HWCKSM) == 0 &&
(vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
}
#endif
free_irq(dev->irq, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (vp->tx_skbuff[i]) {
struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
int k;
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = NULL;
}
}
}
return 0;
}
static void
dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
int i;
unsigned int length;
int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possibly racy, but it's only debug stuff */
pr_err(" Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
pr_err(" Transmit list %8.8x vs. %p.\n",
ioread32(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
#if DO_ZEROCOPY
length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
length = le32_to_cpu(vp->tx_ring[i].length);
#endif
pr_err(" %d: @%p length %8.8x status %8.8x\n",
i, &vp->tx_ring[i], length,
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)
iowrite16(DownUnstall, ioaddr + EL3_CMD);
}
}
}
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
spin_lock_irqsave (&vp->lock, flags);
update_stats(ioaddr, dev);
spin_unlock_irqrestore(&vp->lock, flags);
}
return &dev->stats;
}
/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing
the window setting from underneath us, but we must still guard
against a race condition with a StatsUpdate interrupt updating the
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
dev->stats.tx_window_errors += window_read8(vp, 6, 4);
dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
dev->stats.tx_packets += window_read8(vp, 6, 6);
dev->stats.tx_packets += (window_read8(vp, 6, 9) &
0x30) << 4;
/* Rx packets */ window_read8(vp, 6, 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
dev->stats.rx_bytes += window_read16(vp, 6, 10);
dev->stats.tx_bytes += window_read16(vp, 6, 12);
vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
vp->xstats.tx_deferred += window_read8(vp, 6, 8);
vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);
dev->stats.collisions = vp->xstats.tx_multiple_collisions
+ vp->xstats.tx_single_collisions
+ vp->xstats.tx_max_collisions;
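/* Window 4, register 13 extends the 16-bit byte counters: the low nibble
   belongs to rx_bytes, the high nibble to tx_bytes. */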
{
u8 up = window_read8(vp, 4, 13);
dev->stats.rx_bytes += (up & 0x0f) << 16;
dev->stats.tx_bytes += (up & 0xf0) << 12;
}
}
static int vortex_nway_reset(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
return mii_nway_restart(&vp->mii);
}
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
return mii_ethtool_gset(&vp->mii, cmd);
}
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
return mii_ethtool_sset(&vp->mii, cmd);
}
static u32 vortex_get_msglevel(struct net_device *dev)
{
return vortex_debug;
}
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
vortex_debug = dbg;
}
static int vortex_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return VORTEX_NUM_STATS;
default:
return -EOPNOTSUPP;
}
}
static void vortex_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
spin_lock_irqsave(&vp->lock, flags);
update_stats(ioaddr, dev);
spin_unlock_irqrestore(&vp->lock, flags);
data[0] = vp->xstats.tx_deferred;
data[1] = vp->xstats.tx_max_collisions;
data[2] = vp->xstats.tx_multiple_collisions;
data[3] = vp->xstats.tx_single_collisions;
data[4] = vp->xstats.rx_bad_ssd;
}
static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
default:
WARN_ON(1);
break;
}
}
static void vortex_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct vortex_private *vp = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
if (VORTEX_PCI(vp)) {
strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
} else {
if (VORTEX_EISA(vp))
strcpy(info->bus_info, dev_name(vp->gendev));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
}
}
static const struct ethtool_ops vortex_ethtool_ops = {
.get_drvinfo = vortex_get_drvinfo,
.get_strings = vortex_get_strings,
.get_msglevel = vortex_get_msglevel,
.set_msglevel = vortex_set_msglevel,
.get_ethtool_stats = vortex_get_ethtool_stats,
.get_sset_count = vortex_get_sset_count,
.get_settings = vortex_get_settings,
.set_settings = vortex_set_settings,
.get_link = ethtool_op_get_link,
.nway_reset = vortex_nway_reset,
};
#ifdef CONFIG_PCI
/*
* Must power the device up to do MDIO operations
*/
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
unsigned long flags;
pci_power_t state = 0;
if(VORTEX_PCI(vp))
state = VORTEX_PCI(vp)->current_state;
/* The kernel core really should have pci_get_power_state() */
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
spin_lock_irqsave(&vp->lock, flags);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
return err;
}
#endif
/* Pre-Cyclone chips have no documented multicast filter, so the only
multicast setting is to receive all multicast frames. At least
the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int new_mode;
if (dev->flags & IFF_PROMISC) {
if (vortex_debug > 3)
pr_notice("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
iowrite16(new_mode, ioaddr + EL3_CMD);
}
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
Note that this must be done after each RxReset due to some backwards
compatibility logic in the Cyclone and Tornado ASICs */
/* The Ethernet Type used for 802.1q tagged frames */
#define VLAN_ETHER_TYPE 0x8100
static void set_8021q_mode(struct net_device *dev, int enable)
{
struct vortex_private *vp = netdev_priv(dev);
int mac_ctrl;
if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
/* cyclone and tornado chipsets can recognize 802.1q
* tagged frames and treat them correctly */