"README.md" did not exist on "b83db862ffb871e3131e5d2160c741b288eea9aa"
Newer
Older
ioread16(ioaddr + Wn4_FIFODiag));
if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
" network cable problem?\n", dev->name);
if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
printk(KERN_ERR "%s: Interrupt posted but not delivered --"
" IRQ blocked by another device?\n", dev->name);
/* Bad idea here.. but we might as well handle a few events. */
{
/*
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
local_irq_save(flags);
if (vp->full_bus_master_tx)
boomerang_interrupt(dev->irq, dev, NULL);
else
vortex_interrupt(dev->irq, dev, NULL);
local_irq_restore(flags);
}
}
if (vortex_debug > 0)
dump_tx_ring(dev);
issue_and_wait(dev, TxReset);
vp->stats.tx_errors++;
if (vp->full_bus_master_tx) {
printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
ioaddr + DownListPtr);
if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
netif_wake_queue (dev);
if (vp->drv_flags & IS_BOOMERANG)
iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
iowrite16(DownUnstall, ioaddr + EL3_CMD);
} else {
vp->stats.tx_dropped++;
netif_wake_queue(dev);
}
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
dev->trans_start = jiffies;
/* Switch to register set 7 for normal use. */
EL3WINDOW(7);
}
/*
* Handle uncommon interrupt sources. This is a separate routine to minimize
* the cache impact.
*/
static void
vortex_error(struct net_device *dev, int status)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int do_tx_reset = 0, reset_mask = 0;
unsigned char tx_status = 0;
if (vortex_debug > 2) {
printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", dev->name, status);
}
if (status & TxComplete) { /* Really "TxError" for us. */
tx_status = ioread8(ioaddr + TxStatus);
/* Presumably a tx-timeout. We must merely re-enable. */
if (vortex_debug > 2
|| (tx_status != 0x88 && vortex_debug > 0)) {
printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
dev->name, tx_status);
if (tx_status == 0x82) {
printk(KERN_ERR "Probably a duplex mismatch. See "
"Documentation/networking/vortex.txt\n");
}
dump_tx_ring(dev);
}
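/*
 * Per the checks below: 0x30 (jabber/underrun) needs a full TxReset,
 * 0x08 (16 collisions) needs one only when the card has
 * MAX_COLLISION_RESET set, and anything else merely needs the
 * transmitter re-enabled.
 */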
if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
iowrite8(0, ioaddr + TxStatus);
if (tx_status & 0x30) { /* txJabber or txUnderrun */
do_tx_reset = 1;
} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) { /* maxCollisions */
do_tx_reset = 1;
reset_mask = 0x0108; /* Reset interface logic, but not download logic */
} else { /* Merely re-enable the transmitter. */
iowrite16(TxEnable, ioaddr + EL3_CMD);
}
}
if (status & RxEarly) { /* Rx early is unused. */
vortex_rx(dev);
iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
}
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
update_stats(ioaddr, dev);
/* HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 &&
ioread16(ioaddr + EL3_STATUS) & StatsFull) {
printk(KERN_WARNING "%s: Updating statistics failed, disabling "
"stats as an interrupt source.\n", dev->name);
EL3WINDOW(5);
iowrite16(SetIntrEnb | (ioread16(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
vp->intr_enable &= ~StatsFull;
EL3WINDOW(7);
DoneDidThat++;
}
}
if (status & IntReq) { /* Restore all interrupt sources. */
iowrite16(vp->status_enable, ioaddr + EL3_CMD);
iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
}
if (status & HostError) {
u16 fifo_diag;
EL3WINDOW(4);
fifo_diag = ioread16(ioaddr + Wn4_FIFODiag);
printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
dev->name, fifo_diag);
/* Adapter failure requires Tx/Rx reset and reinit. */
if (vp->full_bus_master_tx) {
int bus_status = ioread32(ioaddr + PktStatus);
/* 0x80000000 PCI master abort. */
/* 0x40000000 PCI target abort. */
if (vortex_debug)
printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
/* In this case, blow the card away */
/* Must not enter D3 or we can't legally issue the reset! */
vortex_down(dev, 0);
issue_and_wait(dev, TotalReset | 0xff);
vortex_up(dev); /* AKPM: bug. vortex_up() assumes that the rx ring is full. It may not be. */
} else if (fifo_diag & 0x0400)
do_tx_reset = 1;
if (fifo_diag & 0x3000) {
/* Reset Rx fifo and upload logic */
issue_and_wait(dev, RxReset|0x07);
/* Set the Rx filter to the current state. */
set_rx_mode(dev);
/* enable 802.1q VLAN tagged frames */
set_8021q_mode(dev, 1);
iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
}
}
if (do_tx_reset) {
issue_and_wait(dev, TxReset|reset_mask);
iowrite16(TxEnable, ioaddr + EL3_CMD);
if (!vp->full_bus_master_tx)
netif_wake_queue(dev);
}
}
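/*
 * Transmit path for Vortex-generation (non-ring) chips: the length header
 * and the packet are pushed straight into the Tx FIFO, or handed to the
 * single-packet bus-master engine when one is present.
 */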
static int
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
iowrite32(skb->len, ioaddr + TX_FIFO);
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
ioaddr + Wn7_MasterAddr);
iowrite16(len, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb(skb);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
/* Interrupt us when the FIFO has room for max-sized packet. */
netif_stop_queue(dev);
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
}
}
dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
int tx_status;
int i = 32;
while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
iowrite16(TxEnable, ioaddr + EL3_CMD);
}
iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
return 0;
}
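/*
 * Transmit path for Boomerang/Cyclone bus-master chips: build a download
 * descriptor in the Tx ring and link it in behind the previous entry
 * while the download engine is stalled.
 */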
static int
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
unsigned long flags;
if (vortex_debug > 6) {
printk(KERN_DEBUG "boomerang_start_xmit()\n");
printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
}
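/* The queue is stopped below once TX_RING_SIZE-1 entries are outstanding,
   so a completely full ring here indicates a bug. */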
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
dev->name);
netif_stop_queue(dev);
return 1;
}
vp->tx_skbuff[entry] = skb;
vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
if (skb->ip_summed != CHECKSUM_HW)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
else
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len-skb->data_len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
(void*)page_address(frag->page) + frag->page_offset,
frag->size, PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
else
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
}
}
#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
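/*
 * The download engine must be stalled while we edit the list: it may
 * otherwise be walking the ring concurrently and miss the update to
 * prev_entry->next below.
 */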
spin_lock_irqsave(&vp->lock, flags);
/* Wait for the stall to complete. */
issue_and_wait(dev, DownStall);
prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
if (ioread32(ioaddr + DownListPtr) == 0) {
iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
vp->queued_packet++;
}
vp->cur_tx++;
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
/* Dubious. If in boomerang_interrupt the "faster" cyclone ifdef
* were selected, this would corrupt DN_COMPLETE. No?
*/
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
dev->trans_start = jiffies;
return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
/*
* This is the ISR for the vortex series chips.
* full_bus_master_tx == 0 && full_bus_master_rx == 0
*/
static irqreturn_t
vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
int handled = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk("vortex_interrupt(). status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs cause this */
handled = 1;
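/* vp->deferred holds interrupt sources masked off by the overload path
   below; fold them back into status so they get serviced now. */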
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (status == 0xffff) /* h/w no longer present (hotplug)? */
goto handler_exit;
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
vortex_rx(dev);
if (status & TxAvailable) {
if (vortex_debug > 5)
printk(KERN_DEBUG " TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
}
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
* AKPM: FIXME: I don't think we need this. If the queue was stopped due to
* insufficient FIFO room, the TxAvailable test will succeed and call
* netif_wake_queue()
*/
netif_wake_queue(dev);
} else { /* Interrupt when FIFO has room for max-sized packet. */
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
netif_stop_queue(dev);
}
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
vortex_error(dev, status);
}
if (--work_done < 0) {
printk(KERN_WARNING "%s: Too much work in interrupt, status "
"%4.4x.\n", dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
/*
* This is the ISR for the boomerang series chips.
* full_bus_master_tx == 1 && full_bus_master_rx == 1
*/
static irqreturn_t
boomerang_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
ioaddr = vp->ioaddr;
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
goto handler_exit;
}
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & UpComplete) {
iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
if (vortex_debug > 5)
printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
boomerang_rx(dev);
}
if (status & DownComplete) {
unsigned int dirty_tx = vp->dirty_tx;
iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
while (vp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
#if 1 /* AKPM: the latter is faster, but cyclone-only */
if (ioread32(ioaddr + DownListPtr) ==
vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
break; /* It still hasn't been processed. */
#else
if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
break; /* It still hasn't been processed. */
#endif
if (vp->tx_skbuff[entry]) {
struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
int i;
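/* frag[0] maps the linear skb data, so nr_frags + 1 mappings are released. */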
for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
}
/* vp->stats.tx_packets++; Counted below. */
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
if (vortex_debug > 6)
printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
netif_wake_queue (dev);
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
vortex_error(dev, status);
if (--work_done < 0) {
printk(KERN_WARNING "%s: Too much work in interrupt, status "
"%4.4x.\n", dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
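/* Receive path for non-bus-master-Rx chips: drain completed packets from
   the Rx FIFO, using the single-packet DMA engine when it is idle. */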
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
short rx_status;
if (vortex_debug > 5)
printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
vp->stats.rx_errors++;
if (rx_error & 0x01) vp->stats.rx_over_errors++;
if (rx_error & 0x02) vp->stats.rx_length_errors++;
if (rx_error & 0x04) vp->stats.rx_frame_errors++;
if (rx_error & 0x08) vp->stats.rx_crc_errors++;
if (rx_error & 0x10) vp->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
skb = dev_alloc_skb(pkt_len + 5);
if (vortex_debug > 4)
printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb->dev = dev;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
pkt_len, PCI_DMA_FROMDEVICE);
iowrite32(dma, ioaddr + Wn7_MasterAddr);
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
} else {
ioread32_rep(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
(pkt_len + 3) >> 2);
}
iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
break;
continue;
} else if (vortex_debug > 0)
printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
"size %d.\n", dev->name, pkt_len);
}
vp->stats.rx_dropped++;
issue_and_wait(dev, RxDiscard);
}
return 0;
}
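/* Receive path for bus-master-Rx chips: walk the Rx ring, copying packets
   smaller than rx_copybreak into fresh skbuffs and passing larger ones up
   in place, then refill the ring and unstall the upload engine. */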
static int
boomerang_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete) {
if (--rx_work_limit < 0)
break;
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
vp->stats.rx_errors++;
if (rx_error & 0x01) vp->stats.rx_over_errors++;
if (rx_error & 0x02) vp->stats.rx_length_errors++;
if (rx_error & 0x04) vp->stats.rx_frame_errors++;
if (rx_error & 0x08) vp->stats.rx_crc_errors++;
if (rx_error & 0x10) vp->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
skb->dev = dev;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
vp->rx_skbuff[entry]->data,
pkt_len);
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
{ /* Use hardware checksum info. */
int csum_bits = rx_status & 0xee000000;
if (csum_bits &&
(csum_bits == (IPChksumValid | TCPChksumValid) ||
csum_bits == (IPChksumValid | UDPChksumValid))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
vp->rx_csumhits++;
}
}
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb(PKT_BUF_SZ);
if (skb == NULL) {
static unsigned long last_jif;
if ((jiffies - last_jif) > 10 * HZ) {
printk(KERN_WARNING "%s: memory shortage\n", dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
}
return 0;
}
/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
rx_oom_timer(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
struct vortex_private *vp = netdev_priv(dev);
spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
}
static void
vortex_down(struct net_device *dev, int final_down)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
netif_stop_queue (dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update vp->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
iowrite16(RxDisable, ioaddr + EL3_CMD);
iowrite16(TxDisable, ioaddr + EL3_CMD);
/* Disable receiving 802.1q tagged frames */
set_8021q_mode(dev, 0);
if (dev->if_port == XCVR_10base2)
/* Turn off thinnet power. Green! */
iowrite16(StopCoax, ioaddr + EL3_CMD);
iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(ioaddr, dev);
if (vp->full_bus_master_rx)
iowrite32(0, ioaddr + UpListPtr);
if (vp->full_bus_master_tx)
iowrite32(0, ioaddr + DownListPtr);
if (final_down && VORTEX_PCI(vp)) {
vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
acpi_set_WOL(dev);
}
}
static int
vortex_close(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
if (netif_device_present(dev))
vortex_down(dev, 1);
if (vortex_debug > 1) {
printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
}
#if DO_ZEROCOPY
if ( vp->rx_csumhits &&
((vp->drv_flags & HAS_HWCKSM) == 0) &&
(hw_checksums[vp->card_idx] == -1)) {
printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", dev->name);
}
#endif
free_irq(dev->irq, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (vp->tx_skbuff[i]) {
struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
int k;
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = NULL;
}
}
}
return 0;
}
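/* Debug helper: dump the Tx ring state. The download engine is stalled so
   the snapshot is consistent, and unstalled afterwards unless it was
   already stalled on entry. */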
static void
dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
int i;
int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possibly racy. But it's only debug stuff */
printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
ioread32(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
#if DO_ZEROCOPY
le32_to_cpu(vp->tx_ring[i].frag[0].length),
#else
le32_to_cpu(vp->tx_ring[i].length),
#endif
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)
iowrite16(DownUnstall, ioaddr + EL3_CMD);
}
}
}
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
spin_lock_irqsave (&vp->lock, flags);
update_stats(ioaddr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
}
return &vp->stats;
}
/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing
the window setting from underneath us, but we must still guard
against a race condition with a StatsUpdate interrupt updating the
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int old_window = ioread16(ioaddr + EL3_CMD);
if (old_window == 0xffff) /* Chip suspended or ejected. */
return;
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
vp->stats.collisions += ioread8(ioaddr + 3);
vp->stats.tx_window_errors += ioread8(ioaddr + 4);
vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
vp->stats.tx_packets += ioread8(ioaddr + 6);
vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
/* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
vp->stats.rx_bytes += ioread16(ioaddr + 10);
vp->stats.tx_bytes += ioread16(ioaddr + 12);
vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
vp->xstats.tx_deferred += ioread8(ioaddr + 8);
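/* The remaining counters live in window 4: bad-SSD at offset 12 and the
   upper four bits of the Rx/Tx byte counters at offset 13. */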
EL3WINDOW(4);
vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
{
u8 up = ioread8(ioaddr + 13);
vp->stats.rx_bytes += (up & 0x0f) << 16;
vp->stats.tx_bytes += (up & 0xf0) << 12;
}
EL3WINDOW(old_window >> 13);
return;
}
static int vortex_nway_reset(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_nway_restart(&vp->mii);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static u32 vortex_get_link(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_link_ok(&vp->mii);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_ethtool_gset(&vp->mii, cmd);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_ethtool_sset(&vp->mii, cmd);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static u32 vortex_get_msglevel(struct net_device *dev)
{
return vortex_debug;
}
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
vortex_debug = dbg;
}
static int vortex_get_stats_count(struct net_device *dev)
{
return VORTEX_NUM_STATS;
}