void __iomem *ioaddr = vp->ioaddr;
iowrite32(skb->len, ioaddr + TX_FIFO);
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
int len = (skb->len + 3) & ~3;
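/* Round the DMA length up to a doubleword multiple. */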
iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE),
ioaddr + Wn7_MasterAddr);
iowrite16(len, ioaddr + Wn7_MasterLen);
iowrite16(StartDMADown, ioaddr + EL3_CMD);
/* netif_wake_queue() will be called at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
if (ioread16(ioaddr + TxFree) > 1536) {
netif_start_queue (dev); /* AKPM: redundant? */
} else {
/* Interrupt us when the FIFO has room for max-sized packet. */
netif_stop_queue(dev);
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
}
}
dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
int tx_status;
int i = 32;
while (--i > 0 && (tx_status = ioread8(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (vortex_debug > 2)
printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
dev->name, tx_status);
if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
issue_and_wait(dev, TxReset);
}
iowrite16(TxEnable, ioaddr + EL3_CMD);
}
iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
return 0;
}
static int
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
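/* prev_entry is the descriptor this packet's descriptor will be chained onto. */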
unsigned long flags;
if (vortex_debug > 6) {
printk(KERN_DEBUG "boomerang_start_xmit()\n");
printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
}
if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
if (vortex_debug > 0)
printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
dev->name);
netif_stop_queue(dev);
return 1;
}
vp->tx_skbuff[entry] = skb;
vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
if (skb->ip_summed != CHECKSUM_PARTIAL)
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
else
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
if (!skb_shinfo(skb)->nr_frags) {
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
} else {
int i;
vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
skb->len-skb->data_len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
vp->tx_ring[entry].frag[i+1].addr =
cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
(void*)page_address(frag->page) + frag->page_offset,
frag->size, PCI_DMA_TODEVICE));
if (i == skb_shinfo(skb)->nr_frags-1)
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
else
vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
}
}
#else
vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif
spin_lock_irqsave(&vp->lock, flags);
/* Wait for the stall to complete. */
issue_and_wait(dev, DownStall);
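/* The download engine is now stalled, so it is safe to link in the new descriptor. */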
prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
if (ioread32(ioaddr + DownListPtr) == 0) {
iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
vp->queued_packet++;
}
vp->cur_tx++;
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
netif_stop_queue (dev);
} else { /* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
/* Dubious. If in boomerang_interrupt "faster" cyclone ifdef
* were selected, this would corrupt DN_COMPLETE. No?
*/
prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
}
iowrite16(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
dev->trans_start = jiffies;
return 0;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
/*
* This is the ISR for the vortex series chips.
* full_bus_master_tx == 0 && full_bus_master_rx == 0
*/
static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
int handled = 0;
ioaddr = vp->ioaddr;
spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk("vortex_interrupt(). status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs cause this */
handled = 1;
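/* Pick up any interrupt sources deferred by the "too much work" path below. */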
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (status == 0xffff) /* h/w no longer present (hotplug)? */
goto handler_exit;
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
vortex_rx(dev);
if (status & TxAvailable) {
if (vortex_debug > 5)
printk(KERN_DEBUG " TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue (dev);
}
if (status & DMADone) {
if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
if (ioread16(ioaddr + TxFree) > 1536) {
/*
* AKPM: FIXME: I don't think we need this. If the queue was stopped due to
* insufficient FIFO room, the TxAvailable test will succeed and call
* netif_wake_queue()
*/
netif_wake_queue(dev);
} else { /* Interrupt when FIFO has room for max-sized packet. */
iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
netif_stop_queue(dev);
}
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
if (status == 0xffff)
break;
vortex_error(dev, status);
}
if (--work_done < 0) {
printk(KERN_WARNING "%s: Too much work in interrupt, status "
"%4.4x.\n", dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_RETVAL(handled);
}
/*
* This is the ISR for the boomerang series chips.
* full_bus_master_tx == 1 && full_bus_master_rx == 1
*/
static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr;
int status;
int work_done = max_interrupt_work;
ioaddr = vp->ioaddr;
/*
* It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
* and boomerang_start_xmit
*/
spin_lock(&vp->lock);
status = ioread16(ioaddr + EL3_STATUS);
if (vortex_debug > 6)
printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
if ((status & IntLatch) == 0)
goto handler_exit; /* No interrupt: shared IRQs can cause this */
if (status == 0xffff) { /* h/w no longer present (hotplug)? */
if (vortex_debug > 1)
printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
goto handler_exit;
}
if (status & IntReq) {
status |= vp->deferred;
vp->deferred = 0;
}
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
dev->name, status, ioread8(ioaddr + Timer));
do {
if (vortex_debug > 5)
printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & UpComplete) {
iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
if (vortex_debug > 5)
printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
boomerang_rx(dev);
}
if (status & DownComplete) {
unsigned int dirty_tx = vp->dirty_tx;
iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
while (vp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
#if 1 /* AKPM: the latter is faster, but cyclone-only */
if (ioread32(ioaddr + DownListPtr) ==
vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
break; /* It still hasn't been processed. */
#else
if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
break; /* It still hasn't been processed. */
#endif
if (vp->tx_skbuff[entry]) {
struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
int i;
for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb_irq(skb);
vp->tx_skbuff[entry] = NULL;
} else {
printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
}
/* vp->stats.tx_packets++; Counted below. */
dirty_tx++;
}
vp->dirty_tx = dirty_tx;
if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
if (vortex_debug > 6)
printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
netif_wake_queue (dev);
}
}
/* Check for all uncommon interrupts at once. */
if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
vortex_error(dev, status);
if (--work_done < 0) {
printk(KERN_WARNING "%s: Too much work in interrupt, status "
"%4.4x.\n", dev->name, status);
/* Disable all pending interrupts. */
do {
vp->deferred |= status;
iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
ioaddr + EL3_CMD);
iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
/* The timer will reenable interrupts. */
mod_timer(&vp->timer, jiffies + 1*HZ);
break;
}
/* Acknowledge the IRQ. */
iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
iowrite32(0x8000, vp->cb_fn_base + 4);
} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
if (vortex_debug > 4)
printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
dev->name, status);
handler_exit:
spin_unlock(&vp->lock);
return IRQ_HANDLED;
}
static int vortex_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
short rx_status;
if (vortex_debug > 5)
printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = ioread8(ioaddr + RxErrors);
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
vp->stats.rx_errors++;
if (rx_error & 0x01) vp->stats.rx_over_errors++;
if (rx_error & 0x02) vp->stats.rx_length_errors++;
if (rx_error & 0x04) vp->stats.rx_frame_errors++;
if (rx_error & 0x08) vp->stats.rx_crc_errors++;
if (rx_error & 0x10) vp->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
skb = dev_alloc_skb(pkt_len + 5);
if (vortex_debug > 4)
printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb->dev = dev;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
if (vp->bus_master &&
! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
pkt_len, PCI_DMA_FROMDEVICE);
iowrite32(dma, ioaddr + Wn7_MasterAddr);
iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
iowrite16(StartDMAUp, ioaddr + EL3_CMD);
while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
;
pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
} else {
ioread32_rep(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
(pkt_len + 3) >> 2);
iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
}
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
break;
continue;
} else if (vortex_debug > 0)
printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
"size %d.\n", dev->name, pkt_len);
}
issue_and_wait(dev, RxDiscard);
}
return 0;
}
static int
boomerang_rx(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
if (vortex_debug > 5)
printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
if (--rx_work_limit < 0)
break;
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (vortex_debug > 2)
printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
vp->stats.rx_errors++;
if (rx_error & 0x01) vp->stats.rx_over_errors++;
if (rx_error & 0x02) vp->stats.rx_length_errors++;
if (rx_error & 0x04) vp->stats.rx_frame_errors++;
if (rx_error & 0x08) vp->stats.rx_crc_errors++;
if (rx_error & 0x10) vp->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
if (vortex_debug > 4)
printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
skb->dev = dev;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
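/* Make the DMA'd data visible to the CPU before copying it out. */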
pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
vp->rx_skbuff[entry]->data,
pkt_len);
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
}
skb->protocol = eth_type_trans(skb, dev);
{ /* Use hardware checksum info. */
int csum_bits = rx_status & 0xee000000;
if (csum_bits &&
(csum_bits == (IPChksumValid | TCPChksumValid) ||
csum_bits == (IPChksumValid | UDPChksumValid))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
vp->rx_csumhits++;
}
}
netif_rx(skb);
dev->last_rx = jiffies;
vp->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb(PKT_BUF_SZ);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
printk(KERN_WARNING "%s: memory shortage\n", dev->name);
last_jif = jiffies;
}
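/* If the whole ring has drained, arm the OOM timer to retry the refill later. */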
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
}
return 0;
}
/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
rx_oom_timer(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
struct vortex_private *vp = netdev_priv(dev);
spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
}
static void
vortex_down(struct net_device *dev, int final_down)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
netif_stop_queue (dev);
del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);
/* Turn off statistics ASAP. We update vp->stats below. */
iowrite16(StatsDisable, ioaddr + EL3_CMD);
iowrite16(RxDisable, ioaddr + EL3_CMD);
iowrite16(TxDisable, ioaddr + EL3_CMD);
/* Disable receiving 802.1q tagged frames */
set_8021q_mode(dev, 0);
if (dev->if_port == XCVR_10base2)
/* Turn off thinnet power. Green! */
iowrite16(StopCoax, ioaddr + EL3_CMD);
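/* Mask all interrupt sources. */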
iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(ioaddr, dev);
if (vp->full_bus_master_rx)
iowrite32(0, ioaddr + UpListPtr);
if (vp->full_bus_master_tx)
iowrite32(0, ioaddr + DownListPtr);
if (final_down && VORTEX_PCI(vp)) {
vp->pm_state_valid = 1;
pci_save_state(VORTEX_PCI(vp));
acpi_set_WOL(dev);
}
}
static int
vortex_close(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int i;
if (netif_device_present(dev))
vortex_down(dev, 1);
if (vortex_debug > 1) {
printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
" tx_queued %d Rx pre-checksummed %d.\n",
dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
}
#if DO_ZEROCOPY
if (vp->rx_csumhits &&
(vp->drv_flags & HAS_HWCKSM) == 0 &&
(vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
printk(KERN_WARNING "%s supports hardware checksums, and we're "
"not using them!\n", dev->name);
}
#endif
free_irq(dev->irq, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
for (i = 0; i < TX_RING_SIZE; i++) {
if (vp->tx_skbuff[i]) {
struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
int k;
for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
pci_unmap_single(VORTEX_PCI(vp),
le32_to_cpu(vp->tx_ring[i].frag[k].addr),
le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
PCI_DMA_TODEVICE);
#else
pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
dev_kfree_skb(skb);
vp->tx_skbuff[i] = NULL;
}
}
}
return 0;
}
static void
dump_tx_ring(struct net_device *dev)
{
if (vortex_debug > 0) {
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
if (vp->full_bus_master_tx) {
int i;
int stalled = ioread32(ioaddr + PktStatus) & 0x04; /* Possibly racy, but it's only debug stuff */
printk(KERN_ERR " Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
vp->full_bus_master_tx,
vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
printk(KERN_ERR " Transmit list %8.8x vs. %p.\n",
ioread32(ioaddr + DownListPtr),
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
printk(KERN_ERR " %d: @%p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
#if DO_ZEROCOPY
le32_to_cpu(vp->tx_ring[i].frag[0].length),
#else
le32_to_cpu(vp->tx_ring[i].length),
#endif
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)
iowrite16(DownUnstall, ioaddr + EL3_CMD);
}
}
}
static struct net_device_stats *vortex_get_stats(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */
spin_lock_irqsave (&vp->lock, flags);
update_stats(ioaddr, dev);
spin_unlock_irqrestore (&vp->lock, flags);
}
return &vp->stats;
}
/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing
the window setting from underneath us, but we must still guard
against a race condition with a StatsUpdate interrupt updating the
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
int old_window = ioread16(ioaddr + EL3_CMD);
if (old_window == 0xffff) /* Chip suspended or ejected. */
return;
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
vp->stats.tx_carrier_errors += ioread8(ioaddr + 0);
vp->stats.tx_heartbeat_errors += ioread8(ioaddr + 1);
vp->stats.tx_window_errors += ioread8(ioaddr + 4);
vp->stats.rx_fifo_errors += ioread8(ioaddr + 5);
vp->stats.tx_packets += ioread8(ioaddr + 6);
vp->stats.tx_packets += (ioread8(ioaddr + 9)&0x30) << 4;
/* Rx packets */ ioread8(ioaddr + 7); /* Must read to clear */
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
vp->stats.rx_bytes += ioread16(ioaddr + 10);
vp->stats.tx_bytes += ioread16(ioaddr + 12);
vp->xstats.tx_multiple_collisions += ioread8(ioaddr + 2);
vp->xstats.tx_single_collisions += ioread8(ioaddr + 3);
vp->xstats.tx_deferred += ioread8(ioaddr + 8);
EL3WINDOW(4);
vp->xstats.rx_bad_ssd += ioread8(ioaddr + 12);
vp->stats.collisions = vp->xstats.tx_multiple_collisions
+ vp->xstats.tx_single_collisions
+ vp->xstats.tx_max_collisions;
{
u8 up = ioread8(ioaddr + 13);
vp->stats.rx_bytes += (up & 0x0f) << 16;
vp->stats.tx_bytes += (up & 0xf0) << 12;
}
EL3WINDOW(old_window >> 13);
return;
}
static int vortex_nway_reset(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_nway_restart(&vp->mii);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_ethtool_gset(&vp->mii, cmd);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int rc;
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
rc = mii_ethtool_sset(&vp->mii, cmd);
spin_unlock_irqrestore(&vp->lock, flags);
return rc;
}
static u32 vortex_get_msglevel(struct net_device *dev)
{
return vortex_debug;
}
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
vortex_debug = dbg;
}
static int vortex_get_stats_count(struct net_device *dev)
{
return VORTEX_NUM_STATS;
}
static void vortex_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
spin_lock_irqsave(&vp->lock, flags);
update_stats(ioaddr, dev);
spin_unlock_irqrestore(&vp->lock, flags);
data[0] = vp->xstats.tx_deferred;
data[1] = vp->xstats.tx_max_collisions;
data[2] = vp->xstats.tx_multiple_collisions;
data[3] = vp->xstats.tx_single_collisions;
data[4] = vp->xstats.rx_bad_ssd;
}
static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
default:
WARN_ON(1);
break;
}
}
static void vortex_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct vortex_private *vp = netdev_priv(dev);
strcpy(info->driver, DRV_NAME);
if (VORTEX_PCI(vp)) {
strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
} else {
if (VORTEX_EISA(vp))
sprintf(info->bus_info, vp->gendev->bus_id);
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
}
}
static const struct ethtool_ops vortex_ethtool_ops = {
.get_drvinfo = vortex_get_drvinfo,
.get_strings = vortex_get_strings,
.get_msglevel = vortex_get_msglevel,
.set_msglevel = vortex_set_msglevel,
.get_ethtool_stats = vortex_get_ethtool_stats,
.get_stats_count = vortex_get_stats_count,
.get_settings = vortex_get_settings,
.set_settings = vortex_set_settings,
.get_link = ethtool_op_get_link,
.get_perm_addr = ethtool_op_get_perm_addr,
};
#ifdef CONFIG_PCI
/*
* Must power the device up to do MDIO operations
*/
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int err;
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
unsigned long flags;
int state = 0;
if(VORTEX_PCI(vp))
state = VORTEX_PCI(vp)->current_state;
/* The kernel core really should have pci_get_power_state() */
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
spin_lock_irqsave(&vp->lock, flags);
EL3WINDOW(4);
err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
spin_unlock_irqrestore(&vp->lock, flags);
if(state != 0)
pci_set_power_state(VORTEX_PCI(vp), state);
return err;
}
#endif
/* Pre-Cyclone chips have no documented multicast filter, so the only
multicast setting is to receive all multicast frames. At least
the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct net_device *dev)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int new_mode;
if (dev->flags & IFF_PROMISC) {
if (vortex_debug > 3)
printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
iowrite16(new_mode, ioaddr + EL3_CMD);
}
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
Note that this must be done after each RxReset due to some backwards
compatibility logic in the Cyclone and Tornado ASICs */
/* The Ethernet Type used for 802.1q tagged frames */
#define VLAN_ETHER_TYPE 0x8100
static void set_8021q_mode(struct net_device *dev, int enable)
{
struct vortex_private *vp = netdev_priv(dev);
void __iomem *ioaddr = vp->ioaddr;
int old_window = ioread16(ioaddr + EL3_CMD);
int mac_ctrl;
if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
/* cyclone and tornado chipsets can recognize 802.1q
* tagged frames and treat them correctly */
int max_pkt_size = dev->mtu+14; /* MTU+Ethernet header */
if (enable)
max_pkt_size += 4; /* 802.1Q VLAN tag */
EL3WINDOW(3);
iowrite16(max_pkt_size, ioaddr+Wn3_MaxPktSize);
/* set VlanEtherType to let the hardware checksumming
treat tagged frames correctly */
EL3WINDOW(7);
iowrite16(VLAN_ETHER_TYPE, ioaddr+Wn7_VlanEtherType);
} else {
/* on older cards we have to enable large frames */
vp->large_frames = dev->mtu > 1500 || enable;
EL3WINDOW(3);
mac_ctrl = ioread16(ioaddr+Wn3_MAC_Ctrl);
if (vp->large_frames)
mac_ctrl |= 0x40;
else
mac_ctrl &= ~0x40;
iowrite16(mac_ctrl, ioaddr+Wn3_MAC_Ctrl);
}
EL3WINDOW(old_window >> 13);
}
#else
static void set_8021q_mode(struct net_device *dev, int enable)
{
}
#endif
/* MII transceiver control section.
Read and write the MII registers using software-generated serial
MDIO protocol. See the MII specifications or DP83840A data sheet