static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct corkscrew_private *vp = netdev_priv(dev);
int ioaddr = dev->base_addr;
/* Block a timer-based transmit from overlapping. */
netif_stop_queue(dev);
if (vp->full_bus_master_tx) { /* BOOMERANG bus-master */
/* Calculate the next Tx descriptor entry. */
int entry = vp->cur_tx % TX_RING_SIZE;
struct boom_tx_desc *prev_entry;
unsigned long flags;
int i;
if (vp->tx_full)	/* No room on the Tx ring; tell the stack to retry. */
	return NETDEV_TX_BUSY;
if (vp->cur_tx != 0)
prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
else
prev_entry = NULL;
if (corkscrew_debug > 3)
pr_debug("%s: Trying to send a packet, Tx index %d.\n",
dev->name, vp->cur_tx);
/* vp->tx_full = 1; */
vp->tx_skbuff[entry] = skb;
vp->tx_ring[entry].next = 0;
vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
vp->tx_ring[entry].length = skb->len | 0x80000000;
vp->tx_ring[entry].status = skb->len | 0x80000000;
spin_lock_irqsave(&vp->lock, flags);
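/* Stall the download (Tx DMA) engine before touching the descriptor
 * list; the DownUnstall below lets it run again. */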
outw(DownStall, ioaddr + EL3_CMD);
/* Wait for the stall to complete. */
for (i = 20; i >= 0; i--)
if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
break;
if (prev_entry)
prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]);
if (inl(ioaddr + DownListPtr) == 0) {
outl(isa_virt_to_bus(&vp->tx_ring[entry]),
ioaddr + DownListPtr);
queued_packet++;
}
outw(DownUnstall, ioaddr + EL3_CMD);
spin_unlock_irqrestore(&vp->lock, flags);
vp->cur_tx++;
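/* cur_tx counts descriptors queued; dirty_tx, advanced by the
 * interrupt handler, counts descriptors reclaimed.  Their difference
 * is the current ring occupancy. */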
if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
vp->tx_full = 1;
else { /* Clear previous interrupt enable. */
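/* Bit 31 of the status word requests a per-descriptor interrupt;
 * clearing it on the previous descriptor avoids an extra interrupt
 * for work that is already queued behind this packet. */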
if (prev_entry)
prev_entry->status &= ~0x80000000;
netif_wake_queue(dev);
}
dev->trans_start = jiffies;
return NETDEV_TX_OK;
}
/* Put out the doubleword header... */
outl(skb->len, ioaddr + TX_FIFO);
dev->stats.tx_bytes += skb->len;
#ifdef VORTEX_BUS_MASTER
if (vp->bus_master) {
/* Set the bus-master controller to transfer the packet. */
outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
vp->tx_skb = skb;
outw(StartDMADown, ioaddr + EL3_CMD);
/* queue will be woken at the DMADone interrupt. */
} else {
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb(skb);
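/* 1536 bytes is one maximum-sized frame: keep the queue awake only
 * while the FIFO can absorb another full packet. */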
if (inw(ioaddr + TxFree) > 1536) {
netif_wake_queue(dev);
} else
/* Interrupt us when the FIFO has room for max-sized packet. */
outw(SetTxThreshold + (1536 >> 2),
ioaddr + EL3_CMD);
}
#else
/* ... and the packet rounded to a doubleword. */
outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
dev_kfree_skb(skb);
if (inw(ioaddr + TxFree) > 1536) {
netif_wake_queue(dev);
} else
/* Interrupt us when the FIFO has room for max-sized packet. */
outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif /* bus master */
dev->trans_start = jiffies;
/* Clear the Tx status stack. */
{
short tx_status;
int i = 4;
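/* The chip queues several Tx completion statuses; pop each one,
 * recovering from any Tx-disabling error along the way. */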
while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
if (corkscrew_debug > 2)
pr_debug("%s: Tx error, status %2.2x.\n",
dev->stats.tx_fifo_errors++;
dev->stats.tx_aborted_errors++;
if (tx_status & 0x30) {
int j;
outw(TxReset, ioaddr + EL3_CMD);
for (j = 20; j >= 0; j--)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
}
outw(TxEnable, ioaddr + EL3_CMD);
}
outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
}
}
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
{
/* Use the now-standard shared IRQ implementation. */
struct net_device *dev = dev_id;
struct corkscrew_private *lp = netdev_priv(dev);
int ioaddr, status;
int latency;
int i = max_interrupt_work;
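/* Budget the work done per interrupt so a babbling board cannot
 * monopolize the CPU; see the bailout at the end of the loop. */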
ioaddr = dev->base_addr;
latency = inb(ioaddr + Timer);
spin_lock(&lp->lock);
status = inw(ioaddr + EL3_STATUS);
if (corkscrew_debug > 4)
pr_debug("%s: interrupt, status %4.4x, timer %d.\n",
dev->name, status, latency);
if ((status & 0xE000) != 0xE000) {
static int donedidthis;
/* Some interrupt controllers store a bogus interrupt from boot-time.
Ignore a single early interrupt, but don't hang the machine for
other interrupt problems. */
if (donedidthis++ > 100) {
pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
dev->name, status, netif_running(dev));
free_irq(dev->irq, dev);
dev->irq = -1;
}
}
do {
if (corkscrew_debug > 5)
pr_debug("%s: In interrupt loop, status %4.4x.\n",
dev->name, status);
if (status & RxComplete)
corkscrew_rx(dev);
if (status & TxAvailable) {
if (corkscrew_debug > 5)
pr_debug(" TX room bit was handled.\n");
/* There's room in the FIFO for a full-sized packet. */
outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
netif_wake_queue(dev);
}
if (status & DownComplete) {
unsigned int dirty_tx = lp->dirty_tx;
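/* Reclaim finished Tx descriptors.  DownListPtr holds the descriptor
 * the card is still working on, so stop when we reach it. */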
while (lp->cur_tx - dirty_tx > 0) {
int entry = dirty_tx % TX_RING_SIZE;
if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
break; /* It still hasn't been processed. */
if (lp->tx_skbuff[entry]) {
dev_kfree_skb_irq(lp->tx_skbuff[entry]);
lp->tx_skbuff[entry] = NULL;
}
dirty_tx++;
}
lp->dirty_tx = dirty_tx;
outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
lp->tx_full = 0;
netif_wake_queue(dev);
}
}
#ifdef VORTEX_BUS_MASTER
if (status & DMADone) {
outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
dev_kfree_skb_irq(lp->tx_skb); /* Release the transferred buffer */
netif_wake_queue(dev);
}
#endif
if (status & UpComplete) {
boomerang_rx(dev);
outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
}
if (status & (AdapterFailure | RxEarly | StatsFull)) {
/* Handle all uncommon interrupts at once. */
if (status & RxEarly) { /* Rx early is unused. */
corkscrew_rx(dev);
outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
}
if (status & StatsFull) { /* Empty statistics. */
static int DoneDidThat;
if (corkscrew_debug > 4)
pr_debug("%s: Updating stats.\n", dev->name);
update_stats(ioaddr, dev);
/* DEBUG HACK: Disable statistics as an interrupt source. */
/* This occurs when we have the wrong media type! */
if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
int win, reg;
pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n",
dev->name);
pr_notice("Vortex window %d:", win);
pr_cont(" %2.2x", inb(ioaddr + reg));
pr_cont("\n");
}
EL3WINDOW(7);
outw(SetIntrEnb | TxAvailable |
RxComplete | AdapterFailure |
UpComplete | DownComplete |
TxComplete, ioaddr + EL3_CMD);
DoneDidThat++;
}
}
if (status & AdapterFailure) {
/* Adapter failure requires Rx reset and reinit. */
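/* After an Rx reset the receive filter must be restored before
 * the receiver is turned back on. */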
outw(RxReset, ioaddr + EL3_CMD);
/* Set the Rx filter to the current state. */
set_rx_mode(dev);
outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
outw(AckIntr | AdapterFailure,
ioaddr + EL3_CMD);
}
}
if (--i < 0) {
pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n",
dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
/* Disable all pending interrupts. */
outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
break;
}
/* Acknowledge the IRQ. */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
spin_unlock(&lp->lock);
if (corkscrew_debug > 4)
pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
return IRQ_HANDLED;
}
static int corkscrew_rx(struct net_device *dev)
{
int ioaddr = dev->base_addr;
int i;
short rx_status;
if (corkscrew_debug > 5)
pr_debug(" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
if (rx_status & 0x4000) { /* Error, update stats. */
unsigned char rx_error = inb(ioaddr + RxErrors);
if (corkscrew_debug > 2)
pr_debug(" Rx error: status %2.2x.\n",
dev->stats.rx_over_errors++;
dev->stats.rx_length_errors++;
dev->stats.rx_frame_errors++;
dev->stats.rx_crc_errors++;
dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
short pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
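/* Allocate with slack: insl() below copies whole doublewords (up to
 * 3 bytes past pkt_len) and 2 bytes are reserved to align the IP
 * header. */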
skb = dev_alloc_skb(pkt_len + 5 + 2);
if (corkscrew_debug > 4)
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
if (skb != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
insl(ioaddr + RX_FIFO,
skb_put(skb, pkt_len),
(pkt_len + 3) >> 2);
outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
/* Wait a limited time to go to next packet. */
for (i = 200; i >= 0; i--)
if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
continue;
} else if (corkscrew_debug)
pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
		}
		outw(RxDiscard, ioaddr + EL3_CMD);	/* Discard the bad or unclaimed packet. */
		dev->stats.rx_dropped++;
		/* Wait a limited time to skip this packet. */
		for (i = 200; i >= 0; i--)
			if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
				break;
	}
	return 0;
}
static int boomerang_rx(struct net_device *dev)
{
struct corkscrew_private *vp = netdev_priv(dev);
int entry = vp->cur_rx % RX_RING_SIZE;
int ioaddr = dev->base_addr;
int rx_status;
if (corkscrew_debug > 5)
pr_debug(" In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
if (rx_status & RxDError) { /* Error, update stats. */
unsigned char rx_error = rx_status >> 16;
if (corkscrew_debug > 2)
pr_debug(" Rx error: status %2.2x.\n",
dev->stats.rx_over_errors++;
dev->stats.rx_length_errors++;
dev->stats.rx_frame_errors++;
dev->stats.rx_crc_errors++;
dev->stats.rx_length_errors++;
} else {
/* The packet length: up to 4.5K!. */
short pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
dev->stats.rx_bytes += pkt_len;
pr_debug("Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
/* Check if the packet is long enough to just accept without
copying to a properly sized skbuff. */
if (pkt_len < rx_copybreak &&
    (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
/* 'skb_put()' points to the start of sk_buff data area. */
memcpy(skb_put(skb, pkt_len),
       isa_bus_to_virt(vp->rx_ring[entry].addr), pkt_len);
rx_copy++;
} else {
void *temp;
/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
temp = skb_put(skb, pkt_len);
/* Remove this checking code for final release. */
if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
pr_warning("%s: Warning -- the skbuff addresses do not match"
	   " in boomerang_rx: %p vs. %p / %p.\n",
	   dev->name,
	   isa_bus_to_virt(vp->rx_ring[entry].addr),
	   skb->head, temp);
rx_nocopy++;
}
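/* rx_copy/rx_nocopy count how often each path runs; the totals are
 * reported (at higher debug levels) in corkscrew_close(). */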
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
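/* dirty_rx trails cur_rx; give every consumed descriptor a fresh
 * buffer.  On allocation failure, stop and retry on a later pass. */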
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = dev_alloc_skb(PKT_BUF_SZ);
if (skb == NULL)
break; /* Bad news! */
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
vp->rx_skbuff[entry] = skb;
}
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
}
return 0;
}
static int corkscrew_close(struct net_device *dev)
{
struct corkscrew_private *vp = netdev_priv(dev);
int ioaddr = dev->base_addr;
int i;
netif_stop_queue(dev);
if (corkscrew_debug > 1) {
pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n",
dev->name, inw(ioaddr + EL3_STATUS),
inb(ioaddr + TxStatus));
pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n",
dev->name, rx_nocopy, rx_copy, queued_packet);
}
del_timer(&vp->timer);
/* Turn off statistics ASAP. We update lp->stats below. */
outw(StatsDisable, ioaddr + EL3_CMD);
/* Disable the receiver and transmitter. */
outw(RxDisable, ioaddr + EL3_CMD);
outw(TxDisable, ioaddr + EL3_CMD);
if (dev->if_port == XCVR_10base2)
/* Turn off thinnet power. Green! */
outw(StopCoax, ioaddr + EL3_CMD);
free_irq(dev->irq, dev);
outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
update_stats(ioaddr, dev);
if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
outl(0, ioaddr + UpListPtr);
for (i = 0; i < RX_RING_SIZE; i++)
if (vp->rx_skbuff[i]) {
dev_kfree_skb(vp->rx_skbuff[i]);
vp->rx_skbuff[i] = NULL;
}
}
if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
outl(0, ioaddr + DownListPtr);
for (i = 0; i < TX_RING_SIZE; i++)
if (vp->tx_skbuff[i]) {
dev_kfree_skb(vp->tx_skbuff[i]);
vp->tx_skbuff[i] = NULL;
}
}
return 0;
}
static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
{
struct corkscrew_private *vp = netdev_priv(dev);
unsigned long flags;
if (netif_running(dev)) {
spin_lock_irqsave(&vp->lock, flags);
update_stats(dev->base_addr, dev);
spin_unlock_irqrestore(&vp->lock, flags);
}
	return &dev->stats;
}
/* Update statistics.
Unlike with the EL3 we need not worry about interrupts changing
the window setting from underneath us, but we must still guard
against a race condition with a StatsUpdate interrupt updating the
table. This is done by checking that the ASM (!) code generated uses
atomic updates with '+='.
*/
static void update_stats(int ioaddr, struct net_device *dev)
{
/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
/* Switch to the stats window, and read everything. */
EL3WINDOW(6);
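/* Every statistics register here is clear-on-read, so each one must
 * be read even when the value is discarded. */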
dev->stats.tx_carrier_errors += inb(ioaddr + 0);
dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
/* Multiple collisions: read to clear; value not accumulated. */
inb(ioaddr + 2);
dev->stats.collisions += inb(ioaddr + 3);
dev->stats.tx_window_errors += inb(ioaddr + 4);
dev->stats.rx_fifo_errors += inb(ioaddr + 5);
dev->stats.tx_packets += inb(ioaddr + 6);
dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
/* Rx packets: must be read to clear the counter. */
inb(ioaddr + 7);
/* Tx deferrals: likewise read-to-clear. */
inb(ioaddr + 8);
/* Don't bother with register 9, an extension of registers 6&7.
If we do use the 6&7 values the atomic update assumption above
is invalid. */
inw(ioaddr + 10); /* Total Rx and Tx octets. */
inw(ioaddr + 12);
/* New: On the Vortex we must also clear the BadSSD counter. */
EL3WINDOW(4);
inb(ioaddr + 12);
/* We change back to window 7 (not 1) with the Vortex. */
EL3WINDOW(7);
return;
}
/* This new version of set_rx_mode() supports v1.4 kernels.
The Vortex chip has no documented multicast filter, so the only
multicast setting is to receive all multicast frames. At least
the chip has a very clean way to set the mode, unlike many others. */
static void set_rx_mode(struct net_device *dev)
{
int ioaddr = dev->base_addr;
short new_mode;
if (dev->flags & IFF_PROMISC) {
if (corkscrew_debug > 3)
pr_debug("%s: Setting promiscuous mode.\n",
dev->name);
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
} else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
outw(new_mode, ioaddr + EL3_CMD);
}
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr);
}
static u32 netdev_get_msglevel(struct net_device *dev)
{
return corkscrew_debug;
}
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
corkscrew_debug = level;
}
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_msglevel = netdev_get_msglevel,
.set_msglevel = netdev_set_msglevel,
};
#ifdef MODULE
void cleanup_module(void)
{
while (!list_empty(&root_corkscrew_dev)) {
struct net_device *dev;
struct corkscrew_private *vp;
vp = list_entry(root_corkscrew_dev.next,
struct corkscrew_private, list);
dev = vp->our_dev;
unregister_netdev(dev);
cleanup_card(dev);
free_netdev(dev);
}
}
#endif /* MODULE */