tp = netdev_priv(dev);
/* note: tp->chipset set in rtl8139_init_board */
tp->drv_flags = board_info[ent->driver_data].hw_flags;
tp->mmio_addr = ioaddr;
tp->msg_enable =
(debug < 0 ? RTL8139_DEF_MSG_ENABLE : ((1 << debug) - 1));
spin_lock_init (&tp->lock);
spin_lock_init (&tp->rx_lock);
INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
tp->mii.dev = dev;
tp->mii.mdio_read = mdio_read;
tp->mii.mdio_write = mdio_write;
tp->mii.phy_id_mask = 0x3f;
tp->mii.reg_num_mask = 0x1f;
/* dev is fully set up and ready to use now */
pr_debug("about to register device named %s (%p)...\n",
dev->name, dev);
i = register_netdev (dev);
if (i) goto err_out;
pci_set_drvdata (pdev, dev);
netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
board_info[ent->driver_data].name,
dev->base_addr, dev->dev_addr, dev->irq);
netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
rtl_chip_info[tp->chipset].name);
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
takes too much time. */
#ifdef CONFIG_8139TOO_8129
if (tp->drv_flags & HAS_MII_XCVR) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
int mii_status = mdio_read(dev, phy, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
tp->phys[0] = 32;
}
} else
#endif
tp->phys[0] = 32;
tp->mii.phy_id = tp->phys[0];
/* The lower four bits are the media type. */
option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
if (option > 0) {
tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
tp->default_port = option & 0xFF;
if (tp->default_port)
tp->mii.force_media = 1;
}
if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
tp->mii.full_duplex = full_duplex[board_idx];
if (tp->mii.full_duplex) {
netdev_info(dev, "Media type forced to Full Duplex\n");
/* Changing the MII-advertised media might prevent re-connection. */
tp->mii.force_media = 1;
}
if (tp->default_port) {
netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
(option & 0x20 ? 100 : 10),
(option & 0x10 ? "full" : "half"));
mdio_write(dev, tp->phys[0], 0,
((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
}
/* Put the chip into low-power mode. */
if (rtl_chip_info[tp->chipset].flags & HasHltClk)
RTL_W8 (HltClk, 'H'); /* 'R' would leave the clock running. */
return 0;
err_out:
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
}
static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata (pdev);
struct rtl8139_private *tp = netdev_priv(dev);
cancel_delayed_work_sync(&tp->thread);
unregister_netdev (dev);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
}
/* Serial EEPROM section. */
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
#define EE_WRITE_0 0x00
#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
No extra delay is needed with 33MHz PCI, but 66MHz may change this.
*/
#define eeprom_delay() (void)RTL_R32(Cfg9346)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD (5)
#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
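/* read_eeprom() below bit-bangs a 93C46-style serial EEPROM protocol through
   the Cfg9346 register (addr_len selects the address width): assert chip
   select, clock out the start bit, the read opcode and the address, then
   clock 16 data bits back in. */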
static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_len)
{
int i;
unsigned retval = 0;
int read_cmd = location | (EE_READ_CMD << addr_len);
RTL_W8 (Cfg9346, EE_ENB & ~EE_CS);
RTL_W8 (Cfg9346, EE_ENB);
eeprom_delay ();
/* Shift the read command bits out. */
for (i = 4 + addr_len; i >= 0; i--) {
int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
RTL_W8 (Cfg9346, EE_ENB | dataval);
eeprom_delay ();
RTL_W8 (Cfg9346, EE_ENB | dataval | EE_SHIFT_CLK);
eeprom_delay ();
}
RTL_W8 (Cfg9346, EE_ENB);
eeprom_delay ();
for (i = 16; i > 0; i--) {
RTL_W8 (Cfg9346, EE_ENB | EE_SHIFT_CLK);
eeprom_delay ();
retval = (retval << 1) | ((RTL_R8 (Cfg9346) & EE_DATA_READ) ? 1 : 0);
RTL_W8 (Cfg9346, EE_ENB);
eeprom_delay ();
}
/* Terminate the EEPROM access. */
RTL_W8 (Cfg9346, 0);
eeprom_delay ();
return retval;
}
/* MII serial management: mostly bogus for now. */
/* Read and write the MII management registers using software-generated
serial MDIO protocol.
The maximum data clock rate is 2.5 MHz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
#define MDIO_DIR 0x80
#define MDIO_DATA_OUT 0x04
#define MDIO_DATA_IN 0x02
#define MDIO_CLK 0x01
#define MDIO_WRITE0 (MDIO_DIR)
#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
#define mdio_delay() RTL_R8(Config4)
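/* Map from generic MII register numbers (BMCR, BMSR, ANAR, ANLPAR, ANER) to
   the equivalent internal 8139 registers; a zero entry means the MII register
   has no 8139 counterpart. */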
static const char mii_2_8139_map[8] = {
BasicModeCtrl,
BasicModeStatus,
0,
0,
NWayAdvert,
NWayLPAR,
NWayExpansion,
0
};
#ifdef CONFIG_8139TOO_8129
/* Synchronize the MII management interface by shifting 32 one bits out. */
static void mdio_sync (void __iomem *ioaddr)
{
int i;
for (i = 32; i >= 0; i--) {
RTL_W8 (Config4, MDIO_WRITE1);
mdio_delay ();
RTL_W8 (Config4, MDIO_WRITE1 | MDIO_CLK);
mdio_delay ();
}
}
#endif
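/* phy_id 32 is the pseudo-PHY assigned in the probe path above and means
   "use the 8139's internal registers"; only the 8129 build, which has a real
   MDIO bus to an external transceiver, bit-bangs a management frame through
   Config4. */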
static int mdio_read (struct net_device *dev, int phy_id, int location)
{
struct rtl8139_private *tp = netdev_priv(dev);
int retval = 0;
#ifdef CONFIG_8139TOO_8129
void __iomem *ioaddr = tp->mmio_addr;
int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
int i;
#endif
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
void __iomem *ioaddr = tp->mmio_addr;
return location < 8 && mii_2_8139_map[location] ?
RTL_R16 (mii_2_8139_map[location]) : 0;
}
#ifdef CONFIG_8139TOO_8129
mdio_sync (ioaddr);
/* Shift the read command bits out. */
for (i = 15; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
RTL_W8 (Config4, MDIO_DIR | dataval);
mdio_delay ();
RTL_W8 (Config4, MDIO_DIR | dataval | MDIO_CLK);
mdio_delay ();
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 19; i > 0; i--) {
RTL_W8 (Config4, 0);
mdio_delay ();
retval = (retval << 1) | ((RTL_R8 (Config4) & MDIO_DATA_IN) ? 1 : 0);
RTL_W8 (Config4, MDIO_CLK);
mdio_delay ();
}
#endif
return (retval >> 1) & 0xffff;
}
static void mdio_write (struct net_device *dev, int phy_id, int location,
int value)
{
struct rtl8139_private *tp = netdev_priv(dev);
#ifdef CONFIG_8139TOO_8129
void __iomem *ioaddr = tp->mmio_addr;
int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
int i;
#endif
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
void __iomem *ioaddr = tp->mmio_addr;
if (location == 0) {
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W16 (BasicModeCtrl, value);
RTL_W8 (Cfg9346, Cfg9346_Lock);
} else if (location < 8 && mii_2_8139_map[location])
RTL_W16 (mii_2_8139_map[location], value);
return;
}
#ifdef CONFIG_8139TOO_8129
mdio_sync (ioaddr);
/* Shift the command bits out. */
for (i = 31; i >= 0; i--) {
int dataval =
(mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
RTL_W8 (Config4, dataval);
mdio_delay ();
RTL_W8 (Config4, dataval | MDIO_CLK);
mdio_delay ();
}
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
RTL_W8 (Config4, 0);
mdio_delay ();
RTL_W8 (Config4, MDIO_CLK);
mdio_delay ();
}
#endif
}
static int rtl8139_open (struct net_device *dev)
{
struct rtl8139_private *tp = netdev_priv(dev);
int retval;
void __iomem *ioaddr = tp->mmio_addr;
retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
if (retval)
return retval;
tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
&tp->tx_bufs_dma, GFP_KERNEL);
tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
&tp->rx_ring_dma, GFP_KERNEL);
if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
free_irq(dev->irq, dev);
if (tp->tx_bufs)
dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
tp->tx_bufs, tp->tx_bufs_dma);
if (tp->rx_ring)
dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
return -ENOMEM;
}
napi_enable(&tp->napi);
tp->mii.full_duplex = tp->mii.force_media;
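/* Seed the per-packet Tx flags: bits 16-21 of the TxStatus value hold the
   early-Tx FIFO threshold (in 32-byte units); rtl8139_tx_interrupt() bumps it
   on transmit underruns. */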
tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
rtl8139_init_ring (dev);
rtl8139_hw_start (dev);
netif_start_queue (dev);
netif_dbg(tp, ifup, dev,
"%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
__func__,
(unsigned long long)pci_resource_start (tp->pci_dev, 1),
dev->irq, RTL_R8 (MediaStatus),
tp->mii.full_duplex ? "full" : "half");
rtl8139_start_thread(tp);
return 0;
}
static void rtl_check_media (struct net_device *dev, unsigned int init_media)
{
struct rtl8139_private *tp = netdev_priv(dev);
if (tp->phys[0] >= 0) {
mii_check_media(&tp->mii, netif_msg_link(tp), init_media);
}
}
/* Start the hardware at open or resume. */
static void rtl8139_hw_start (struct net_device *dev)
{
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 i;
u8 tmp;
/* Bring old chips out of low-power mode. */
if (rtl_chip_info[tp->chipset].flags & HasHltClk)
RTL_W8 (HltClk, 'R');
rtl8139_chip_reset (ioaddr);
/* unlock Config[01234] and BMCR register writes */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
/* Restore our idea of the MAC address. */
RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
tp->cur_rx = 0;
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
RTL_W32 (RxConfig, tp->rx_config);
RTL_W32 (TxConfig, rtl8139_tx_config);
rtl_check_media (dev, 1);
if (tp->chipset >= CH_8139B) {
/* Disable magic packet scanning, which is enabled
* when PM is enabled in Config1. It can be reenabled
* via ETHTOOL_SWOL if desired. */
RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
}
netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
RTL_W32 (RxMissed, 0);
rtl8139_set_rx_mode (dev);
/* no early-rx interrupts */
RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
/* make sure RxTx has started */
tmp = RTL_R8 (ChipCmd);
if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
/* Enable all known interrupts by setting the interrupt mask. */
RTL_W16 (IntrMask, rtl8139_intr_mask);
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void rtl8139_init_ring (struct net_device *dev)
{
struct rtl8139_private *tp = netdev_priv(dev);
int i;
tp->cur_rx = 0;
tp->cur_tx = 0;
tp->dirty_tx = 0;
for (i = 0; i < NUM_TX_DESC; i++)
tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
}
/* This must be global for CONFIG_8139TOO_TUNE_TWISTER case */
static int next_tick = 3 * HZ;
#ifndef CONFIG_8139TOO_TUNE_TWISTER
static inline void rtl8139_tune_twister (struct net_device *dev,
struct rtl8139_private *tp) {}
#else
enum TwisterParamVals {
PARA78_default = 0x78fa8388,
PARA7c_default = 0xcb38de43, /* param[0][3] */
PARA7c_xxx = 0xcb38de43,
};
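/* Tuning words written to PARA7c, indexed as param[row][step]: the row is
   picked from the CSCR link-status bits in state 2 of the state machine
   below, and the four steps are written out one per 100 ms in state 3. */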
static const unsigned long param[4][4] = {
{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
};
static void rtl8139_tune_twister (struct net_device *dev,
struct rtl8139_private *tp)
{
int linkcase;
void __iomem *ioaddr = tp->mmio_addr;
/* This is a complicated state machine to configure the "twister" for
impedance/echoes based on the cable length.
All of this is magic and undocumented.
*/
switch (tp->twistie) {
case 1:
if (RTL_R16 (CSCR) & CSCR_LinkOKBit) {
/* We have link beat, let us tune the twister. */
RTL_W16 (CSCR, CSCR_LinkDownOffCmd);
tp->twistie = 2; /* Change to state 2. */
next_tick = HZ / 10;
} else {
/* Just put in some reasonable defaults for when beat returns. */
RTL_W16 (CSCR, CSCR_LinkDownCmd);
RTL_W32 (FIFOTMS, 0x20); /* Turn on cable test mode. */
RTL_W32 (PARA78, PARA78_default);
RTL_W32 (PARA7c, PARA7c_default);
tp->twistie = 0; /* Bail from future actions. */
}
break;
case 2:
/* Read how long it took to hear the echo. */
linkcase = RTL_R16 (CSCR) & CSCR_LinkStatusBits;
if (linkcase == 0x7000)
tp->twist_row = 3;
else if (linkcase == 0x3000)
tp->twist_row = 2;
else if (linkcase == 0x1000)
tp->twist_row = 1;
else
tp->twist_row = 0;
tp->twist_col = 0;
tp->twistie = 3; /* Change to state 3. */
next_tick = HZ / 10;
break;
case 3:
/* Put out four tuning parameters, one per 100msec. */
if (tp->twist_col == 0)
RTL_W16 (FIFOTMS, 0);
RTL_W32 (PARA7c, param[(int) tp->twist_row]
[(int) tp->twist_col]);
next_tick = HZ / 10;
if (++tp->twist_col >= 4) {
/* For short cables we are done.
For long cables (row == 3) check for mistune. */
tp->twistie =
(tp->twist_row == 3) ? 4 : 0;
}
break;
case 4:
/* Special case for long cables: check for mistune. */
if ((RTL_R16 (CSCR) &
CSCR_LinkStatusBits) == 0x7000) {
tp->twistie = 0;
break;
} else {
RTL_W32 (PARA7c, 0xfb38de03);
tp->twistie = 5;
next_tick = HZ / 10;
}
break;
case 5:
/* Retune for shorter cable (column 2). */
RTL_W32 (FIFOTMS, 0x20);
RTL_W32 (PARA78, PARA78_default);
RTL_W32 (PARA7c, PARA7c_default);
RTL_W32 (FIFOTMS, 0x00);
tp->twist_row = 2;
tp->twist_col = 0;
tp->twistie = 3;
next_tick = HZ / 10;
break;
default:
/* do nothing */
break;
}
}
#endif /* CONFIG_8139TOO_TUNE_TWISTER */
static inline void rtl8139_thread_iter (struct net_device *dev,
struct rtl8139_private *tp,
void __iomem *ioaddr)
{
int mii_lpa;
mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
if (!tp->mii.force_media && mii_lpa != 0xffff) {
int duplex = ((mii_lpa & LPA_100FULL) ||
(mii_lpa & 0x01C0) == 0x0040);
if (tp->mii.full_duplex != duplex) {
tp->mii.full_duplex = duplex;
if (mii_lpa) {
netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
tp->mii.full_duplex ? "full" : "half",
tp->phys[0], mii_lpa);
} else {
netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
}
#if 0
RTL_W8 (Cfg9346, Cfg9346_Unlock);
RTL_W8 (Config1, tp->mii.full_duplex ? 0x60 : 0x20);
RTL_W8 (Cfg9346, Cfg9346_Lock);
#endif
}
}
next_tick = HZ * 60;
rtl8139_tune_twister (dev, tp);
netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
RTL_R16(NWayLPAR));
netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
RTL_R16(IntrMask), RTL_R16(IntrStatus));
netdev_dbg(dev, "Chip config %02x %02x\n",
RTL_R8(Config0), RTL_R8(Config1));
}
static void rtl8139_thread (struct work_struct *work)
{
struct rtl8139_private *tp =
container_of(work, struct rtl8139_private, thread.work);
struct net_device *dev = tp->mii.dev;
unsigned long thr_delay = next_tick;
rtnl_lock();
if (!netif_running(dev))
goto out_unlock;
if (tp->watchdog_fired) {
tp->watchdog_fired = 0;
rtl8139_tx_timeout_task(work);
} else
rtl8139_thread_iter(dev, tp, tp->mmio_addr);
if (tp->have_thread)
schedule_delayed_work(&tp->thread, thr_delay);
out_unlock:
rtnl_unlock ();
}
static void rtl8139_start_thread(struct rtl8139_private *tp)
{
tp->twistie = 0;
if (tp->chipset == CH_8139_K)
tp->twistie = 1;
else if (tp->drv_flags & HAS_LNK_CHNG)
return;
tp->have_thread = 1;
tp->watchdog_fired = 0;
schedule_delayed_work(&tp->thread, next_tick);
}
static inline void rtl8139_tx_clear (struct rtl8139_private *tp)
{
tp->cur_tx = 0;
tp->dirty_tx = 0;
/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
}
static void rtl8139_tx_timeout_task (struct work_struct *work)
{
struct rtl8139_private *tp =
container_of(work, struct rtl8139_private, thread.work);
struct net_device *dev = tp->mii.dev;
void __iomem *ioaddr = tp->mmio_addr;
int i;
u8 tmp8;
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus),
RTL_R16(IntrMask), RTL_R8(MediaStatus));
netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
netdev_dbg(dev, "Tx descriptor %d is %08x%s\n",
i, RTL_R32(TxStatus0 + (i * 4)),
i == tp->dirty_tx % NUM_TX_DESC ?
" (queue head)" : "");
tp->xstats.tx_timeouts++;
/* disable Tx ASAP, if not already */
tmp8 = RTL_R8 (ChipCmd);
if (tmp8 & CmdTxEnb)
RTL_W8 (ChipCmd, CmdRxEnb);
spin_lock_bh(&tp->rx_lock);
/* Disable interrupts by clearing the interrupt mask. */
RTL_W16 (IntrMask, 0x0000);
/* Stop a shared interrupt from scavenging while we are. */
spin_lock_irq(&tp->lock);
rtl8139_tx_clear (tp);
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
if (netif_running(dev)) {
rtl8139_hw_start (dev);
netif_wake_queue (dev);
}
spin_unlock_bh(&tp->rx_lock);
}
static void rtl8139_tx_timeout (struct net_device *dev)
{
struct rtl8139_private *tp = netdev_priv(dev);
tp->watchdog_fired = 1;
if (!tp->have_thread) {
INIT_DELAYED_WORK(&tp->thread, rtl8139_thread);
schedule_delayed_work(&tp->thread, next_tick);
}
}
static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned int entry;
unsigned int len = skb->len;
unsigned long flags;
/* Calculate the next Tx descriptor entry. */
entry = tp->cur_tx % NUM_TX_DESC;
/* Note: the chip doesn't have auto-pad! */
if (likely(len < TX_BUF_SIZE)) {
if (len < ETH_ZLEN)
memset(tp->tx_buf[entry], 0, ETH_ZLEN);
skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
dev_kfree_skb(skb);
} else {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
spin_lock_irqsave(&tp->lock, flags);
/*
* Writing to TxStatus triggers a DMA transfer of the data
* copied to tp->tx_buf[entry] above. Use a memory barrier
* to make sure that the device sees the updated data.
*/
wmb();
RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
tp->cur_tx++;
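/* All NUM_TX_DESC descriptors are now in flight; stop the queue until
   rtl8139_tx_interrupt() retires one and wakes it again. */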
if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
netif_stop_queue (dev);
spin_unlock_irqrestore(&tp->lock, flags);
netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
len, entry);
return NETDEV_TX_OK;
}
static void rtl8139_tx_interrupt (struct net_device *dev,
struct rtl8139_private *tp,
void __iomem *ioaddr)
{
unsigned long dirty_tx, tx_left;
assert (dev != NULL);
assert (ioaddr != NULL);
dirty_tx = tp->dirty_tx;
tx_left = tp->cur_tx - dirty_tx;
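/* Retire descriptors from dirty_tx up to cur_tx, stopping at the first one
   whose TxStatus shows the transmit has not finished yet. */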
while (tx_left > 0) {
int entry = dirty_tx % NUM_TX_DESC;
int txstatus;
txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
break; /* It still hasn't been Txed */
/* Note: TxCarrierLost is always asserted at 100Mbps. */
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was a major error, log it. */
netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
dev->stats.tx_aborted_errors++;
RTL_W32 (TxConfig, TxClearAbt);
RTL_W16 (IntrStatus, TxErr);
wmb();
}
if (txstatus & TxCarrierLost)
dev->stats.tx_carrier_errors++;
if (txstatus & TxOutOfWindow)
dev->stats.tx_window_errors++;
} else {
if (txstatus & TxUnderrun) {
/* Add 64 to the Tx FIFO threshold. */
if (tp->tx_flag < 0x00300000)
tp->tx_flag += 0x00020000;
dev->stats.tx_fifo_errors++;
}
dev->stats.collisions += (txstatus >> 24) & 15;
dev->stats.tx_bytes += txstatus & 0x7ff;
dev->stats.tx_packets++;
}
dirty_tx++;
tx_left--;
}
#ifndef RTL8139_NDEBUG
if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
dirty_tx, tp->cur_tx);
dirty_tx += NUM_TX_DESC;
}
#endif /* RTL8139_NDEBUG */
/* only wake the queue if we did work, and the queue is stopped */
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
mb();
netif_wake_queue (dev);
}
}
/* TODO: clean this up! Rx reset need not be this intensive */
static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
struct rtl8139_private *tp, void __iomem *ioaddr)
{
u8 tmp8;
#ifdef CONFIG_8139_OLD_RX_RESET
int tmp_work;
#endif
netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
rx_status);
dev->stats.rx_errors++;
if (!(rx_status & RxStatusOK)) {
if (rx_status & RxTooLong) {
netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
rx_status);
/* A.C.: The chip hangs here. */
}
if (rx_status & (RxBadSymbol | RxBadAlign))
dev->stats.rx_frame_errors++;
if (rx_status & (RxRunt | RxTooLong))
dev->stats.rx_length_errors++;
if (rx_status & RxCrcErr)
dev->stats.rx_crc_errors++;
} else {
tp->xstats.rx_lost_in_ring++;
}
#ifndef CONFIG_8139_OLD_RX_RESET
tmp8 = RTL_R8 (ChipCmd);
RTL_W8 (ChipCmd, tmp8 & ~CmdRxEnb);
RTL_W8 (ChipCmd, tmp8);
RTL_W32 (RxConfig, tp->rx_config);
tp->cur_rx = 0;
#else
/* Reset the receiver, based on RealTek recommendation. (Bug?) */
/* disable receive */
RTL_W8_F (ChipCmd, CmdTxEnb);
tmp_work = 200;
while (--tmp_work > 0) {
udelay(1);
tmp8 = RTL_R8 (ChipCmd);
if (!(tmp8 & CmdRxEnb))
break;
}
if (tmp_work <= 0)
netdev_warn(dev, "rx stop wait too long\n");
/* restart receive */
tmp_work = 200;
while (--tmp_work > 0) {
RTL_W8_F (ChipCmd, CmdRxEnb | CmdTxEnb);
udelay(1);
tmp8 = RTL_R8 (ChipCmd);
if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
break;
}
if (tmp_work <= 0)
netdev_warn(dev, "tx/rx enable wait too long\n");
/* and reinitialize all rx related registers */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
RTL_W32 (RxConfig, tp->rx_config);
tp->cur_rx = 0;
netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* A.C.: Reset the multicast list. */
__set_rx_mode (dev);
#endif
}
#if RX_BUF_IDX == 3
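/* With the 64K receive ring the chip wraps a packet around at the end of the
   buffer rather than writing past it, so a wrapping packet must be copied
   into the skb in two pieces. */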
static inline void wrap_copy(struct sk_buff *skb, const unsigned char *ring,
u32 offset, unsigned int size)
{
u32 left = RX_BUF_LEN - offset;
if (size > left) {
skb_copy_to_linear_data(skb, ring + offset, left);
skb_copy_to_linear_data_offset(skb, left, ring, size - left);
} else
skb_copy_to_linear_data(skb, ring + offset, size);
}
#endif
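/* Acknowledge the Rx-related interrupt sources by writing the ack bits back
   to IntrStatus (the bits are write-one-to-clear), counting FIFO and ring
   overflows along the way. */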
static void rtl8139_isr_ack(struct rtl8139_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
u16 status;
status = RTL_R16 (IntrStatus) & RxAckBits;
/* Clear out errors and receive interrupts */
if (likely(status != 0)) {
if (unlikely(status & (RxFIFOOver | RxOverflow))) {
tp->dev->stats.rx_errors++;
tp->dev->stats.rx_fifo_errors++;
}
RTL_W16_F (IntrStatus, RxAckBits);
}
}
static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
int budget)
{
void __iomem *ioaddr = tp->mmio_addr;
int received = 0;
unsigned char *rx_ring = tp->rx_ring;
unsigned int cur_rx = tp->cur_rx;
unsigned int rx_size = 0;
netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
__func__, (u16)cur_rx,
RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
while (netif_running(dev) && received < budget &&
(RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
u32 ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
unsigned int pkt_size;
struct sk_buff *skb;
rmb();
/* read size+status of next frame from DMA ring buffer */
rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;
netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
__func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
print_hex_dump(KERN_DEBUG, "Frame contents: ",
DUMP_PREFIX_OFFSET, 16, 1,
&rx_ring[ring_offset], 70, true);
#endif
/* Packet copy from FIFO still in progress.
* Theoretically, this should never happen
* since EarlyRx is disabled.
*/
if (unlikely(rx_size == 0xfff0)) {
if (!tp->fifo_copy_timeout)
tp->fifo_copy_timeout = jiffies + 2;
else if (time_after(jiffies, tp->fifo_copy_timeout)) {
netdev_dbg(dev, "hung FIFO. Reset\n");
netif_dbg(tp, intr, dev, "fifo copy in progress\n");
tp->xstats.early_rx++;
break;
}
no_early_rx:
tp->fifo_copy_timeout = 0;
/* If Rx err or invalid rx_size/rx_status received
* (which happens if we get lost in the ring),
* Rx process gets reset, so we abort any further
* Rx processing.
*/
if (unlikely((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
(rx_size < 8) ||
(!(rx_status & RxStatusOK)))) {
rtl8139_rx_err (rx_status, dev, tp, ioaddr);
received = -1;
goto out;
}
/* Malloc up new buffer, compatible with net-2e. */
/* Omit the four octet CRC from the length. */
skb = netdev_alloc_skb_ip_align(dev, pkt_size);
if (likely(skb)) {
#if RX_BUF_IDX == 3
wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
#else
skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
#endif
skb_put (skb, pkt_size);
skb->protocol = eth_type_trans (skb, dev);