/* Excerpt from the 3c527 (EtherLink/MC32) MCA Ethernet driver. */
/**
* mc32_send_packet - queue a frame for transmit
* @skb: buffer to transmit
* @dev: 3c527 to send it out of
*
* Transmit a buffer. This normally means throwing the buffer onto
* the transmit queue as the queue is quite large. If the queue is
* full then we set tx_busy and return. Once the interrupt handler
* gets messages telling it to reclaim transmit queue entries, we will
* clear tx_busy and the kernel will start calling this again.
*
* We do not disable interrupts or acquire any locks; this can
* run concurrently with mc32_tx_ring(), and the function itself
* is serialised at a higher layer. However, similarly for the
* card itself, we must ensure that we update tx_ring_head only
* after we've established a valid packet on the tx ring (and
* before we let the card "see" it, to prevent it racing with the
* irq handler).
static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
struct net_device *dev)
{
struct mc32_local *lp = netdev_priv(dev);
u32 head = atomic_read(&lp->tx_ring_head);
volatile struct skb_header *p, *np;
netif_stop_queue(dev);
if(atomic_read(&lp->tx_count)==0) {
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
/* P is the last sending/sent buffer as a pointer */
p=lp->tx_ring[head].p;
head = next_tx(head);
/* NP is the buffer we will be loading */
/* We will need this to flush the buffer out */
lp->tx_ring[head].skb=skb;
np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
np->data = isa_virt_to_bus(skb->data);
np->status = 0;
np->control = CONTROL_EOP | CONTROL_EOL;
/*
* The new frame has been setup; we can now
* let the interrupt handler and card "see" it
*/
atomic_set(&lp->tx_ring_head, head);
p->control &= ~CONTROL_EOL;
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
/**
* mc32_update_stats - pull off the on board statistics
* @dev: 3c527 to service
*
* Query and reset the on-card stats. There's the small possibility
* of a race here, which would result in an underestimation of
* actual errors. As such, we'd prefer to keep all our stats
* collection in software. As a rule, we do. However it can't be
* used for rx errors and collisions as, by default, the card discards
*
* Setting the SAV BP in the rx filter command supposedly
* stops this behaviour. However, testing shows that it only seems to
* enable the collation of on-card rx statistics --- the driver
* never sees an RX descriptor with an error status set.
*
*/
static void mc32_update_stats(struct net_device *dev)
{
struct mc32_local *lp = netdev_priv(dev);
volatile struct mc32_stats *st = lp->stats;
rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
dev->stats.rx_errors=rx_errors;
dev->stats.collisions+=st->dataC[10];
/* Number of packets which saw 2--15 collisions */
dev->stats.collisions+=st->dataC[11];
/**
* mc32_rx_ring - process the receive ring
* @dev: 3c527 that needs its receive ring processing
*
*
* We have received one or more indications from the card that a
* receive has completed. The buffer ring thus contains dirty
* entries. We walk the ring by iterating over the circular rx_ring
* array, starting at the next dirty buffer (which happens to be the
* one we finished up at last time around).
*
* For each completed packet, we will either copy it and pass it up
* the stack or, if the packet is near MTU sized, we allocate
* another buffer and flip the old one up the stack.
* We must succeed in keeping a buffer on the ring. If necessary we
* will toss a received packet rather than lose a ring entry. Once
* the first uncompleted descriptor is found, we move the
* End-Of-List bit to include the buffers just processed.
*
*/
static void mc32_rx_ring(struct net_device *dev)
{
struct mc32_local *lp = netdev_priv(dev);
volatile struct skb_header *p;
u16 rx_ring_tail;
u16 rx_old_tail;
int x=0;
rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
{
p=lp->rx_ring[rx_ring_tail].p;
if(!(p->status & (1<<7))) { /* Not COMPLETED */
struct sk_buff *skb;
struct sk_buff *newskb;
/* Try to save time by avoiding a copy on big frames */
if ((length > RX_COPYBREAK) &&
((newskb=dev_alloc_skb(1532)) != NULL))
skb=lp->rx_ring[rx_ring_tail].skb;
skb_put(skb, length);
skb_reserve(newskb,18);
lp->rx_ring[rx_ring_tail].skb=newskb;
p->data=isa_virt_to_bus(newskb->data);
}
else
dev->stats.rx_dropped++;
}
skb_reserve(skb,2);
memcpy(skb_put(skb, length),
lp->rx_ring[rx_ring_tail].skb->data, length);
}
skb->protocol=eth_type_trans(skb,dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += length;
rx_ring_tail=next_rx(rx_ring_tail);
/* If there was actually a frame to be processed, place the EOL bit */
/* at the descriptor prior to the one to be filled next */
if (rx_ring_tail != rx_old_tail)
{
lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;
}
}
/**
* mc32_tx_ring - process completed transmits
* @dev: 3c527 that needs its transmit ring processing
*
*
* This operates in a similar fashion to mc32_rx_ring. We iterate
* over the transmit ring. For each descriptor which has been
* processed by the card, we free its associated buffer and note
* any errors. This continues until the transmit ring is emptied
* or we reach a descriptor that hasn't yet been processed by the
* card.
static void mc32_tx_ring(struct net_device *dev)
{
struct mc32_local *lp = netdev_priv(dev);
volatile struct skb_header *np;
/*
* We rely on head==tail to mean 'queue empty'.
* This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
* tx_ring_head wrapping to tail and confusing a 'queue empty'
* condition with 'queue full'
*/
while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
{
u16 t;
t=next_tx(lp->tx_ring_tail);
np=lp->tx_ring[t].p;
/* Not COMPLETED */
break;
}
dev->stats.tx_packets++;
if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
{
dev->stats.tx_aborted_errors++;
dev->stats.tx_fifo_errors++;
dev->stats.tx_carrier_errors++;
dev->stats.tx_window_errors++;
dev->stats.tx_aborted_errors++;
}
}
/* Packets are sent in order - this is
basically a FIFO queue of buffers matching
the card ring */
dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
dev_kfree_skb_irq(lp->tx_ring[t].skb);
lp->tx_ring[t].skb=NULL;
atomic_inc(&lp->tx_count);
netif_wake_queue(dev);
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
/**
* mc32_interrupt - handle an interrupt from a 3c527
* @irq: Interrupt number
* @dev_id: 3c527 that requires servicing
* @regs: Registers (unused)
*
*
* An interrupt is raised whenever the 3c527 writes to the command
* register. This register contains the message it wishes to send us
* packed into a single byte field. We keep reading status entries
* until we have processed all the control items, but simply count
* transmit and receive reports. When all reports are in we empty the
* transceiver rings as appropriate. This saves the overhead of
* multiple command requests.
*
* Because MCA is level-triggered, we shouldn't miss indications.
* Therefore, we needn't ask the card to suspend interrupts within
* this handler. The card receives an implicit acknowledgment of the
* current interrupt when we read the command register.
*
*/
static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct mc32_local *lp;
int ioaddr, status, boguscount = 0;
int rx_event = 0;
ioaddr = dev->base_addr;
lp = netdev_priv(dev);
/* See whats cooking */
while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
{
status=inb(ioaddr+HOST_CMD);
pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
(status&7), (status>>3)&7, (status>>6)&1,
(status>>7)&1, boguscount);
switch(status&7)
{
case 0:
break;
case 6: /* TX fail */
case 2: /* TX ok */
break;
case 3: /* Halt */
case 4: /* Abort */
complete(&lp->xceiver_cmd);
break;
default:
pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
}
status>>=3;
switch(status&7)
{
case 0:
break;
case 2: /* RX */
break;
case 3: /* Halt */
case 4: /* Abort */
complete(&lp->xceiver_cmd);
break;
case 6:
/* Out of RX buffers stat */
/* Must restart rx */
dev->stats.rx_dropped++;
mc32_rx_ring(dev);
mc32_start_transceiver(dev);
pr_notice("%s: strange rx ack %d\n",
}
status>>=3;
if(status&1)
{
/*
* No thread is waiting: we need to tidy
* up ourself.
*/
mc32_reset_multicast_list(dev);
}
else complete(&lp->execution_cmd);
}
if(status&2)
{
/*
* We get interrupted once per
* counter that is about to overflow.
* Process the transmit and receive rings
mc32_rx_ring(dev);
return IRQ_HANDLED;
}
/**
 * mc32_close - user configuring the 3c527 down
 * @dev: 3c527 card to shut down
 *
 * The 3c527 is a bus mastering device. We must be careful how we
 * shut it down. It may also be running shared interrupt so we have
 * to be sure to silence it properly
 *
 * We indicate that the card is closing to the rest of the
 * driver. Otherwise, it is possible that the card may run out
 * of receive buffers and restart the transceiver while we're
 * trying to close it.
 * We abort any receive and transmits going on and then wait until
 * any pending exec commands have completed in other code threads.
 * In theory we can't get here while that is true, in practice I am
 * paranoid
 *
 * We turn off the interrupt enable for the board to be sure it can't
 * intefere with other devices.
 */
static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */

	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	mc32_update_stats(dev);

	return 0;
}
/**
* mc32_get_stats - hand back stats to network layer
* @dev: The 3c527 card to handle
*
* We've collected all the stats we can in software already. Now
* it's time to update those kept on-card and return the lot.
*
*/
static struct net_device_stats *mc32_get_stats(struct net_device *dev)
{
}
/**
* do_mc32_set_multicast_list - attempt to update multicasts
* @dev: 3c527 device to load the list on
* @retry: indicates this is not the first call.
*
*
* Actually set or clear the multicast filter for this adaptor. The
* locking issues are handled by this routine. We have to track
* state as it may take multiple calls to get the command sequence
* completed. We just keep trying to schedule the loads until we
* manage to process them all.
* num_addrs == -1 Promiscuous mode, receive all packets
* num_addrs == 0 Normal mode, clear multicast list
*
* num_addrs > 0 Multicast mode, receive normal and MC packets,
* and do best-effort filtering.
*
* See mc32_update_stats() regards setting the SAV BP bit.
*
*/
static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
struct mc32_local *lp = netdev_priv(dev);
u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
netdev_mc_count(dev) > 10)
else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
if(retry==0)
lp->mc_list_valid = 0;
if(!lp->mc_list_valid)
{
block[1]=0;
block[0]=netdev_mc_count(dev);
netdev_for_each_mc_addr(ha, dev) {
memcpy(bp, ha->addr, 6);
if(mc32_command_nowait(dev, 2, block,
2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
}
lp->mc_list_valid=1;
}
}
if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
lp->mc_reload_wait = 0;
}
}
/**
 * mc32_set_multicast_list - queue multicast list update
 * @dev: The 3c527 to use
 *
 * Called by the network core whenever the address filter changes.
 * Kicks off a fresh (retry==0) load of the multicast list, which
 * supersedes any load still in flight.
 */
static void mc32_set_multicast_list(struct net_device *dev)
{
	/* retry==0: this is a brand new request, not a continuation */
	do_mc32_set_multicast_list(dev, 0);
}
/**
 * mc32_reset_multicast_list - reset multicast list
 * @dev: The 3c527 to use
 *
 * Continue a previously interrupted multicast list load. If this
 * step cannot complete either, it will be rescheduled and this
 * function invoked again later from elsewhere.
 */
static void mc32_reset_multicast_list(struct net_device *dev)
{
	/* retry==1: resume the in-progress command sequence */
	do_mc32_set_multicast_list(dev, 1);
}
/* Fill in ethtool driver identification. The ethtool_drvinfo fields are
 * fixed-size arrays, so use bounded formatting instead of strcpy/sprintf
 * to guarantee NUL-termination and prevent overflow. */
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	snprintf(info->driver, sizeof(info->driver), "%s", DRV_NAME);
	snprintf(info->version, sizeof(info->version), "%s", DRV_VERSION);
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "MCA 0x%lx", dev->base_addr);
}
/* Report the driver's current debug message level (ethtool hook). */
static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}
/* Set the driver's debug message level (ethtool hook). */
static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}
/* ethtool operations exported by this driver: identification and
 * debug-message-level get/set only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
#ifdef MODULE
/* The single probed card when built as a module; see init_module(). */
static struct net_device *this_device;
/**
 * init_module - entry point
 *
 * Probe and locate a 3c527 card. This really should probe and locate
 * all the 3c527 cards in the machine not just one of them. Yes you can
 * insmod multiple modules for now but it's a hack.
 */
int __init init_module(void)
{
	this_device = mc32_probe(-1);
	/* mc32_probe() returns an ERR_PTR-encoded error on failure */
	return IS_ERR(this_device) ? PTR_ERR(this_device) : 0;
}
/**
 * cleanup_module - free resources for an unload
 *
 * Unloading time. We release the MCA bus resources and the interrupt
 * at which point everything is ready to unload. The card must be stopped
 * at this point or we would not have been called. When we unload we
 * leave the card stopped but not totally shut down. When the card is
 * initialized it must be rebooted or the rings reloaded before any
 * transmit operations are allowed to start scribbling into memory.
 */
void __exit cleanup_module(void)
{
	struct net_device *dev = this_device;

	/* Detach from the network stack, release bus/irq, then free. */
	unregister_netdev(dev);
	cleanup_card(dev);
	free_netdev(dev);
}
#endif /* MODULE */