/**
 *	mc32_send_packet	-	queue a frame for transmit
 *	@skb: buffer to transmit
 *	@dev: 3c527 to send it out of
 *
 *	Transmit a buffer. This normally means throwing the buffer onto
 *	the transmit queue as the queue is quite large. If the queue is
 *	full then we set tx_busy and return. Once the interrupt handler
 *	gets messages telling it to reclaim transmit queue entries, we will
 *	clear tx_busy and the kernel will start calling this again.
 *
 *      We do not disable interrupts or acquire any locks; this can
 *      run concurrently with mc32_tx_ring(), and the function itself
 *      is serialised at a higher layer. However, similarly for the
 *      card itself, we must ensure that we update tx_ring_head only
 *      after we've established a valid packet on the tx ring (and
 *      before we let the card "see" it, to prevent it racing with the
 *      irq handler).
 * 
 */

static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);
	
	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);
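	/* Re-woken below once the frame has a descriptor; the queue
	   only stays stopped if the ring is full (tx_count==0) */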

	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count); 

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;
		
	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p; 
	
	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;			
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control     = CONTROL_EOP | CONTROL_EOL;     
	wmb();
		
	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head); 
	p->control     &= ~CONTROL_EOL;
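	/* Clearing EOL on the previous descriptor (above) is what lets
	   the card advance onto the frame we have just queued */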

	netif_wake_queue(dev);
	return 0;
}


/**
 *	mc32_update_stats	-	pull off the on board statistics
 *	@dev: 3c527 to service
 *
 * 
 *	Query and reset the on-card stats. There's the small possibility
 *	of a race here, which would result in an underestimation of
 *	actual errors. As such, we'd prefer to keep all our stats
 *	collection in software. As a rule, we do. However, it can't be
 *	used for rx errors and collisions as, by default, the card discards
 *	bad rx packets. 
 *
 *	Setting the SAV BP in the rx filter command supposedly
 *	stops this behaviour. However, testing shows that it only seems to
 *	enable the collation of on-card rx statistics --- the driver
 *	never sees an RX descriptor with an error status set.
 *
 */

static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats; 

	u32 rx_errors=0; 
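	/* Each on-card counter is added to its per-category total and
	   then cleared; the updated totals are summed into rx_errors */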
      
	rx_errors+=lp->net_stats.rx_crc_errors   +=st->rx_crc_errors;         
	                                           st->rx_crc_errors=0;
	rx_errors+=lp->net_stats.rx_fifo_errors  +=st->rx_overrun_errors;   
	                                           st->rx_overrun_errors=0; 
	rx_errors+=lp->net_stats.rx_frame_errors +=st->rx_alignment_errors; 
 	                                           st->rx_alignment_errors=0;
	rx_errors+=lp->net_stats.rx_length_errors+=st->rx_tooshort_errors; 
	                                           st->rx_tooshort_errors=0;
	rx_errors+=lp->net_stats.rx_missed_errors+=st->rx_outofresource_errors;
	                                           st->rx_outofresource_errors=0; 
        lp->net_stats.rx_errors=rx_errors; 
						   
	/* Number of packets which saw one collision */
	lp->net_stats.collisions+=st->dataC[10];
	st->dataC[10]=0; 

	/* Number of packets which saw 2--15 collisions */ 
	lp->net_stats.collisions+=st->dataC[11]; 
	st->dataC[11]=0; 
}	


/**
 *	mc32_rx_ring	-	process the receive ring
 *	@dev: 3c527 that needs its receive ring processing
 *
 *
 *	We have received one or more indications from the card that a
 *	receive has completed. The buffer ring thus contains dirty
 *	entries. We walk the ring by iterating over the circular rx_ring
 *	array, starting at the next dirty buffer (which happens to be the
 *	one we finished up at last time around).
 *
 *	For each completed packet, we either copy it and pass it up the
 *	stack or, if the packet is near MTU-sized, allocate another
 *	buffer and flip the old one up the stack.
 * 
 *	We must succeed in keeping a buffer on the ring. If necessary we
 *	will toss a received packet rather than lose a ring entry. Once
 *	the first uncompleted descriptor is found, we move the
 *	End-Of-List bit to include the buffers just processed.
 *
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;
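	/* Walk forward from where we stopped last time, handling at
	   most 48 descriptors per call to bound the work done here */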
	
	do
	{ 
		p=lp->rx_ring[rx_ring_tail].p; 

		if(!(p->status & (1<<7))) { /* Not COMPLETED */ 
			break;
		} 
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{		        

			u16 length=p->length;
			struct sk_buff *skb; 
			struct sk_buff *newskb; 

			/* Try to save time by avoiding a copy on big frames */

			if ((length > RX_COPYBREAK) 
			    && ((newskb=dev_alloc_skb(1532)) != NULL)) 
			{ 
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);
				
				skb_reserve(newskb,18); 
				lp->rx_ring[rx_ring_tail].skb=newskb;  
				p->data=isa_virt_to_bus(newskb->data);  
			} 
			else 
			{
				skb=dev_alloc_skb(length+2);  

				if(skb==NULL) {
					lp->net_stats.rx_dropped++; 
					goto dropped; 
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}
			
			skb->protocol=eth_type_trans(skb,dev); 
			skb->dev=dev; 
			dev->last_rx = jiffies;
 			lp->net_stats.rx_packets++; 
 			lp->net_stats.rx_bytes += length; 
			netif_rx(skb);
		}

	dropped:
		p->length = 1532; 
		p->status = 0;
		
		rx_ring_tail=next_rx(rx_ring_tail); 
	}
	while(x++<48);

	/* If there was actually a frame to be processed, place the EOL bit */ 
	/* at the descriptor prior to the one to be filled next */ 

	if (rx_ring_tail != rx_old_tail) 
	{ 
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |=  CONTROL_EOL; 
		lp->rx_ring[prev_rx(rx_old_tail)].p->control  &= ~CONTROL_EOL; 

		lp->rx_ring_tail=rx_ring_tail; 
	}
}


/**
 *	mc32_tx_ring	-	process completed transmits
 *	@dev: 3c527 that needs its transmit ring processing
 *
 *
 *	This operates in a similar fashion to mc32_rx_ring. We iterate
 *	over the transmit ring. For each descriptor which has been
 *	processed by the card, we free its associated buffer and note
 *	any errors. This continues until the transmit ring is emptied
 *	or we reach a descriptor that hasn't yet been processed by the
 *	card.
 * 
 */

static void mc32_tx_ring(struct net_device *dev) 
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */

	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))  
	{   
		u16 t; 
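		/* tx_ring_tail is the last descriptor we reclaimed, so
		   look at the one after it */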

		t=next_tx(lp->tx_ring_tail); 
		np=lp->tx_ring[t].p; 

		if(!(np->status & (1<<7))) 
		{
			/* Not COMPLETED */ 
			break; 
		} 
		lp->net_stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			lp->net_stats.tx_errors++;   

			switch(np->status&0x0F)
			{
				case 1:
					lp->net_stats.tx_aborted_errors++;
					break; /* Max collisions */ 
				case 2:
					lp->net_stats.tx_fifo_errors++;
					break;
				case 3:
					lp->net_stats.tx_carrier_errors++;
					break;
				case 4:
					lp->net_stats.tx_window_errors++;
					break;  /* CTS Lost */ 
				case 5:
					lp->net_stats.tx_aborted_errors++;
					break; /* Transmit timeout */ 
			}
		}
		/* Packets are sent in order - this is
		    basically a FIFO queue of buffers matching
		    the card ring */
		lp->net_stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
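		/* A ring slot has been reclaimed; let the stack queue
		   more frames */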
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t; 
	}

} 


/**
 *	mc32_interrupt		-	handle an interrupt from a 3c527
 *	@irq: Interrupt number
 *	@dev_id: 3c527 that requires servicing
 *	@regs: Registers (unused)
 *
 *
 *	An interrupt is raised whenever the 3c527 writes to the command
 *	register. This register contains the message it wishes to send us
 *	packed into a single byte field. We keep reading status entries
 *	until we have processed all the control items, but simply count
 *	transmit and receive reports. When all reports are in we empty the
 *	transceiver rings as appropriate. This saves the overhead of
 *	multiple command requests.
 *
 *	Because MCA is level-triggered, we shouldn't miss indications.
 *	Therefore, we needn't ask the card to suspend interrupts within
 *	this handler. The card receives an implicit acknowledgment of the
 *	current interrupt when we read the command register.
 *
 */

static irqreturn_t mc32_interrupt(int irq, void *dev_id, struct pt_regs * regs)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0; 
	
	if (dev == NULL) {
		printk(KERN_WARNING "%s: irq %d for unknown device.\n", cardname, irq);
		return IRQ_NONE;
	}
 
	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See what's cooking */

	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

#ifdef DEBUG_IRQ		
		printk("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);
#endif
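		/* Bits 0-2 carry the transmit report, bits 3-5 the
		   receive report, bit 6 flags command completion and
		   bit 7 a statistics counter about to overflow */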
			
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1; 
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				printk("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1; 
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				lp->net_stats.rx_dropped++;
				mc32_rx_ring(dev); 
				mc32_start_transceiver(dev); 
				break;
			default:
				printk("%s: strange rx ack %d\n", 
					dev->name, status&7);			
		}
		status>>=3;
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourselves.
			 */
				   
			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait) 
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}
		if(status&2)
		{
			/*
			 *	We get interrupted once per
			 *	counter that is about to overflow. 
			 */

			mc32_update_stats(dev);			
		}
	}


	/*
	 *	Process the transmit and receive rings 
	 */

	if(tx_event) 
		mc32_tx_ring(dev);
	 
	if(rx_event) 
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}


/**
 *	mc32_close	-	user configuring the 3c527 down
 *	@dev: 3c527 card to shut down
 *
 *	The 3c527 is a bus mastering device. We must be careful how we
 *	shut it down. It may also be running shared interrupt so we have
 *	to be sure to silence it properly
 *
 *	We indicate that the card is closing to the rest of the
 *	driver.  Otherwise, it is possible that the card may run out
 *	of receive buffers and restart the transceiver while we're
 *	trying to close it.
 * 
 *	We abort any receives and transmits in progress and then wait
 *	until any pending exec commands have completed in other code
 *	threads. In theory we can't get here while that is true; in
 *	practice I am paranoid.
 *
 *	We turn off the interrupt enable for the board to be sure it can't
 *	interfere with other devices.
 */

static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;
	
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 *	Send the indications on command (handy debug check)
	 */

	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */

	mc32_halt_transceiver(dev); 
	
	/* Ensure we issue no more commands beyond this point */

	down(&lp->cmd_mutex);
	
	/* Ok the card is now stopping */	
	
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);
		
	mc32_update_stats(dev); 

	return 0;
}


/**
 *	mc32_get_stats		-	hand back stats to network layer
 *	@dev: The 3c527 card to handle
 *
 *	We've collected all the stats we can in software already. Now
 *	it's time to update those kept on-card and return the lot. 
 * 
 */

static struct net_device_stats *mc32_get_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	
	mc32_update_stats(dev); 
	return &lp->net_stats;
}


/**
 *	do_mc32_set_multicast_list	-	attempt to update multicasts
 *	@dev: 3c527 device to load the list on
 *	@retry: indicates this is not the first call. 
 *
 *
 * 	Actually set or clear the multicast filter for this adaptor. The
 *	locking issues are handled by this routine. We have to track
 *	state as it may take multiple calls to get the command sequence
 *	completed. We just keep trying to schedule the loads until we
 *	manage to process them all.
 * 
 *	num_addrs == -1	Promiscuous mode, receive all packets
 * 
 *	num_addrs == 0	Normal mode, clear multicast list
 * 
 *	num_addrs > 0	Multicast mode, receive normal and MC packets, 
 *			and do best-effort filtering. 
 *
 *	See mc32_update_stats() regarding setting the SAV BP bit.
 *
 */

static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
{
	struct mc32_local *lp = netdev_priv(dev);
	u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ 

	if (dev->flags&IFF_PROMISC)
		/* Enable promiscuous mode */
		filt |= 1;
	else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10)
	{
		dev->flags|=IFF_PROMISC;
		filt |= 1;
	}
	else if(dev->mc_count)
	{
		unsigned char block[62];
		unsigned char *bp;
		struct dev_mc_list *dmc=dev->mc_list;
		
		int i;
	       
		if(retry==0)
			lp->mc_list_valid = 0;
		if(!lp->mc_list_valid)
		{
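			/* Command block: byte 0 is the address count,
			   byte 1 is zero, then one 6-byte MAC address
			   per list entry */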
			block[1]=0;
			block[0]=dev->mc_count;
			bp=block+2;
		
			for(i=0;i<dev->mc_count;i++)
			{
				memcpy(bp, dmc->dmi_addr, 6);
				bp+=6;
				dmc=dmc->next;
			}
			if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
			{
				lp->mc_reload_wait = 1;
				return;
			}
			lp->mc_list_valid=1;
		}
	}
	
	if(mc32_command_nowait(dev, 0, &filt, 2)==-1) 
	{
		lp->mc_reload_wait = 1;
	} 
	else { 
		lp->mc_reload_wait = 0;
	}
}


/**
 *	mc32_set_multicast_list	-	queue multicast list update
 *	@dev: The 3c527 to use
 *
 *	Commence loading the multicast list. This is called when the kernel
 *	changes the lists. It will override any pending list we are trying to
 *	load.
 */

static void mc32_set_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,0);
}


/**
 *	mc32_reset_multicast_list	-	reset multicast list
 *	@dev: The 3c527 to use
 *
 *	Attempt the next step in loading the multicast lists. If this attempt
 *	fails to complete then it will be scheduled and this function called
 *	again later from elsewhere.
 */

static void mc32_reset_multicast_list(struct net_device *dev)
{
	do_mc32_set_multicast_list(dev,1);
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return mc32_debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 level)
{
	mc32_debug = level;
}

static struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};

#ifdef MODULE

static struct net_device *this_device;

/**
 *	init_module		-	entry point
 *
 *	Probe and locate a 3c527 card. This really should probe and locate
 *	all the 3c527 cards in the machine, not just one of them. Yes, you
 *	can insmod multiple modules for now, but it's a hack.
 */

int __init init_module(void)
{
	this_device = mc32_probe(-1);
	if (IS_ERR(this_device))
		return PTR_ERR(this_device);
	return 0;
}

/**
 *	cleanup_module	-	free resources for an unload
 *
 *	Unloading time. We release the MCA bus resources and the interrupt
 *	at which point everything is ready to unload. The card must be stopped
 *	at this point or we would not have been called. When we unload we
 *	leave the card stopped but not totally shut down. When the card is
 *	initialized it must be rebooted or the rings reloaded before any
 *	transmit operations are allowed to start scribbling into memory.
 */

void cleanup_module(void)
{
	unregister_netdev(this_device);
	cleanup_card(this_device);
	free_netdev(this_device);
}

#endif /* MODULE */