	} else if (temp_state == NX_TEMP_WARN) {
		if (adapter->temp == NX_TEMP_NORMAL) {
			printk(KERN_WARNING
"%s: Device temperature %d degrees C "
"exceeds operating range."
" Immediate action needed.\n",
netxen_nic_driver_name, temp_val);
}
} else {
if (adapter->temp == NX_TEMP_WARN) {
printk(KERN_INFO
"%s: Device temperature is now %d degrees C"
" in normal range.\n", netxen_nic_driver_name,
temp_val);
}
}
adapter->temp = temp_state;
return rv;
}
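/*
 * Watchdog: port 0 checks the board temperature first and, if
 * netxen_nic_check_temp() reports a fatal condition, skips re-arming
 * the timer.  Otherwise run the PHY interrupt handler, if one is
 * registered, and re-schedule ourselves two seconds out.
 */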
void netxen_watchdog_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, watchdog_task);
if ((adapter->portnum == 0) && netxen_nic_check_temp(adapter))
return;
if (adapter->handle_phy_intr)
adapter->handle_phy_intr(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
/*
 * netxen_process_rcv() sends the received packet to the protocol stack,
 * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
 * invoke the routine to send more rx buffers to the Phantom...
 */
static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
			       struct status_desc *desc)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
u64 sts_data = le64_to_cpu(desc->status_desc_data);
int index = netxen_get_sts_refhandle(sts_data);
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
struct netxen_rx_buffer *buffer;
struct sk_buff *skb;
u32 length = netxen_get_sts_totallength(sts_data);
u32 desc_ctx;
struct netxen_rcv_desc_ctx *rcv_desc;
int ret;
desc_ctx = netxen_get_sts_type(sts_data);
if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
printk("%s: %s Bad Rcv descriptor ring\n",
netxen_nic_driver_name, netdev->name);
return;
}
rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
if (unlikely(index > rcv_desc->max_rx_desc_count)) {
DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
index, rcv_desc->max_rx_desc_count);
return;
}
buffer = &rcv_desc->rx_buf_arr[index];
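	/*
	 * LRO packets arrive as one status descriptor per fragment; the
	 * last-fragment descriptor carries the expected fragment count
	 * and the true packet length.  Hold the skb back until every
	 * expected fragment has been received.
	 */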
if (desc_ctx == RCV_DESC_LRO_CTXID) {
buffer->lro_current_frags++;
if (netxen_get_sts_desc_lro_last_frag(desc)) {
buffer->lro_expected_frags =
netxen_get_sts_desc_lro_cnt(desc);
buffer->lro_length = length;
}
if (buffer->lro_current_frags != buffer->lro_expected_frags) {
if (buffer->lro_expected_frags != 0) {
printk("LRO: (refhandle:%x) recv frag. "
"wait for last. flags: %x expected:%d "
"have:%d\n", index,
netxen_get_sts_desc_lro_last_frag(desc),
buffer->lro_expected_frags,
buffer->lro_current_frags);
}
return;
}
}
pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
PCI_DMA_FROMDEVICE);
skb = (struct sk_buff *)buffer->skb;
	if (likely(netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb->ip_summed = CHECKSUM_NONE;
skb->dev = netdev;
if (desc_ctx == RCV_DESC_LRO_CTXID) {
/* True length was only available on the last pkt */
skb_put(skb, buffer->lro_length);
} else {
skb_put(skb, length);
}
skb->protocol = eth_type_trans(skb, netdev);
ret = netif_receive_skb(skb);
netdev->last_rx = jiffies;
rcv_desc->rcv_pending--;
/*
* We just consumed one buffer so post a buffer.
*/
buffer->skb = NULL;
buffer->state = NETXEN_BUFFER_FREE;
buffer->lro_current_frags = 0;
buffer->lro_expected_frags = 0;
adapter->stats.no_rcv++;
adapter->stats.rxbytes += length;
}
/* Process Receive status ring */
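/*
 * Walk the status ring until we run out of host-owned descriptors or
 * exhaust the "max" budget, handing each completed descriptor to
 * netxen_process_rcv().  Then refill all receive descriptor rings and
 * publish the new consumer index to the firmware.  The
 * "& (max_rx_desc_count - 1)" wrap assumes a power-of-two ring size.
 */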
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
{
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
struct status_desc *desc; /* used to read status desc here */
u32 consumer = recv_ctx->status_rx_consumer;
u32 producer = 0;
int count = 0, ring;
while (count < max) {
		desc = &desc_head[consumer];
		if (!(netxen_get_sts_owner(desc) & STATUS_OWNER_HOST)) {
			DPRINTK(ERR, "desc %p ownedby %x\n", desc,
				netxen_get_sts_owner(desc));
break;
}
netxen_process_rcv(adapter, ctxid, desc);
consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
count++;
}
for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
/* update the consumer index in phantom */
if (count) {
recv_ctx->status_rx_consumer = consumer;
recv_ctx->status_rx_producer = producer;
/* Window = 1 */
		writel(consumer,
		       NETXEN_CRB_NORMALIZE(adapter,
					    recv_crb_registers[adapter->portnum].
					    crb_rcv_status_consumer));
}
return count;
}
/* Process Command status ring */
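/*
 * TX completion: reap everything from last_cmd_consumer up to the
 * consumer index the firmware last wrote back, unmapping each DMA
 * fragment and freeing the skb.  At most MAX_STATUS_HANDLE entries are
 * handled per call; the return value tells the caller whether the ring
 * was fully drained.
 */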
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 last_consumer, consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct netxen_skb_frag *frag;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int done = 0;
last_consumer = adapter->last_cmd_consumer;
consumer = le32_to_cpu(*(adapter->cmd_consumer));
while (last_consumer != consumer) {
buffer = &adapter->cmd_buf_arr[last_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
pci_unmap_single(pdev, frag->dma, frag->length,
PCI_DMA_TODEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
pci_unmap_page(pdev, frag->dma, frag->length,
PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}
last_consumer = get_next_index(last_consumer,
adapter->max_tx_desc_count);
if (++count >= MAX_STATUS_HANDLE)
break;
	}

	if (count) {
		adapter->last_cmd_consumer = last_consumer;
smp_mb();
if (netif_queue_stopped(netdev) && netif_running(netdev)) {
netif_tx_lock(netdev);
netif_wake_queue(netdev);
smp_mb();
			netif_tx_unlock(netdev);
		}
	}
/*
* If everything is freed up to consumer then check if the ring is full
* If the ring is full then check if more needs to be freed and
* schedule the call back again.
*
* This happens when there are 2 CPUs. One could be freeing and the
* other filling it. If the ring is full when we get out of here and
* the card has already interrupted the host then the host can miss the
* interrupt.
*
* There is still a possible race condition and the host could miss an
* interrupt. The card has to take care of this.
*/
consumer = le32_to_cpu(*(adapter->cmd_consumer));
done = (last_consumer == consumer);
return (done);
}
/*
 * netxen_post_rx_buffers puts buffers in the Phantom memory
 */
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
{
struct pci_dev *pdev = adapter->ahw.pdev;
struct sk_buff *skb;
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
struct netxen_rcv_desc_ctx *rcv_desc = NULL;
uint producer;
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int count = 0;
int index = 0;
netxen_ctx_msg msg = 0;
dma_addr_t dma;
rcv_desc = &recv_ctx->rcv_desc[ringid];
producer = rcv_desc->producer;
index = rcv_desc->begin_alloc;
buffer = &rcv_desc->rx_buf_arr[index];
/* We can start writing rx descriptors into the phantom memory. */
while (buffer->state == NETXEN_BUFFER_FREE) {
skb = dev_alloc_skb(rcv_desc->skb_size);
if (unlikely(!skb)) {
/*
* We need to schedule the posting of buffers to the pegs.
*/
rcv_desc->begin_alloc = index;
DPRINTK(ERR, "netxen_post_rx_buffers: "
" allocated only %d buffers\n", count);
break;
}
count++; /* now there should be no failure */
pdesc = &rcv_desc->desc_head[producer];
#if defined(XGB_DEBUG)
*(unsigned long *)(skb->head) = 0xc0debabe;
if (skb_is_nonlinear(skb)) {
printk("Allocated SKB @%p is nonlinear\n");
}
#endif
skb_reserve(skb, 2);
/* This will be setup when we receive the
* buffer after it has been filled FSL TBD TBD
* skb->dev = netdev;
*/
dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
PCI_DMA_FROMDEVICE);
buffer->skb = skb;
buffer->state = NETXEN_BUFFER_BUSY;
buffer->dma = dma;
/* make a rcv descriptor */
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		DPRINTK(INFO, "done writing descriptor\n");
producer =
get_next_index(producer, rcv_desc->max_rx_desc_count);
index = get_next_index(index, rcv_desc->max_rx_desc_count);
buffer = &rcv_desc->rx_buf_arr[index];
}
/* if we did allocate buffers, then write the count to Phantom */
if (count) {
rcv_desc->begin_alloc = index;
rcv_desc->rcv_pending += count;
rcv_desc->producer = producer;
/* Window = 1 */
writel((producer - 1) &
(rcv_desc->max_rx_desc_count - 1),
NETXEN_CRB_NORMALIZE(adapter,
recv_crb_registers[
adapter->portnum].
rcv_desc_crb[ringid].
crb_rcv_producer_offset));
/*
* Write a doorbell msg to tell phanmon of change in
* receive ring producer
*/
netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
netxen_set_msg_privid(msg);
netxen_set_msg_count(msg,
((producer -
1) & (rcv_desc->
max_rx_desc_count - 1)));
netxen_set_msg_ctxid(msg, adapter->portnum);
netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
writel(msg,
DB_NORMALIZE(adapter,
NETXEN_RCV_PRODUCER_OFFSET));
}
}
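/*
 * Doorbell-less variant of netxen_post_rx_buffers(), used from the
 * receive processing path: it replenishes the ring and updates the
 * producer index through the CRB window only, finishing with a write
 * barrier instead of ringing the receive-peg doorbell.
 */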
static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
uint32_t ctx, uint32_t ringid)
{
struct pci_dev *pdev = adapter->ahw.pdev;
struct sk_buff *skb;
struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
struct netxen_rcv_desc_ctx *rcv_desc = NULL;
u32 producer;
struct rcv_desc *pdesc;
struct netxen_rx_buffer *buffer;
int count = 0;
int index = 0;
rcv_desc = &recv_ctx->rcv_desc[ringid];
producer = rcv_desc->producer;
index = rcv_desc->begin_alloc;
buffer = &rcv_desc->rx_buf_arr[index];
/* We can start writing rx descriptors into the phantom memory. */
while (buffer->state == NETXEN_BUFFER_FREE) {
skb = dev_alloc_skb(rcv_desc->skb_size);
if (unlikely(!skb)) {
/*
* We need to schedule the posting of buffers to the pegs.
*/
rcv_desc->begin_alloc = index;
DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
" allocated only %d buffers\n", count);
break;
}
count++; /* now there should be no failure */
pdesc = &rcv_desc->desc_head[producer];
skb_reserve(skb, 2);
		/* This will be setup when we receive the
* buffer after it has been filled
* skb->dev = netdev;
*/
buffer->skb = skb;
buffer->state = NETXEN_BUFFER_BUSY;
buffer->dma = pci_map_single(pdev, skb->data,
rcv_desc->dma_size,
PCI_DMA_FROMDEVICE);
pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		DPRINTK(INFO, "done writing descriptor\n");
producer =
get_next_index(producer, rcv_desc->max_rx_desc_count);
index = get_next_index(index, rcv_desc->max_rx_desc_count);
buffer = &rcv_desc->rx_buf_arr[index];
}
/* if we did allocate buffers, then write the count to Phantom */
if (count) {
rcv_desc->begin_alloc = index;
rcv_desc->rcv_pending += count;
rcv_desc->producer = producer;
/* Window = 1 */
writel((producer - 1) &
(rcv_desc->max_rx_desc_count - 1),
NETXEN_CRB_NORMALIZE(adapter,
recv_crb_registers[
adapter->portnum].
rcv_desc_crb[ringid].
crb_rcv_producer_offset));
wmb();
}
}
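/* Reset the adapter's software statistics counters. */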
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
memset(&adapter->stats, 0, sizeof(adapter->stats));