/* netxen_nic_init.c */
	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
			      "complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}

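/*
 * netxen_init_firmware() - bring the firmware to an operational state.
 * Waits for the receive peg to finish initializing, then advertises the
 * host capabilities (per-port interrupt scheme, multi-function MSI and
 * multi-port mode) and acknowledges command peg initialization.
 */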
int netxen_init_firmware(struct netxen_adapter *adapter)
{
	int err;

	err = netxen_receive_peg_ready(adapter);
	if (err)
		return err;

	NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}

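/*
 * Decode a firmware link-event message.  As parsed below, body[1] carries
 * the cable OUI (bits 0-31), cable length (bits 32-47) and link speed
 * (bits 48-63); body[2] carries link status (bits 0-7), module type
 * (bits 8-15), duplex (bits 16-23) and autoneg (bits 24-31).
 */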
static void
netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
{
	u32 cable_OUI;
	u16 cable_len;
	u16 link_speed;
	u8  link_status, module, duplex, autoneg;
	struct net_device *netdev = adapter->netdev;

	adapter->has_link_events = 1;

	cable_OUI = msg->body[1] & 0xffffffff;
	cable_len = (msg->body[1] >> 32) & 0xffff;
	link_speed = (msg->body[1] >> 48) & 0xffff;

	link_status = msg->body[2] & 0xff;
	duplex = (msg->body[2] >> 16) & 0xff;
	autoneg = (msg->body[2] >> 24) & 0xff;

	module = (msg->body[2] >> 8) & 0xff;
	if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
		printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
				netdev->name, cable_OUI, cable_len);
	} else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
		printk(KERN_INFO "%s: unsupported cable length %d\n",
				netdev->name, cable_len);
	}

	netxen_advert_link_change(adapter, link_status);

	/* update link parameters */
	if (duplex == LINKEVENT_FULL_DUPLEX)
		adapter->link_duplex = DUPLEX_FULL;
	else
		adapter->link_duplex = DUPLEX_HALF;
	adapter->module_type = module;
	adapter->link_autoneg = autoneg;
	adapter->link_speed = link_speed;
}

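/*
 * Assemble a firmware-to-host message from up to four status descriptors
 * (two 64-bit words each, eight words total) and dispatch it by opcode.
 * Only link-event responses are handled; other opcodes are ignored.
 */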
static void
netxen_handle_fw_message(int desc_cnt, int index,
		struct nx_host_sds_ring *sds_ring)
{
	nx_fw_msg_t msg;
	struct status_desc *desc;
	int i = 0, opcode;

	while (desc_cnt > 0 && i < 8) {
		desc = &sds_ring->desc_head[index];
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
		msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);

		index = get_next_index(index, sds_ring->num_desc);
		desc_cnt--;
	}

	opcode = netxen_get_nic_msg_opcode(msg.body[0]);
	switch (opcode) {
	case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
		netxen_handle_linkevent(sds_ring->adapter, &msg);
		break;
	default:
		break;
	}
}

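/*
 * Allocate and DMA-map a receive skb for @buffer.  Returns 0 on success,
 * non-zero if the allocation or the mapping fails, in which case the
 * buffer is left unpopulated for a later retry.
 */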
static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring,
		struct netxen_rx_buffer *buffer)
{
	struct sk_buff *skb;
	dma_addr_t dma;
	struct pci_dev *pdev = adapter->pdev;

	buffer->skb = dev_alloc_skb(rds_ring->skb_size);
	if (!buffer->skb)
		return 1;

	skb = buffer->skb;

	if (!adapter->ahw.cut_through)
		skb_reserve(skb, 2);

	dma = pci_map_single(pdev, skb->data,
			rds_ring->dma_size, PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);
		buffer->skb = NULL;
		return 1;
	}

	buffer->skb = skb;
	buffer->dma = dma;
	buffer->state = NETXEN_BUFFER_BUSY;

	return 0;
}

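/*
 * Reclaim the rx buffer at @index: unmap its DMA buffer, mark the slot
 * free and return the skb with ip_summed set from the hardware checksum
 * status (CHECKSUM_UNNECESSARY only when rx_csum is enabled and the
 * firmware reported STATUS_CKSUM_OK).
 */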
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	skb->dev = adapter->netdev;

	buffer->skb = NULL;
no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	return skb;
}

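/*
 * Handle one ordinary receive descriptor: look up the rx buffer by its
 * reference handle, size the skb from the reported length and packet
 * offset and hand it to GRO.  Returns the consumed buffer so the caller
 * can queue it for refill, or NULL if the ring or index is out of range.
 */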
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	int index, length, cksum, pkt_offset;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	length = netxen_get_sts_totallength(sts_data0);
	cksum  = netxen_get_sts_status(sts_data0);
	pkt_offset = netxen_get_sts_pkt_offset(sts_data0);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return buffer;

	if (length > rds_ring->skb_size)
		skb_put(skb, rds_ring->skb_size);
	else
		skb_put(skb, length);

	if (pkt_offset)
		skb_pull(skb, pkt_offset);
	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);

	napi_gro_receive(&sds_ring->napi, skb);
	adapter->stats.rx_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define TCP_HDR_SIZE            20
#define TCP_TS_OPTION_SIZE      12
#define TCP_TS_HDR_SIZE         (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)

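/*
 * Handle a firmware-coalesced (LRO) receive.  The status descriptors give
 * the aggregated payload length, the L2/L4 header offsets and the TCP
 * state, so the IP total length and checksum and the TCP sequence number
 * and PSH flag are rewritten before the frame is passed up the stack.
 * TCP_TS_HDR_SIZE accounts for the timestamp option when present.
 */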
static struct netxen_rx_buffer *
netxen_process_lro(struct netxen_adapter *adapter,
		struct nx_host_sds_ring *sds_ring,
		int ring, u64 sts_data0, u64 sts_data1)
{
	struct net_device *netdev = adapter->netdev;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	struct nx_host_rds_ring *rds_ring;
	struct iphdr *iph;
	struct tcphdr *th;
	bool push, timestamp;
	int l2_hdr_offset, l4_hdr_offset;
	int index;
	u16 lro_length, length, data_offset;
	u32 seq_number;

	if (unlikely(ring >= adapter->max_rds_rings))
		return NULL;

	rds_ring = &recv_ctx->rds_rings[ring];

	index = netxen_get_lro_sts_refhandle(sts_data0);
	if (unlikely(index >= rds_ring->num_desc))
		return NULL;

	buffer = &rds_ring->rx_buf_arr[index];

	timestamp = netxen_get_lro_sts_timestamp(sts_data0);
	lro_length = netxen_get_lro_sts_length(sts_data0);
	l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
	l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
	push = netxen_get_lro_sts_push_flag(sts_data0);
	seq_number = netxen_get_lro_sts_seq_number(sts_data1);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
	if (!skb)
		return buffer;

	if (timestamp)
		data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
	else
		data_offset = l4_hdr_offset + TCP_HDR_SIZE;

	skb_put(skb, lro_length + data_offset);

	skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);

	skb_pull(skb, l2_hdr_offset);
	skb->protocol = eth_type_trans(skb, netdev);

	iph = (struct iphdr *)skb->data;
	th = (struct tcphdr *)(skb->data + (iph->ihl << 2));

	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
	iph->tot_len = htons(length);
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	th->psh = push;
	th->seq = htonl(seq_number);

	netif_receive_skb(skb);

	adapter->stats.lro_pkts++;
	adapter->stats.rxbytes += length;

	return buffer;
}

#define netxen_merge_rx_buffers(list, head) \
	do { list_splice_tail_init(list, head); } while (0)

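/*
 * Drain up to @max entries from the status ring (typically from the NAPI
 * poll handler): dispatch each entry by opcode, hand the consumed
 * descriptors back to the firmware by restoring STATUS_OWNER_PHANTOM,
 * replenish the RDS free lists and publish the new consumer index.
 */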
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
	struct netxen_adapter *adapter = sds_ring->adapter;
	struct list_head *cur;
	struct status_desc *desc;
	struct netxen_rx_buffer *rxbuf;
	u32 consumer = sds_ring->consumer;
	int count = 0;
	u64 sts_data0, sts_data1;
	int opcode, ring = 0, desc_cnt;

	while (count < max) {
		desc = &sds_ring->desc_head[consumer];
		sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
		if (!(sts_data0 & STATUS_OWNER_HOST))
			break;

		desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
		opcode = netxen_get_sts_opcode(sts_data0);
		switch (opcode) {
		case NETXEN_NIC_RXPKT_DESC:
		case NETXEN_OLD_RXPKT_DESC:
		case NETXEN_NIC_SYN_OFFLOAD:
			ring = netxen_get_sts_type(sts_data0);
			rxbuf = netxen_process_rcv(adapter, sds_ring,
					ring, sts_data0);
			break;
		case NETXEN_NIC_LRO_DESC:
			ring = netxen_get_lro_sts_type(sts_data0);
			sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
			rxbuf = netxen_process_lro(adapter, sds_ring,
					ring, sts_data0, sts_data1);
			break;
		case NETXEN_NIC_RESPONSE_DESC:
			netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
			/* fall through */
		default:
			goto skip;
		}

		WARN_ON(desc_cnt > 1);

		if (rxbuf)
			list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
		for (; desc_cnt > 0; desc_cnt--) {
			desc = &sds_ring->desc_head[consumer];
			desc->status_desc_data[0] =
				cpu_to_le64(STATUS_OWNER_PHANTOM);
			consumer = get_next_index(consumer, sds_ring->num_desc);
		}
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		struct nx_host_rds_ring *rds_ring =
			&adapter->recv_ctx.rds_rings[ring];

		if (!list_empty(&sds_ring->free_list[ring])) {
			list_for_each(cur, &sds_ring->free_list[ring]) {
				rxbuf = list_entry(cur,
						struct netxen_rx_buffer, list);
				netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
			}
			spin_lock(&rds_ring->lock);
			netxen_merge_rx_buffers(&sds_ring->free_list[ring],
						&rds_ring->free_list);
			spin_unlock(&rds_ring->lock);
		}

		netxen_post_rx_buffers_nodb(adapter, rds_ring);
	}

	if (count) {
		sds_ring->consumer = consumer;
		NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 sw_consumer, hw_consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;

	if (!spin_trylock(&adapter->tx_clean_lock))
		return 1;

	sw_consumer = tx_ring->sw_consumer;
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	while (sw_consumer != hw_consumer) {
		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count && netif_running(netdev)) {
		tx_ring->sw_consumer = sw_consumer;

		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
			__netif_tx_lock(tx_ring->txq, smp_processor_id());
			if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH) {
				netif_wake_queue(netdev);
				adapter->tx_timeo_cnt = 0;
			}
			__netif_tx_unlock(tx_ring->txq);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
	done = (sw_consumer == hw_consumer);
	spin_unlock(&adapter->tx_clean_lock);

	return done;
}

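/*
 * Refill @rds_ring from its free list: allocate and map skbs where needed,
 * build receive descriptors and advance the producer index.  On P2
 * revision hardware (firmware older than 4.0.0) a doorbell message is
 * also posted so the receive peg notices the new producer.
 */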
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
	struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	netxen_ctx_msg msg = 0;
	struct list_head *head;

	producer = rds_ring->producer;
	spin_lock(&rds_ring->lock);
	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		producer = get_next_index(producer, rds_ring->num_desc);
	}
	spin_unlock(&rds_ring->lock);

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer-1) & (rds_ring->num_desc-1));
		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					     ((producer - 1) &
					      (rds_ring->num_desc - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			NXWRIO(adapter, DB_NORMALIZE(adapter,
					NETXEN_RCV_PRODUCER_OFFSET), msg);
		}
	}
}

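/*
 * Non-blocking variant of netxen_post_rx_buffers(): uses spin_trylock()
 * so the receive-processing path can refill opportunistically, and skips
 * the P2 doorbell message.
 */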
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring)
{
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int producer, count = 0;
	struct list_head *head;
	producer = rds_ring->producer;
	if (!spin_trylock(&rds_ring->lock))
		return;

	head = &rds_ring->free_list;
	while (!list_empty(head)) {

		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		if (!buffer->skb) {
			if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
				break;
		}

		count++;
		list_del(&buffer->list);

		/* make a rcv descriptor  */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
		producer = get_next_index(producer, rds_ring->num_desc);
	}

	if (count) {
		rds_ring->producer = producer;
		NXWRIO(adapter, rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->num_desc - 1));
	}
	spin_unlock(&rds_ring->lock);
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));