static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))	/* contended: retry next period */
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) { /* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
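/*
 * Illustrative arithmetic, with an assumed (not databook) core clock of
 * 125 MHz: core_ticks_per_usec() would return 125, so an rx_coalesce_usecs
 * of 50 programs A_SG_INTRTIMER with 50 * 125 = 6250 core ticks.
 */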
/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers. Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}
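/*
 * Expected life cycle, as inferred from this file: t1_sge_create() suggests
 * the ring sizes, t1_sge_configure() allocates the rings, and
 * t1_sge_start() below finally enables the DMA engine.
 */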
/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		if (sge->espibug_skb[i])
			kfree_skb(sge->espibug_skb[i]);
}
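/*
 * The readl() back of A_SG_CONTROL in both the stop and start paths forces
 * the posted register write out to the device before the timers are touched.
 */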
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T2 ESPI 'stuck packet feature' workaround
 */
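/*
 * The handler below nudges a stuck ESPI by recycling an already-queued skb:
 * it stamps a Chelsio OUI pattern (00:07:43:00:00:00) into the frame at two
 * fixed offsets, marks the skb via cb[0] so it is rewritten only once, and
 * takes an extra reference with skb_get() before resubmitting it through
 * t1_sge_tx() so that the DMA completion does not free it.
 */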
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] = {
					0x0, 0x7, 0x43, 0x0, 0x0, 0x0
				};

				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10,
				       ch_mac_addr, ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
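/*
 * Single-port variant of the workaround above: it samples ESPI monitor
 * register 0x930 via t1_espi_get_mon() and, when the SEOP value indicates a
 * stall, rewrites and retransmits the queued skb for port 0 only.
 */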
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				u8 ch_mac_addr[ETH_ALEN] =
				    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
				       ch_mac_addr, ETH_ALEN);
				memcpy(skb->data + skb->len - 10, ch_mac_addr,
				       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge * __devinit t1_sge_create(struct adapter *adapter,
				     struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;

		sge->espibug_timer.data = (unsigned long)sge->adapter;
		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ/100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}