/* Calculate the number of transmit descriptors (TPDs) needed */
static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
{
u16 tpd_req;
u16 proto_hdr_len = 0;
tpd_req = skb_shinfo(skb)->nr_frags + 1;
if (skb_is_gso(skb)) {
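/*
 * GSO may need extra descriptors: one more when payload follows the
 * protocol headers in the linear buffer (the headers then get a TPD
 * of their own), and one more for the extended TPD used by TCPv6 LSO.
 */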
proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
if (proto_hdr_len < skb_headlen(skb))
tpd_req++;
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
tpd_req++;
}
return tpd_req;
}
static int atl1c_tso_csum(struct atl1c_adapter *adapter,
struct sk_buff *skb,
struct atl1c_tpd_desc **tpd,
enum atl1c_trans_queue type)
{
struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
int err;
if (skb_is_gso(skb)) {
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (unlikely(err))
return -1;
}
offload_type = skb_shinfo(skb)->gso_type;
if (offload_type & SKB_GSO_TCPV4) {
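/*
 * IPv4 TSO: trim any padding beyond the IP total length, clear the
 * IP checksum and seed the TCP checksum with the pseudo-header so
 * the hardware can complete it for every segment.
 */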
real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+ ntohs(ip_hdr(skb)->tot_len));
if (real_len < skb->len)
pskb_trim(skb, real_len);
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only checksum is needed */
if (netif_msg_tx_queued(adapter))
dev_warn(&pdev->dev,
"IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(
ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
(*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
}
}
if (offload_type & SKB_GSO_TCPV6) {
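/*
 * TCPv6 LSO (LSO v2) uses an extended TPD that carries the total
 * packet length: clear it here, then fetch a fresh TPD for the
 * regular descriptor fields.
 */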
struct atl1c_tpd_ext_desc *etpd =
*(struct atl1c_tpd_ext_desc **)(tpd);
memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
*tpd = atl1c_get_tpd(adapter, type);
ipv6_hdr(skb)->payload_len = 0;
/* check for a zero-byte payload */
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only checksum is needed */
if (netif_msg_tx_queued(adapter))
dev_warn(&pdev->dev,
"IPV6 tso with zero data??\n");
goto check_sum;
} else
tcp_hdr(skb)->check = ~csum_ipv6_magic(
&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0);
etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
etpd->pkt_len = cpu_to_le32(skb->len);
(*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
}
(*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
(*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
TPD_TCPHDR_OFFSET_SHIFT;
(*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
TPD_MSS_SHIFT;
return 0;
}
check_sum:
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
u8 css, cso;
cso = skb_checksum_start_offset(skb);
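/*
 * The descriptor apparently encodes the checksum start (cso) and
 * store (css) offsets in units of 16-bit words, hence the >> 1
 * below and the rejection of an odd start offset.
 */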
if (unlikely(cso & 0x1)) {
if (netif_msg_tx_err(adapter))
dev_err(&adapter->pdev->dev,
"payload offset should not be an odd number\n");
return -1;
} else {
css = cso + skb->csum_offset;
(*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
TPD_PLOADOFFSET_SHIFT;
(*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
TPD_CCSUM_OFFSET_SHIFT;
(*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
}
}
return 0;
}
static void atl1c_tx_map(struct atl1c_adapter *adapter,
struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
enum atl1c_trans_queue type)
{
struct atl1c_tpd_desc *use_tpd = NULL;
struct atl1c_buffer *buffer_info = NULL;
u16 buf_len = skb_headlen(skb);
u16 map_len = 0;
u16 mapped_len = 0;
u16 hdr_len = 0;
u16 nr_frags;
u16 f;
int tso;
nr_frags = skb_shinfo(skb)->nr_frags;
tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
if (tso) {
/* TSO: map the protocol headers into a descriptor of their own */
map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
use_tpd = tpd;
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = map_len;
buffer_info->dma = pci_map_single(adapter->pdev,
skb->data, hdr_len, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
mapped_len += map_len;
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
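/*
 * Map whatever remains of the linear buffer: the whole head in the
 * non-TSO case, or the payload following the headers when TSO has
 * already consumed a descriptor for them.
 */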
if (mapped_len < buf_len) {
/* mapped_len == 0 means we should use the first tpd,
which is given by the caller */
if (mapped_len == 0)
use_tpd = tpd;
else {
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
}
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
buffer_info->dma =
pci_map_single(adapter->pdev, skb->data + mapped_len,
buffer_info->length, PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
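/* Each page fragment gets its own TPD and its own DMA mapping. */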
for (f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
use_tpd = atl1c_get_tpd(adapter, type);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = frag->size;
buffer_info->dma =
pci_map_page(adapter->pdev, frag->page,
frag->page_offset,
buffer_info->length,
PCI_DMA_TODEVICE);
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
ATL1C_PCIMAP_TODEVICE);
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
/* The last tpd */
use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
/* The last buffer_info contains the skb address,
so it will be freed after unmap */
buffer_info->skb = skb;
}
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
{
struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
u32 prod_data;
AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data);
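/*
 * Both transmit rings share a single producer-index mailbox register;
 * judging from the masks below, the high-priority ring index lives in
 * the low 16 bits and the normal ring index in the high 16 bits.
 */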
switch (type) {
case atl1c_trans_high:
prod_data &= 0xFFFF0000;
prod_data |= tpd_ring->next_to_use & 0xFFFF;
break;
case atl1c_trans_normal:
prod_data &= 0x0000FFFF;
prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
break;
default:
break;
}
wmb();
AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
}
static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
u16 tpd_req = 1;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
if (test_bit(__AT_DOWN, &adapter->flags)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
tpd_req = atl1c_cal_tpd_req(skb);
if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
if (netif_msg_pktdata(adapter))
dev_info(&adapter->pdev->dev, "tx locked\n");
return NETDEV_TX_LOCKED;
}
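/*
 * Frames tagged with skb->mark == 0x01 are steered to the
 * high-priority transmit ring; everything else uses the normal ring.
 */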
if (skb->mark == 0x01)
type = atl1c_trans_high;
else
type = atl1c_trans_normal;
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
tpd = atl1c_get_tpd(adapter, type);
/* do TSO and checksum offload setup */
if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (unlikely(vlan_tx_tag_present(skb))) {
u16 vlan = vlan_tx_tag_get(skb);
__le16 tag;
vlan = cpu_to_le16(vlan);
AT_VLAN_TO_TAG(vlan, tag);
tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
tpd->vlan_tag = tag;
}
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
atl1c_tx_map(adapter, skb, tpd, type);
atl1c_tx_queue(adapter, skb, tpd, type);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
static void atl1c_free_irq(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
}
static int atl1c_request_irq(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
int flags = 0;
int err = 0;
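/*
 * Prefer MSI; if it cannot be enabled, fall back to the legacy
 * (potentially shared) PCI interrupt line.
 */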
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
if (netif_msg_ifup(adapter))
dev_err(&pdev->dev,
"Unable to allocate MSI interrupt Error: %d\n",
err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
if (!adapter->have_msi)
flags |= IRQF_SHARED;
err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
netdev->name, netdev);
if (err) {
if (netif_msg_ifup(adapter))
dev_err(&pdev->dev,
"Unable to allocate interrupt Error: %d\n",
err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
if (netif_msg_ifup(adapter))
dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
return err;
}
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
int err;
int i;
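/*
 * Bring-up order: reset the ring pointers, restore filtering and VLAN
 * state, refill the RX rings, program the hardware, then request the
 * IRQ before enabling NAPI, interrupts and the transmit queue.
 */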
netif_carrier_off(netdev);
atl1c_init_ring_ptrs(adapter);
atl1c_set_multi(netdev);
atl1c_restore_vlan(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
num = atl1c_alloc_rx_buffer(adapter, i);
if (unlikely(num == 0)) {
err = -ENOMEM;
goto err_alloc_rx;
}
}
if (atl1c_configure(adapter)) {
err = -EIO;
goto err_up;
}
err = atl1c_request_irq(adapter);
if (unlikely(err))
goto err_up;
clear_bit(__AT_DOWN, &adapter->flags);
napi_enable(&adapter->napi);
atl1c_irq_enable(adapter);
atl1c_check_link_status(adapter);
netif_start_queue(netdev);
return err;
err_up:
err_alloc_rx:
atl1c_clean_rx_ring(adapter);
return err;
}
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
atl1c_del_timer(adapter);
adapter->work_event = 0; /* clear all events */
/* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
netif_carrier_off(netdev);
napi_disable(&adapter->napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
/* reset MAC to disable all RX/TX */
atl1c_reset_mac(&adapter->hw);
msleep(1);
adapter->link_speed = SPEED_0;
adapter->link_duplex = -1;
atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
atl1c_clean_tx_ring(adapter, atl1c_trans_high);
atl1c_clean_rx_ring(adapter);
}
/*
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
*/
static int atl1c_open(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
int err;
/* disallow open during test */
if (test_bit(__AT_TESTING, &adapter->flags))
return -EBUSY;
/* allocate rx/tx dma buffer & descriptors */
err = atl1c_setup_ring_resources(adapter);
if (unlikely(err))
return err;
err = atl1c_up(adapter);
if (unlikely(err))
goto err_up;
if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
u32 phy_data;
AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
phy_data |= MDIO_AP_EN;
AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
}
return 0;
err_up:
atl1c_free_irq(adapter);
atl1c_free_ring_resources(adapter);
atl1c_reset_mac(&adapter->hw);
return err;
}
/*
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
* The close entry point is called when an interface is de-activated
* by the OS. The hardware is still under the drivers control, but
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
*/
static int atl1c_close(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
atl1c_down(adapter);
atl1c_free_ring_resources(adapter);
return 0;
}
static int atl1c_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
u32 mac_ctrl_data = 0;
u32 master_ctrl_data = 0;
u32 wol_ctrl_data = 0;
u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol; /* requested wake-up events */
atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
atl1c_down(adapter);
}
netif_device_detach(netdev);
if (wufc)
if (atl1c_phy_power_saving(hw) != 0)
dev_dbg(&pdev->dev, "phy power saving failed");
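/*
 * When wake-on-LAN is requested, leave the MAC receiver enabled (with
 * a speed/duplex setting derived from the last link state) and program
 * the selected wake events; otherwise put the PHY into power saving
 * and disable wake-up entirely.
 */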
if (wufc) {
AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
MAC_CTRL_PRMLEN_MASK) <<
MAC_CTRL_PRMLEN_SHIFT);
mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
mac_ctrl_data &= ~MAC_CTRL_DUPLX;
mac_ctrl_data |= MAC_CTRL_RX_EN;
if (adapter->link_speed == SPEED_1000 ||
adapter->link_speed == SPEED_0) {
mac_ctrl_data |= atl1c_mac_speed_1000 <<
MAC_CTRL_SPEED_SHIFT;
mac_ctrl_data |= MAC_CTRL_DUPLX;
} else
mac_ctrl_data |= atl1c_mac_speed_10_100 <<
MAC_CTRL_SPEED_SHIFT;
if (adapter->link_duplex == DUPLEX_FULL)
mac_ctrl_data |= MAC_CTRL_DUPLX;
/* turn on magic packet wol */
if (wufc & AT_WUFC_MAG)
wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
if (wufc & AT_WUFC_LNKC) {
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only a link-up event can wake the system */
if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
dev_dbg(&pdev->dev, "%s: write phy "
"register failed.\n",
atl1c_driver_name);
}
}
/* clear phy interrupt */
atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
/* Config MAC Ctrl register */
__atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
/* a magic packet may arrive as a broadcast, multicast or unicast frame */
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
dev_dbg(&pdev->dev,
"%s: suspend MAC=0x%x\n",
atl1c_driver_name, mac_ctrl_data);
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
} else {
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
mac_ctrl_data |= MAC_CTRL_DUPLX;
AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
hw->phy_configured = false; /* re-init PHY on resume */
}
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int atl1c_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
atl1c_phy_reset(&adapter->hw);
atl1c_reset_mac(&adapter->hw);
atl1c_phy_init(&adapter->hw);
#if 0
AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data);
pm_data &= ~PM_CTRLSTAT_PME_EN;
AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data);
#endif
netif_device_attach(netdev);
if (netif_running(netdev))
atl1c_up(adapter);
return 0;
}
#endif
static void atl1c_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
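/*
 * Shutdown reuses the suspend path to quiesce the hardware, then arms
 * (or leaves disarmed) D3 wake-up according to the configured WoL
 * settings before dropping to D3hot.
 */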
atl1c_suspend(&pdev->dev);
pci_wake_from_d3(pdev, adapter->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
static const struct net_device_ops atl1c_netdev_ops = {
.ndo_open = atl1c_open,
.ndo_stop = atl1c_close,
.ndo_validate_addr = eth_validate_addr,
.ndo_start_xmit = atl1c_xmit_frame,
.ndo_set_multicast_list = atl1c_set_multi,
.ndo_change_mtu = atl1c_change_mtu,
.ndo_fix_features = atl1c_fix_features,
.ndo_do_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = atl1c_netpoll,
#endif
};
static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
{
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
netdev->irq = pdev->irq;
netdev->netdev_ops = &atl1c_netdev_ops;
netdev->watchdog_timeo = AT_TX_WATCHDOG;
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
netdev->features = netdev->hw_features |
return 0;
}
/*
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
*
* Returns 0 on success, negative on failure
*
* atl1c_probe initializes an adapter identified by a pci_dev structure.
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
*/
static int __devinit atl1c_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl1c_adapter *adapter;
static int cards_found;
int err = 0;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev, "cannot enable PCI device\n");
return err;
}
/*
* The atl1c chip can DMA to 64-bit addresses, but it uses a single
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used at a time.
*
* Supporting 64-bit DMA on this hardware is more trouble than it's
* worth. It is far easier to limit to 32-bit DMA than update
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
(pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
err = pci_request_regions(pdev, atl1c_driver_name);
if (err) {
dev_err(&pdev->dev, "cannot obtain PCI resources\n");
goto err_pci_reg;
}
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
if (netdev == NULL) {
err = -ENOMEM;
dev_err(&pdev->dev, "etherdev alloc failed\n");
goto err_alloc_etherdev;
}
err = atl1c_init_netdev(netdev, pdev);
if (err) {
dev_err(&pdev->dev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
adapter->bd_number = cards_found;
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.adapter = adapter;
adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
if (!adapter->hw.hw_addr) {
err = -EIO;
dev_err(&pdev->dev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
/* init mii data */
adapter->mii.dev = netdev;
adapter->mii.mdio_read = atl1c_mdio_read;
adapter->mii.mdio_write = atl1c_mdio_write;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
(unsigned long)adapter);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
dev_err(&pdev->dev, "net device private data init failed\n");
goto err_sw_init;
}
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
/* Init GPHY as early as possible due to power saving issue */
atl1c_phy_reset(&adapter->hw);
err = atl1c_reset_mac(&adapter->hw);
if (err) {
err = -EIO;
goto err_reset;
}
/* reset the controller to
* put the device in a known good starting state */
err = atl1c_phy_init(&adapter->hw);
if (err) {
err = -EIO;
goto err_reset;
}
if (atl1c_read_mac_addr(&adapter->hw) != 0) {
err = -EIO;
dev_err(&pdev->dev, "get mac address failed\n");
goto err_eeprom;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
adapter->work_event = 0;
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "register netdevice failed\n");
goto err_register;
}
if (netif_msg_probe(adapter))
dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION);
cards_found++;
return 0;
err_reset:
err_register:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
return err;
}
/*
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
* atl1c_remove is called by the PCI subsystem to alert the driver
* that it should release a PCI device. This could be caused by a
* Hot-Plug event, or because the driver is going to be removed from
* memory.
*/
static void __devexit atl1c_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
atl1c_phy_disable(&adapter->hw);
iounmap(adapter->hw.hw_addr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(netdev);
}
/*
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
netif_device_detach(netdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
if (netif_running(netdev))
atl1c_down(adapter);
pci_disable_device(pdev);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
}
/*
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the e1000_resume routine.
*/
static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
if (netif_msg_hw(adapter))
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
atl1c_reset_mac(&adapter->hw);
return PCI_ERS_RESULT_RECOVERED;
}
/*
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation. Implementation resembles the
* second-half of the atl1c_resume routine.
*/
static void atl1c_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
if (netif_running(netdev)) {
if (atl1c_up(adapter)) {
if (netif_msg_hw(adapter))
dev_err(&pdev->dev,
"Cannot bring device back up after reset\n");
return;
}
}
netif_device_attach(netdev);
}
static struct pci_error_handlers atl1c_err_handler = {
.error_detected = atl1c_io_error_detected,
.slot_reset = atl1c_io_slot_reset,
.resume = atl1c_io_resume,
};
static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
.remove = __devexit_p(atl1c_remove),
.shutdown = atl1c_shutdown,
.err_handler = &atl1c_err_handler,
.driver.pm = &atl1c_pm_ops,
};
/*
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
*/
static int __init atl1c_init_module(void)
{
return pci_register_driver(&atl1c_driver);
}
/*
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
* from memory.
*/
static void __exit atl1c_exit_module(void)
{
pci_unregister_driver(&atl1c_driver);
}
module_init(atl1c_init_module);
module_exit(atl1c_exit_module);