Commit ebfc45ee authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull more networking fixes from David Miller:

 1) Fix the mlx4_en_netpoll implementation: it needs to schedule a NAPI
    context, not synchronize it.  From Chris Mason.

 2) The IPv4 flow input interface should never be zero; it should be
    LOOPBACK_IFINDEX instead.  From Cong Wang and Julian Anastasov.

 3) Properly configure MAC to PHY connection in mvneta devices, from
    Thomas Petazzoni.

 4) sys_recv should use SYSCALL_DEFINE.  From Jan Glauber.

 5) Tunnel driver ioctls do not use the correct namespace, fix from
    Nicolas Dichtel.

 6) Fix memory leak on seccomp filter attach, from Kees Cook.

 7) Fix lockdep warning for nested vlans, from Ding Tianhong.

 8) Crashes can happen in SCTP due to how the auth_enable value is
    managed, fix from Vlad Yasevich.

 9) Wireless fixes from John W. Linville and co.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (45 commits)
  net: sctp: cache auth_enable per endpoint
  tg3: update rx_jumbo_pending ring param only when jumbo frames are enabled
  vlan: Fix lockdep warning when vlan dev handle notification
  seccomp: fix memory leak on filter attach
  isdn: icn: buffer overflow in icn_command()
  ip6_tunnel: use the right netns in ioctl handler
  sit: use the right netns in ioctl handler
  ip_tunnel: use the right netns in ioctl handler
  net: use SYSCALL_DEFINEx for sys_recv
  net: mdio-gpio: Add support for separate MDI and MDO gpio pins
  net: mdio-gpio: Add support for active low gpio pins
  net: mdio-gpio: Use devm_ functions where possible
  ipv4, route: pass 0 instead of LOOPBACK_IFINDEX to fib_validate_source()
  ipv4, fib: pass LOOPBACK_IFINDEX instead of 0 to flowi4_iif
  mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll
  net: mvneta: properly configure the MAC <-> PHY connection in all situations
  net: phy: add minimal support for QSGMII PHY
  sfc: On MCDI timeout, issue an FLR (and mark MCDI to fail-fast)
  mwifiex: fix hung task on command timeout
  mwifiex: process event before command response
  ...
parents 6e66d5da b14878cc
......@@ -10,7 +10,7 @@ The following properties are common to the Ethernet controllers:
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
the maximum frame size (there's contradiction in ePAPR).
- phy-mode: string, operation mode of the PHY interface; supported values are
"mii", "gmii", "sgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
"mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
"rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
standard property;
- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
......
......@@ -1155,7 +1155,7 @@ icn_command(isdn_ctrl *c, icn_card *card)
ulong a;
ulong flags;
int i;
- char cbuf[60];
+ char cbuf[80];
isdn_ctrl cmd;
icn_cdef cdef;
char __user *arg;
......@@ -1309,7 +1309,6 @@ icn_command(isdn_ctrl *c, icn_card *card)
break;
if ((c->arg & 255) < ICN_BCH) {
char *p;
- char dial[50];
char dcode[4];
a = c->arg;
......@@ -1321,10 +1320,10 @@ icn_command(isdn_ctrl *c, icn_card *card)
} else
/* Normal Dial */
strcpy(dcode, "CAL");
- strcpy(dial, p);
- sprintf(cbuf, "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
- dcode, dial, c->parm.setup.si1,
- c->parm.setup.si2, c->parm.setup.eazmsn);
+ snprintf(cbuf, sizeof(cbuf),
+ "%02d;D%s_R%s,%02d,%02d,%s\n", (int) (a + 1),
+ dcode, p, c->parm.setup.si1,
+ c->parm.setup.si2, c->parm.setup.eazmsn);
i = icn_writecmd(cbuf, strlen(cbuf), 0, card);
}
break;
......
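The overflow came from formatting a user-controlled dial string into fixed-size stack buffers with strcpy()/sprintf(); the fix enlarges cbuf, drops the intermediate dial[] copy, and bounds the write with snprintf(). A minimal userspace sketch of the same pattern (helper name and values are hypothetical, not driver code):

#include <stdio.h>

/* The destination size travels with the buffer, so an oversized dial
 * string is truncated instead of smashing the stack.
 */
static void format_dial(char *buf, size_t len, int channel,
			const char *dcode, const char *dial)
{
	snprintf(buf, len, "%02d;D%s_R%s\n", channel, dcode, dial);
}

int main(void)
{
	char cbuf[80];

	format_dial(cbuf, sizeof(cbuf), 1, "CAL", "5551234");
	fputs(cbuf, stdout);
	return 0;
}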
......@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
- tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
+ tp->rx_jumbo_pending = ering->rx_jumbo_pending;
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].tx_pending = ering->tx_pending;
......
......@@ -89,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
- #define MVNETA_SGMII_SERDES_CFG 0x24A0
+ #define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
+ #define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
......@@ -711,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
- /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
- static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
- {
- u32 val;
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- if (enable)
- val |= MVNETA_GMAC2_PORT_RGMII;
- else
- val &= ~MVNETA_GMAC2_PORT_RGMII;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- }
- /* Config SGMII port */
- static void mvneta_port_sgmii_config(struct mvneta_port *pp)
- {
- u32 val;
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val |= MVNETA_GMAC2_PCS_ENABLE;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
- mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
- }
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
......@@ -2749,26 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
}
/* Power up the port */
- static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
- u32 val;
+ u32 ctrl;
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
- if (phy_mode == PHY_INTERFACE_MODE_SGMII)
- mvneta_port_sgmii_config(pp);
+ ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- mvneta_gmac_rgmii_set(pp, 1);
+ /* Even though it might look weird, when we're configured in
+ * SGMII or QSGMII mode, the RGMII bit needs to be set.
+ */
+ switch(phy_mode) {
+ case PHY_INTERFACE_MODE_QSGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+ ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ ctrl |= MVNETA_GMAC2_PORT_RGMII;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Cancel Port Reset */
- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
- val &= ~MVNETA_GMAC2_PORT_RESET;
- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+ ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+ mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
MVNETA_GMAC2_PORT_RESET) != 0)
continue;
+ return 0;
}
/* Device initialization routine */
......@@ -2879,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "can't init eth hal\n");
goto err_free_stats;
}
- mvneta_port_power_up(pp, phy_mode);
+ err = mvneta_port_power_up(pp, phy_mode);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't power up port\n");
+ goto err_deinit;
+ }
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
......
......@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
- spin_lock_init(&cq->lock);
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
......
......@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_cq *cq;
- unsigned long flags;
int i;
for (i = 0; i < priv->rx_ring_num; i++) {
cq = priv->rx_cq[i];
- spin_lock_irqsave(&cq->lock, flags);
- napi_synchronize(&cq->napi);
- mlx4_en_process_rx_cq(dev, cq, 0);
- spin_unlock_irqrestore(&cq->lock, flags);
+ napi_schedule(&cq->napi);
}
}
#endif
......
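Reassembled from the hunk above (unchanged context lines restored for readability), the fixed poll controller is simply:

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	int i;

	/* Arm NAPI on each RX ring and let the regular poll path drain
	 * the completions. napi_synchronize() may sleep, which is unsafe
	 * in the atomic contexts netpoll can be invoked from.
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		napi_schedule(&cq->napi);
	}
}
#endif

With nothing left to take cq->lock on this path, the next hunk drops the field from struct mlx4_en_cq as well.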
......@@ -319,7 +319,6 @@ struct mlx4_en_cq {
struct mlx4_cq mcq;
struct mlx4_hwq_resources wqres;
int ring;
- spinlock_t lock;
struct net_device *dev;
struct napi_struct napi;
int size;
......
......@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
/* If it was a port reset, trigger reallocation of MC resources.
* Note that on an MC reset nothing needs to be done now because we'll
* detect the MC reset later and handle it then.
+ * For an FLR, we never get an MC reset event, but the MC has reset all
+ * resources assigned to us, so we have to trigger reallocation now.
*/
- if (reset_type == RESET_TYPE_ALL && !rc)
+ if ((reset_type == RESET_TYPE_ALL ||
+ reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
efx_ef10_reset_mc_allocations(efx);
return rc;
}
......@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
return 0;
}
+ static void efx_ef10_prepare_flr(struct efx_nic *efx)
+ {
+ atomic_set(&efx->active_queues, 0);
+ }
static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right)
{
......@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_ef10_fini_dmaq,
+ .prepare_flr = efx_ef10_prepare_flr,
+ .finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats,
.start_stats = efx_mcdi_mac_start_stats,
......
......@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
......@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
[RESET_TYPE_MC_BIST] = "MC_BIST",
[RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
......@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
/* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */
- if (efx->port_enabled || !netif_running(efx->net_dev))
+ if (efx->port_enabled || !netif_running(efx->net_dev) ||
+ efx->reset_pending)
return;
efx_start_port(efx);
......@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->prepare_flr(efx);
efx_stop_all(efx);
efx_disable_interrupts(efx);
......@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
EFX_ASSERT_RESET_SERIALISED(efx);
+ if (method == RESET_TYPE_MCDI_TIMEOUT)
+ efx->type->finish_flr(efx);
/* Ensure that SRAM is initialised even if we're disabling the device */
rc = efx->type->init(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
......@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
/* Clear flags for the scopes we covered. We assume the NIC and
* driver are now quiescent so that there is no race here.
*/
- efx->reset_pending &= -(1 << (method + 1));
+ if (method < RESET_TYPE_MAX_METHOD)
+ efx->reset_pending &= -(1 << (method + 1));
+ else /* it doesn't fit into the well-ordered scope hierarchy */
+ __clear_bit(method, &efx->reset_pending);
/* Reinitialise bus-mastering, which may have been turned off before
* the reset was scheduled. This is still appropriate, even in the
......@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
case RESET_TYPE_MC_BIST:
+ case RESET_TYPE_MCDI_TIMEOUT:
method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method));
......
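The mask arithmetic in the efx_reset() hunk is terse: -(1 << (method + 1)) equals ~((1 << (method + 1)) - 1), a mask with bits 0..method cleared, so completing one reset also clears every pending reset of equal or smaller scope. RESET_TYPE_MCDI_TIMEOUT sits above RESET_TYPE_MAX_METHOD, where that mask would wipe unrelated pending methods too, hence the __clear_bit() fallback. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned long pending = 0x15;	/* bits 0, 2 and 4 pending */
	int method = 2;			/* e.g. RESET_TYPE_ALL */

	/* Clears bits 0..2, keeps everything above: 0x15 & ~0x07 */
	pending &= -(1UL << (method + 1));
	printf("%#lx\n", pending);	/* prints 0x10 */
	return 0;
}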
......@@ -143,6 +143,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
......@@ -150,14 +151,16 @@ enum efx_loopback_mode {
* @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
*/
enum reset_type {
- RESET_TYPE_INVISIBLE = 0,
- RESET_TYPE_RECOVER_OR_ALL = 1,
- RESET_TYPE_ALL = 2,
- RESET_TYPE_WORLD = 3,
- RESET_TYPE_RECOVER_OR_DISABLE = 4,
- RESET_TYPE_DISABLE = 5,
+ RESET_TYPE_INVISIBLE,
+ RESET_TYPE_RECOVER_OR_ALL,
+ RESET_TYPE_ALL,
+ RESET_TYPE_WORLD,
+ RESET_TYPE_RECOVER_OR_DISABLE,
+ RESET_TYPE_MC_BIST,
+ RESET_TYPE_DISABLE,
RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
......@@ -165,7 +168,13 @@ enum reset_type {
RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
- RESET_TYPE_MC_BIST,
+ /* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+ * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+ * We encode this by having its enum value be greater than
+ * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
+ * efx_ioctl_reset.
+ */
+ RESET_TYPE_MCDI_TIMEOUT,
RESET_TYPE_MAX,
};
......
......@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
......@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
......
......@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
return rc;
}
+ /* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete())
+ * If we don't fix this up, on the next call to efx_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests; and the efx->active_queues gets messed up because
+ * we keep incrementing for the newly initialised queues, but it never went to
+ * zero previously. Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+ void efx_farch_finish_flr(struct efx_nic *efx)
+ {
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ atomic_set(&efx->active_queues, 0);
+ }
/**************************************************************************
*
* Event queue processing
......
......@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
static bool efx_mcdi_poll_once(struct efx_nic *efx);
- static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
- {
- EFX_BUG_ON_PARANOID(!efx->mcdi);
- return &efx->mcdi->iface;
- }
+ static void efx_mcdi_abandon(struct efx_nic *efx);
int efx_mcdi_init(struct efx_nic *efx)
{
......@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
rc = 0;
}
+ efx_mcdi_abandon(efx);
/* Close the race with efx_mcdi_ev_cpl() executing just too late
* and completing a request we've just cancelled, by ensuring
* that the seqno check therein fails.
......@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
if (efx->mc_bist_for_other_fn)
return -ENETDOWN;
+ if (mcdi->mode == MCDI_MODE_FAIL)
+ return -ENETDOWN;
efx_mcdi_acquire_sync(mcdi);
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
return 0;
......@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
- if (mcdi->mode == MCDI_MODE_POLL)
+ /* If already in polling mode, nothing to do.
+ * If in fail-fast state, don't switch to polled completion.
+ * FLR recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
return;
/* We can switch from event completion to polled completion, because
......@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
mcdi = efx_mcdi(efx);
- /* We must be in polling mode so no more requests can be queued */
- BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+ /* We must be in poll or fail mode so no more requests can be queued */
+ BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
del_timer_sync(&mcdi->async_timer);
......@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
return;
mcdi = efx_mcdi(efx);
- if (mcdi->mode == MCDI_MODE_EVENTS)
+ /* If already in event completion mode, nothing to do.
+ * If in fail-fast state, don't switch to event completion. FLR
+ * recovery will do that later.
+ */
+ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
return;
/* We can't switch from polled to event completion in the middle of a
......@@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
spin_unlock(&mcdi->iface_lock);
}
+ /* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+ static void efx_mcdi_abandon(struct efx_nic *efx)
+ {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+ return; /* it had already been done */
+ netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+ efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+ }
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
......@@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
int rc;
+ /* If MCDI is down, we can't handle_assertion */
+ if (method == RESET_TYPE_MCDI_TIMEOUT) {
+ rc = pci_reset_function(efx->pci_dev);
+ if (rc)
+ return rc;
+ /* Re-enable polled MCDI completion */
+ if (efx->mcdi) {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ mcdi->mode = MCDI_MODE_POLL;
+ }
+ return 0;
+ }
/* Recover from a failed assertion pre-reset */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
......
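efx_mcdi_abandon() relies on xchg() to make the transition to MCDI_MODE_FAIL one-shot: only the caller that observes a non-FAIL previous value schedules the FLR, and every later timeout returns early. A minimal userspace analogue using C11 atomics (names are hypothetical):

#include <stdatomic.h>
#include <stdio.h>

enum mode { MODE_POLL, MODE_EVENTS, MODE_FAIL };

static _Atomic int mcdi_mode = MODE_EVENTS;

static void abandon(void)
{
	/* Atomic swap: exactly one caller sees the pre-FAIL value. */
	if (atomic_exchange(&mcdi_mode, MODE_FAIL) == MODE_FAIL)
		return;	/* recovery already scheduled */
	puts("scheduling FLR recovery");
}

int main(void)
{
	abandon();	/* prints once */
	abandon();	/* no-op */
	return 0;
}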
......@@ -28,9 +28,16 @@ enum efx_mcdi_state {
MCDI_STATE_COMPLETED,
};
+ /**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
enum efx_mcdi_mode {
MCDI_MODE_POLL,
MCDI_MODE_EVENTS,
+ MCDI_MODE_FAIL,
};
/**
......@@ -104,6 +111,12 @@ struct efx_mcdi_data {
u32 fn_flags;
};
+ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+ {
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
+ }
#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
......
......@@ -972,6 +972,8 @@ struct efx_mtd_partition {
* (for Falcon architecture)
* @finish_flush: Clean up after flushing the DMA queues (for Falcon
* architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
* @describe_stats: Describe statistics for ethtool
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
......@@ -1100,6 +1102,8 @@ struct efx_nic_type {
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
+ void (*prepare_flr)(struct efx_nic *efx);
+ void (*finish_flr)(struct efx_nic *efx);
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
......
......@@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
int efx_nic_flush_queues(struct efx_nic *efx);
void siena_prepare_flush(struct efx_nic *efx);
int efx_farch_fini_dmaq(struct efx_nic *efx);
+ void efx_farch_finish_flr(struct efx_nic *efx);
void siena_finish_flush(struct efx_nic *efx);
void falcon_start_nic_stats(struct efx_nic *efx);
void falcon_stop_nic_stats(struct efx_nic *efx);
......
......@@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = {
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .prepare_flr = efx_port_dummy_op_void,
+ .finish_flr = efx_farch_finish_flr,
.describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
.start_stats = efx_mcdi_mac_start_stats,
......
......@@ -32,29 +32,39 @@
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
- int mdc, mdio;
+ int mdc, mdio, mdo;
+ int mdc_active_low, mdio_active_low, mdo_active_low;
};
static void *mdio_gpio_of_get_data(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mdio_gpio_platform_data *pdata;
+ enum of_gpio_flags flags;
int ret;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
- ret = of_get_gpio(np, 0);
+ ret = of_get_gpio_flags(np, 0, &flags);
if (ret < 0)
return NULL;
pdata->mdc = ret;
+ pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
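The hunk is truncated here but continues the same pattern for the mdio and mdo pins. The switch from of_get_gpio() to of_get_gpio_flags() is what exposes the polarity: the return value is still the GPIO number, while the flags out-parameter now reports OF_GPIO_ACTIVE_LOW. A sketch of the idiom (hypothetical helper, using the same legacy OF GPIO API as the driver):

#include <linux/of_gpio.h>

static int get_gpio_and_polarity(struct device_node *np, int index,
				 int *gpio, int *active_low)
{
	enum of_gpio_flags flags;
	int ret = of_get_gpio_flags(np, index, &flags);

	if (ret < 0)
		return ret;

	*gpio = ret;
	/* set when the device tree marks the line as active-low */
	*active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
	return 0;
}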