Commit faf8dcc1 authored by Jon Cooper, committed by David S. Miller

sfc: Track RPS flow IDs per channel instead of per function

Otherwise we get confused when two flows on different channels get the
same flow ID.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d69d1694
...@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) ...@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) { if (efx->type->offload_features & NETIF_F_NTUPLE) {
efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, struct efx_channel *channel;
sizeof(*efx->rps_flow_id), int i, success = 1;
GFP_KERNEL);
if (!efx->rps_flow_id) { efx_for_each_channel(channel, efx) {
channel->rps_flow_id =
kcalloc(efx->type->max_rx_ip_filters,
sizeof(*channel->rps_flow_id),
GFP_KERNEL);
if (!channel->rps_flow_id)
success = 0;
else
for (i = 0;
i < efx->type->max_rx_ip_filters;
++i)
channel->rps_flow_id[i] =
RPS_FLOW_ID_INVALID;
}
if (!success) {
efx_for_each_channel(channel, efx)
kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx); efx->type->filter_table_remove(efx);
rc = -ENOMEM; rc = -ENOMEM;
goto out_unlock; goto out_unlock;
} }
efx->rps_expire_index = efx->rps_expire_channel = 0;
} }
#endif #endif
out_unlock: out_unlock:
...@@ -1744,7 +1763,10 @@ out_unlock: ...@@ -1744,7 +1763,10 @@ out_unlock:
static void efx_remove_filters(struct efx_nic *efx) static void efx_remove_filters(struct efx_nic *efx)
{ {
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
kfree(efx->rps_flow_id); struct efx_channel *channel;
efx_for_each_channel(channel, efx)
kfree(channel->rps_flow_id);
#endif #endif
down_write(&efx->filter_sem); down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx); efx->type->filter_table_remove(efx);
......
...@@ -403,6 +403,8 @@ enum efx_sync_events_state { ...@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision * @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score * @irq_mod_score: IRQ moderation score
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
...@@ -446,6 +448,8 @@ struct efx_channel { ...@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score; unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added; unsigned int rfs_filters_added;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
u32 *rps_flow_id;
#endif #endif
unsigned n_rx_tobe_disc; unsigned n_rx_tobe_disc;
...@@ -889,9 +893,9 @@ struct vfdi_status; ...@@ -889,9 +893,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table * @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes * @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state * @filter_state: Architecture-dependent filter table state
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, * @rps_expire_channel: Next channel to check for expiry
* indexed by filter ID * @rps_expire_index: Next index to check for expiry in
* @rps_expire_index: Next index to check for expiry in @rps_flow_id * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed. * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called. * Decremented when the efx_flush_rx_queue() is called.
...@@ -1035,7 +1039,7 @@ struct efx_nic { ...@@ -1035,7 +1039,7 @@ struct efx_nic {
spinlock_t filter_lock; spinlock_t filter_lock;
void *filter_state; void *filter_state;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
u32 *rps_flow_id; unsigned int rps_expire_channel;
unsigned int rps_expire_index; unsigned int rps_expire_index;
#endif #endif
......
...@@ -845,6 +845,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, ...@@ -845,6 +845,9 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct flow_keys fk; struct flow_keys fk;
int rc; int rc;
if (flow_id == RPS_FLOW_ID_INVALID)
return -EINVAL;
if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
return -EPROTONOSUPPORT; return -EPROTONOSUPPORT;
...@@ -879,8 +882,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, ...@@ -879,8 +882,8 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
return rc; return rc;
/* Remember this so we can check whether to expire the filter later */ /* Remember this so we can check whether to expire the filter later */
efx->rps_flow_id[rc] = flow_id; channel = efx_get_channel(efx, rxq_index);
channel = efx_get_channel(efx, skb_get_rx_queue(skb)); channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added; ++channel->rfs_filters_added;
if (spec.ether_type == htons(ETH_P_IP)) if (spec.ether_type == htons(ETH_P_IP))
...@@ -902,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, ...@@ -902,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{ {
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
unsigned int index, size; unsigned int channel_idx, index, size;
u32 flow_id; u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock)) if (!spin_trylock_bh(&efx->filter_lock))
return false; return false;
expire_one = efx->type->filter_rfs_expire_one; expire_one = efx->type->filter_rfs_expire_one;
channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index; index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters; size = efx->type->max_rx_ip_filters;
while (quota--) { while (quota--) {
flow_id = efx->rps_flow_id[index]; struct efx_channel *channel = efx_get_channel(efx, channel_idx);
if (expire_one(efx, flow_id, index)) flow_id = channel->rps_flow_id[index];
if (flow_id != RPS_FLOW_ID_INVALID &&
expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev, netif_info(efx, rx_status, efx->net_dev,
"expired filter %d [flow %u]\n", "expired filter %d [queue %u flow %u]\n",
index, flow_id); index, channel_idx, flow_id);
if (++index == size) channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
}
if (++index == size) {
if (++channel_idx == efx->n_channels)
channel_idx = 0;
index = 0; index = 0;
}
} }
efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index; efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock); spin_unlock_bh(&efx->filter_lock);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment