Commit 23dabf88 authored by Jarno Rajahalme, committed by Jesse Gross

openvswitch: Remove 5-tuple optimization.

The 5-tuple optimization becomes unnecessary with a later per-NUMA
node stats patch.  Remove it first to make the changes easier to
grasp.
Signed-off-by: Jarno Rajahalme <jrajahalme@nicira.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
parent 8c63ff09
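
Context for the diff below: ovs_nla_get_match() used to compute an exact_5tuple flag while parsing the mask (cleared whenever the Ethernet type, the IPv4/IPv6 addresses and protocol, or the TCP/UDP ports were not matched exactly), and ovs_flow_cmd_new_or_set() then called ovs_flow_alloc(!exact_5tuple), so an exact-5-tuple flow got a single spinlock-protected stats block while wildcarded flows got per-CPU stats. After this patch every flow takes the per-CPU path. The following is a minimal sketch of that per-CPU pattern using the kernel per-CPU API (alloc_percpu, this_cpu_ptr, per_cpu_ptr, for_each_possible_cpu); the struct layout and the helper names flow_stats_alloc/flow_stats_hit/flow_stats_sum are illustrative only, not code from this patch:

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

struct flow_stats {
        u64 packet_count;
        u64 byte_count;
        spinlock_t lock;
};

/* One zeroed flow_stats per possible CPU, each with its own lock,
 * mirroring what ovs_flow_alloc() does unconditionally after this patch. */
static struct flow_stats __percpu *flow_stats_alloc(void)
{
        struct flow_stats __percpu *stats;
        int cpu;

        stats = alloc_percpu(struct flow_stats);        /* returns zeroed memory */
        if (!stats)
                return NULL;
        for_each_possible_cpu(cpu)
                spin_lock_init(&per_cpu_ptr(stats, cpu)->lock);
        return stats;
}

/* Fast path (cf. ovs_flow_stats_update): touch only this CPU's instance,
 * so updates on different CPUs never contend on one lock.  Assumes the
 * caller runs with preemption disabled (e.g. rx softirq context). */
static void flow_stats_hit(struct flow_stats __percpu *stats, unsigned int len)
{
        struct flow_stats *s = this_cpu_ptr(stats);

        spin_lock(&s->lock);
        s->packet_count++;
        s->byte_count += len;
        spin_unlock(&s->lock);
}

/* Slow path (cf. ovs_flow_stats_get): walk every possible CPU and sum. */
static void flow_stats_sum(struct flow_stats __percpu *stats,
                           u64 *packets, u64 *bytes)
{
        int cpu;

        *packets = *bytes = 0;
        for_each_possible_cpu(cpu) {
                struct flow_stats *s = per_cpu_ptr(stats, cpu);

                spin_lock_bh(&s->lock);
                *packets += s->packet_count;
                *bytes += s->byte_count;
                spin_unlock_bh(&s->lock);
        }
}

The trade-off is memory: every flow now pays for one flow_stats per possible CPU, which is exactly what the exact-5-tuple special case avoided. Per the commit message, the later per-NUMA node stats patch makes the special case unnecessary, and removing it first keeps that patch easier to review.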
@@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
                 packet->protocol = htons(ETH_P_802_2);
 
         /* Build an sw_flow for sending this packet. */
-        flow = ovs_flow_alloc(false);
+        flow = ovs_flow_alloc();
         err = PTR_ERR(flow);
         if (IS_ERR(flow))
                 goto err_kfree_skb;
@@ -782,7 +782,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
         struct datapath *dp;
         struct sw_flow_actions *acts = NULL;
         struct sw_flow_match match;
-        bool exact_5tuple;
         int error;
 
         /* Extract key. */
......@@ -791,7 +790,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto error;
ovs_match_init(&match, &key, &mask);
error = ovs_nla_get_match(&match, &exact_5tuple,
error = ovs_nla_get_match(&match,
a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
if (error)
goto error;
@@ -830,7 +829,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
                         goto err_unlock_ovs;
 
                 /* Allocate flow. */
-                flow = ovs_flow_alloc(!exact_5tuple);
+                flow = ovs_flow_alloc();
                 if (IS_ERR(flow)) {
                         error = PTR_ERR(flow);
                         goto err_unlock_ovs;
@@ -914,7 +913,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
         }
 
         ovs_match_init(&match, &key, NULL);
-        err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+        err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
         if (err)
                 return err;
@@ -968,7 +967,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
         }
 
         ovs_match_init(&match, &key, NULL);
-        err = ovs_nla_get_match(&match, NULL, a[OVS_FLOW_ATTR_KEY], NULL);
+        err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
         if (err)
                 goto unlock;
......
@@ -66,10 +66,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
         struct flow_stats *stats;
         __be16 tcp_flags = 0;
 
-        if (!flow->stats.is_percpu)
-                stats = flow->stats.stat;
-        else
-                stats = this_cpu_ptr(flow->stats.cpu_stats);
+        stats = this_cpu_ptr(flow->stats);
 
         if ((flow->key.eth.type == htons(ETH_P_IP) ||
              flow->key.eth.type == htons(ETH_P_IPV6)) &&
@@ -110,16 +107,14 @@ void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
         memset(ovs_stats, 0, sizeof(*ovs_stats));
 
         local_bh_disable();
-        if (!flow->stats.is_percpu) {
-                stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
-        } else {
-                for_each_possible_cpu(cpu) {
-                        struct flow_stats *stats;
-
-                        stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                        stats_read(stats, ovs_stats, used, tcp_flags);
-                }
+        for_each_possible_cpu(cpu) {
+                struct flow_stats *stats;
+
+                stats = per_cpu_ptr(flow->stats, cpu);
+                stats_read(stats, ovs_stats, used, tcp_flags);
         }
         local_bh_enable();
 }
@@ -138,13 +133,10 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
         int cpu;
 
         local_bh_disable();
-        if (!flow->stats.is_percpu) {
-                stats_reset(flow->stats.stat);
-        } else {
-                for_each_possible_cpu(cpu) {
-                        stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
-                }
-        }
+        for_each_possible_cpu(cpu)
+                stats_reset(per_cpu_ptr(flow->stats, cpu));
         local_bh_enable();
 }
......
@@ -155,14 +155,6 @@ struct flow_stats {
         __be16 tcp_flags;               /* Union of seen TCP flags. */
 };
 
-struct sw_flow_stats {
-        bool is_percpu;
-        union {
-                struct flow_stats *stat;
-                struct flow_stats __percpu *cpu_stats;
-        };
-};
-
 struct sw_flow {
         struct rcu_head rcu;
         struct hlist_node hash_node[2];
@@ -172,7 +164,7 @@ struct sw_flow {
         struct sw_flow_key unmasked_key;
         struct sw_flow_mask *mask;
         struct sw_flow_actions __rcu *sf_acts;
-        struct sw_flow_stats stats;
+        struct flow_stats __percpu *stats;
 };
 
 struct arp_eth_header {
......
@@ -268,20 +268,6 @@ static bool is_all_zero(const u8 *fp, size_t size)
         return true;
 }
 
-static bool is_all_set(const u8 *fp, size_t size)
-{
-        int i;
-
-        if (!fp)
-                return false;
-
-        for (i = 0; i < size; i++)
-                if (fp[i] != 0xff)
-                        return false;
-
-        return true;
-}
-
 static int __parse_flow_nlattrs(const struct nlattr *attr,
                                 const struct nlattr *a[],
                                 u64 *attrsp, bool nz)
@@ -503,9 +489,8 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
         return 0;
 }
 
-static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple,
-                                u64 attrs, const struct nlattr **a,
-                                bool is_mask)
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+                                const struct nlattr **a, bool is_mask)
 {
         int err;
         u64 orig_attrs = attrs;
@@ -562,11 +547,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
                 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
         }
 
-        if (is_mask && exact_5tuple) {
-                if (match->mask->key.eth.type != htons(0xffff))
-                        *exact_5tuple = false;
-        }
-
         if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
                 const struct ovs_key_ipv4 *ipv4_key;
@@ -589,13 +569,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
                 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
                                 ipv4_key->ipv4_dst, is_mask);
                 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-
-                if (is_mask && exact_5tuple && *exact_5tuple) {
-                        if (ipv4_key->ipv4_proto != 0xff ||
-                            ipv4_key->ipv4_src != htonl(0xffffffff) ||
-                            ipv4_key->ipv4_dst != htonl(0xffffffff))
-                                *exact_5tuple = false;
-                }
         }
 
         if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
@@ -627,15 +600,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
                                 is_mask);
                 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
-
-                if (is_mask && exact_5tuple && *exact_5tuple) {
-                        if (ipv6_key->ipv6_proto != 0xff ||
-                            !is_all_set((const u8 *)ipv6_key->ipv6_src,
-                                        sizeof(match->key->ipv6.addr.src)) ||
-                            !is_all_set((const u8 *)ipv6_key->ipv6_dst,
-                                        sizeof(match->key->ipv6.addr.dst)))
-                                *exact_5tuple = false;
-                }
         }
 
         if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
@@ -678,11 +642,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
                                         tcp_key->tcp_dst, is_mask);
                 }
                 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
-                if (is_mask && exact_5tuple && *exact_5tuple &&
-                    (tcp_key->tcp_src != htons(0xffff) ||
-                     tcp_key->tcp_dst != htons(0xffff)))
-                        *exact_5tuple = false;
         }
 
         if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
@@ -714,11 +673,6 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, bool *exact_5tuple
                                         udp_key->udp_dst, is_mask);
                 }
                 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
-                if (is_mask && exact_5tuple && *exact_5tuple &&
-                    (udp_key->udp_src != htons(0xffff) ||
-                     udp_key->udp_dst != htons(0xffff)))
-                        *exact_5tuple = false;
         }
 
         if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
@@ -804,7 +758,6 @@ static void sw_flow_mask_set(struct sw_flow_mask *mask,
  * attribute specifies the mask field of the wildcarded flow.
  */
 int ovs_nla_get_match(struct sw_flow_match *match,
-                      bool *exact_5tuple,
                       const struct nlattr *key,
                       const struct nlattr *mask)
 {
@@ -852,13 +805,10 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                 }
         }
 
-        err = ovs_key_from_nlattrs(match, NULL, key_attrs, a, false);
+        err = ovs_key_from_nlattrs(match, key_attrs, a, false);
         if (err)
                 return err;
 
-        if (exact_5tuple)
-                *exact_5tuple = true;
-
         if (mask) {
                 err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
                 if (err)
@@ -896,7 +846,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
                         }
                 }
 
-                err = ovs_key_from_nlattrs(match, exact_5tuple, mask_attrs, a, true);
+                err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
                 if (err)
                         return err;
         } else {
......
@@ -45,7 +45,6 @@ int ovs_nla_put_flow(const struct sw_flow_key *,
 int ovs_nla_get_flow_metadata(struct sw_flow *flow,
                               const struct nlattr *attr);
 int ovs_nla_get_match(struct sw_flow_match *match,
-                      bool *exact_5tuple,
                       const struct nlattr *,
                       const struct nlattr *);
......
@@ -72,7 +72,7 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                 *d++ = *s++ & *m++;
 }
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats)
+struct sw_flow *ovs_flow_alloc(void)
 {
         struct sw_flow *flow;
         int cpu;
@@ -84,25 +84,15 @@ struct sw_flow *ovs_flow_alloc(void)
         flow->sf_acts = NULL;
         flow->mask = NULL;
 
-        flow->stats.is_percpu = percpu_stats;
+        flow->stats = alloc_percpu(struct flow_stats);
+        if (!flow->stats)
+                goto err;
 
-        if (!percpu_stats) {
-                flow->stats.stat = kzalloc(sizeof(*flow->stats.stat), GFP_KERNEL);
-                if (!flow->stats.stat)
-                        goto err;
+        for_each_possible_cpu(cpu) {
+                struct flow_stats *cpu_stats;
 
-                spin_lock_init(&flow->stats.stat->lock);
-        } else {
-                flow->stats.cpu_stats = alloc_percpu(struct flow_stats);
-                if (!flow->stats.cpu_stats)
-                        goto err;
-
-                for_each_possible_cpu(cpu) {
-                        struct flow_stats *cpu_stats;
-
-                        cpu_stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
-                        spin_lock_init(&cpu_stats->lock);
-                }
+                cpu_stats = per_cpu_ptr(flow->stats, cpu);
+                spin_lock_init(&cpu_stats->lock);
         }
         return flow;
 err:
@@ -141,10 +131,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
 
 static void flow_free(struct sw_flow *flow)
 {
         kfree((struct sf_flow_acts __force *)flow->sf_acts);
-        if (flow->stats.is_percpu)
-                free_percpu(flow->stats.cpu_stats);
-        else
-                kfree(flow->stats.stat);
+        free_percpu(flow->stats);
         kmem_cache_free(flow_cache, flow);
 }
......
@@ -55,7 +55,7 @@ struct flow_table {
 int ovs_flow_init(void);
 void ovs_flow_exit(void);
 
-struct sw_flow *ovs_flow_alloc(bool percpu_stats);
+struct sw_flow *ovs_flow_alloc(void);
 void ovs_flow_free(struct sw_flow *, bool deferred);
 
 int ovs_flow_tbl_init(struct flow_table *);
......