/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check whether we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info)->genl_sock,
				  group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
		    0, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * Writes to device state (add/remove datapath, port, set operations on
 * vports, etc.) and writes to other state (flow table modifications,
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
EXPORT_SYMBOL(lockdep_ovsl_is_held);
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

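/* Return the ifindex of the datapath's local port, or 0 if it has none. */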
static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

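/* RCU callback: frees the datapath once all RCU readers have finished. */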
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

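/* DP_VPORT_HASH_BUCKETS is a power of two, so masking the port number
 * with (DP_VPORT_HASH_BUCKETS - 1) selects its bucket directly. */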
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = key;
		upcall.userdata = NULL;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		error = ovs_dp_upcall(dp, skb, &upcall);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

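	/* Flow hit: update per-flow statistics and run its action list. */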
	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, upcall_info);
	else
		err = queue_gso_packets(dp, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

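/* GSO packets are segmented before being queued so userspace receives
 * individual frames; each segment gets its own upcall below. */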
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(dp, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_key_extract()
			 * in this case is for a first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

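/* Upper bound on the Netlink message size needed for an upcall: header
 * plus packet, key and optional userdata attributes. */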
static size_t upcall_msg_size(const struct nlattr *userdata,
			      unsigned int hdrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (userdata)
		size += NLA_ALIGN(userdata->nla_len);

	return size;
}

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

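	/* If the VLAN tag is held out of band in the skb, push it back into
	 * the packet data on a clone so userspace receives it in-band. */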
	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info->userdata, hlen);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	BUG_ON(err);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	/* Only reserve room for attribute header, packet data is added
	 * in skb_zerocopy() */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len);

	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

		if (plen > 0)
			memset(skb_put(user_skb, plen), 0, plen);
	}

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

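/* OVS_PACKET_CMD_EXECUTE: userspace supplies a packet, a flow key and an
 * action list; rebuild the skb and a temporary flow, then run the actions. */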
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct ethhdr *eth;
	struct vport *input_vport;
	int len;
	int err;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
					     &flow->key);
	if (err)
		goto err_flow_free;

	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
	err = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);

	OVS_CB(packet)->egress_tun_info = NULL;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

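		/* Snapshot this CPU's counters under the u64_stats seqcount,
		 * retrying if a writer raced with the read. */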
		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);

	/* Fill flow mask. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_flow_cmd_fill_match(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
	if (err)
		goto error;

	return genlmsg_end(skb, ovs_header);

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       struct genl_info *info,
					       bool always)
{
	struct sk_buff *skb;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
				      always);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd);
	BUG_ON(retval < 0);
	return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	int error;

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow key attribute not present in new flow.\n");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR("Flow actions attribute not present in new flow.\n");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->unmasked_key, &mask);
	error = ovs_nla_get_match(&match,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
	if (error)
		goto err_kfree_flow;

	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);

	/* Validate actions. */
	acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
	error = PTR_ERR(acts);
	if (IS_ERR(acts))
		goto err_kfree_flow;

	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
				     &acts);
	if (error) {
		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
		goto err_kfree_acts;
	}

	reply = ovs_flow_cmd_alloc_info(acts, info, false);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check if this is a duplicate flow */
	flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The unmasked key has to be the same for flow updates. */
		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
			flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	kfree(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

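/* Validate and copy the actions for a flow-set request against the masked
 * key, returning freshly allocated sw_flow_actions or an ERR_PTR. */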
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
						const struct sw_flow_key *key,
						const struct sw_flow_mask *mask)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	acts = ovs_nla_alloc_flow_actions(nla_len(a));
	if (IS_ERR(acts))
		return acts;

	ovs_flow_mask_key(&masked_key, key, mask);
	error = ovs_nla_copy_actions(a, &masked_key, &acts);
	if (error) {
		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
		kfree(acts);
		return ERR_PTR(error);
	}

	return acts;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	int error;

	/* Extract key. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow key attribute not present in set flow.\n");
		goto error;