/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL_GPL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
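/* Without NLM_F_ECHO, a reply is built only when some process is
 * subscribed to the multicast 'group'; that is what the
 * genl_has_listeners() call below checks. */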
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info)->genl_sock,
				  group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
		    0, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath or port,
 * set operations on vports, etc.) and writes to other state (flow table
 * modifications, miscellaneous datapath parameters, etc.), are protected
 * by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization, but they nest under all of the above and don't
 * interact with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
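
/* A schematic example of the discipline above (illustrative pseudocode,
 * not part of the original file): writers wrap state changes in
 * ovs_lock()/ovs_unlock(), while readers only need an RCU read-side
 * critical section:
 *
 *	ovs_lock();
 *	... add or remove a vport, modify the flow table ...
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	dp = get_dp_rcu(net, dp_ifindex);
 *	... use dp; it stays valid until rcu_read_unlock() ...
 *	rcu_read_unlock();
 */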

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
	return vport->ops->get_name(vport);
}

static int get_dpifindex(struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
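
/* The mask above selects a valid bucket because DP_VPORT_HASH_BUCKETS is
 * a power of two, making "port_no & (DP_VPORT_HASH_BUCKETS - 1)"
 * equivalent to "port_no % DP_VPORT_HASH_BUCKETS" without a division. */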

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.key = key;
		upcall.userdata = NULL;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.egress_tun_info = NULL;
		error = ovs_dp_upcall(dp, skb, &upcall);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, upcall_info);
	else
		err = queue_gso_packets(dp, skb, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct dp_upcall_info later_info;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	int err;

	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	/* Queue all of the segments. */
	skb = segs;
	do {
		err = queue_userspace_packet(dp, skb, upcall_info);
		if (err)
			break;

		if (skb == segs && gso_type & SKB_GSO_UDP) {
			/* The initial flow key extracted by ovs_flow_key_extract()
			 * in this case is for the first fragment, so we need to
			 * properly mark later fragments.
			 */
			later_key = *upcall_info->key;
			later_key.ip.frag = OVS_FRAG_TYPE_LATER;

			later_info = *upcall_info;
			later_info.key = &later_key;
			upcall_info = &later_info;
		}
	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}
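
/* Note: queue_userspace_packet() copies data out of each segment rather
 * than consuming it, hence the two passes above: one pass queues every
 * segment, a second releases them, so that even after an error all
 * remaining segments are still freed. */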

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	return size;
}

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (vlan_tx_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
		if (!nskb)
			return -ENOMEM;

		nskb->vlan_tci = 0;
		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO, which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
	BUG_ON(err);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		err = ovs_nla_put_egress_tunnel_key(user_skb,
						    upcall_info->egress_tun_info);
		BUG_ON(err);
		nla_nest_end(user_skb, nla);
	}

	/* Only reserve room for the attribute header; packet data is added
	 * in skb_zerocopy(). */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len);

	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

		if (plen > 0)
			memset(skb_put(user_skb, plen), 0, plen);
	}

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct ethhdr *eth;
	struct vport *input_vport;
	int len;
	int err;

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
					     &flow->key);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	OVS_CB(packet)->egress_tun_info = NULL;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};
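
/* OVS_PACKET_ATTR_PACKET sets only ".len" with no attribute type, which
 * Netlink policy validation treats as a minimum payload length: the
 * attribute must carry at least a full Ethernet header (ETH_HLEN bytes). */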

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};
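
/* ".parallel_ops = true" opts this family out of the global Generic
 * Netlink serialization, so OVS_PACKET_CMD_EXECUTE requests may be
 * handled concurrently; ovs_packet_cmd_execute() relies on RCU for its
 * own synchronization instead. */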

static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}
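
/* This is a worst-case estimate: attributes that turn out to be absent
 * (e.g. OVS_FLOW_ATTR_STATS for a flow that has never matched a packet)
 * simply leave unused room in the reply skb sized from it. */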

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);

	/* Fill flow mask. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_flow_cmd_fill_match(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
	if (err)
		goto error;

	return genlmsg_end(skb, ovs_header);

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       struct genl_info *info,
					       bool always)
{
	struct sk_buff *skb;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
				      always);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd);
	BUG_ON(retval < 0);
	return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	int error;

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR("Flow key attribute not present in new flow.\n");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR("Flow actions attribute not present in new flow.\n");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->unmasked_key, &mask);
	error = ovs_nla_get_match(&match,
				  a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
	if (error)
		goto err_kfree_flow;

	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
				     &acts);
	if (error) {
		OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, info, false);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check if this is a duplicate flow */
	flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}
		/* The unmasked key has to be the same for flow updates. */
		if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
			flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
			if (!flow) {
				error = -ENOENT;
				goto err_unlock_ovs;
			}
		}
		/* Update actions. */
		old_acts = ovsl_dereference(flow->sf_acts);
		rcu_assign_pointer(flow->sf_acts, acts);

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
			BUG_ON(error < 0);
		}
		ovs_unlock();

		ovs_nla_free_flow_actions(old_acts);
		ovs_flow_free(new_flow, false);
	}

	if (reply)
		ovs_notify(&dp_flow_genl_family, reply, info);
	return 0;

err_unlock_ovs:
	ovs_unlock();
	kfree_skb(reply);
err_kfree_acts:
	kfree(acts);
err_kfree_flow:
	ovs_flow_free(new_flow, false);
error:
	return error;
}

/* Factor out action copy to avoid a "-Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
						const struct sw_flow_key *key,
						const struct sw_flow_mask *mask)
{
	struct sw_flow_actions *acts;
	struct sw_flow_key masked_key;
	int error;

	ovs_flow_mask_key(&masked_key, key, mask);
	error = ovs_nla_copy_actions(a, &masked_key, &acts);
	if (error) {
		OVS_NLERR("Actions may not be safe on all matching packets.\n");
		return ERR_PTR(error);
	}

	return acts;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow_key key;
	struct sw_flow *flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply = NULL;
	struct datapath *dp;
	struct sw_flow_actions *old_acts = NULL, *acts = NULL;
	struct sw_flow_match match;
	int error;

	/* Extract key. */
	error = -EINVAL;