/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL_GPL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
	.name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
	.name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP,
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{
	return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
	       genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{
	genl_notify(family, skb, genl_info_net(info), info->snd_portid,
		    0, info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath, port, set
 * operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
	mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
	mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
	if (debug_locks)
		return lockdep_is_held(&ovs_mutex);
	else
		return 1;
}
EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
	struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

	if (dev) {
		struct vport *vport = ovs_internal_dev_get_vport(dev);
		if (vport)
			return vport->dp;
	}

	return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
	struct datapath *dp;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	rcu_read_unlock();

	return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
	struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);

	return vport->ops->get_name(vport);
}

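/* Returns the ifindex of the datapath's local port's network device, or 0
 * if the local port is gone.  Takes the RCU read lock internally. */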
static int get_dpifindex(const struct datapath *dp)
{
	struct vport *local;
	int ifindex;

	rcu_read_lock();

	local = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (local)
		ifindex = netdev_vport_priv(local)->dev->ifindex;
	else
		ifindex = 0;

	rcu_read_unlock();

	return ifindex;
}

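/* RCU callback that frees a datapath once all RCU readers are done with it. */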
static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	ovs_flow_tbl_destroy(&dp->table);
	free_percpu(dp->stats_percpu);
	release_net(ovs_dp_get_net(dp));
	kfree(dp->ports);
	kfree(dp);
}

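/* Maps a port number to its bucket in the datapath's vport hash table. */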
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{
	return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
	struct vport *vport;
	struct hlist_head *head;

	head = vport_hash_bucket(dp, port_no);
	hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
		if (vport->port_no == port_no)
			return vport;
	}
	return NULL;
}

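/* Creates a vport from 'parms' and adds it to its datapath's hash table. */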
/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;
		struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

		hlist_add_head_rcu(&vport->dp_hash_node, head);
	}
	return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_OVSL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}

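/* Main receive path: looks up the flow for 'skb' and executes its actions,
 * or makes an OVS_PACKET_CMD_MISS upcall to userspace when no flow matches. */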
/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
	const struct vport *p = OVS_CB(skb)->input_vport;
	struct datapath *dp = p->dp;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct dp_stats_percpu *stats;
	u64 *stats_counter;
	u32 n_mask_hit;

	stats = this_cpu_ptr(dp->stats_percpu);

	/* Look up flow. */
	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
	if (unlikely(!flow)) {
		struct dp_upcall_info upcall;
		int error;

		upcall.cmd = OVS_PACKET_CMD_MISS;
		upcall.userdata = NULL;
		upcall.portid = ovs_vport_find_upcall_portid(p, skb);
		upcall.egress_tun_info = NULL;
		error = ovs_dp_upcall(dp, skb, key, &upcall);
		if (unlikely(error))
			kfree_skb(skb);
		else
			consume_skb(skb);
		stats_counter = &stats->n_missed;
		goto out;
	}

	ovs_flow_stats_update(flow, key->tp.flags, skb);
	sf_acts = rcu_dereference(flow->sf_acts);
	ovs_execute_actions(dp, skb, sf_acts, key);

	stats_counter = &stats->n_hit;

out:
	/* Update datapath statistics. */
	u64_stats_update_begin(&stats->syncp);
	(*stats_counter)++;
	stats->n_mask_hit += n_mask_hit;
	u64_stats_update_end(&stats->syncp);
}

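/* Queues 'skb' for delivery to the userspace portid in 'upcall_info',
 * segmenting GSO packets first.  Bumps the datapath's n_lost counter on
 * failure. */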
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	if (upcall_info->portid == 0) {
		err = -ENOTCONN;
		goto err;
	}

	if (!skb_is_gso(skb))
		err = queue_userspace_packet(dp, skb, key, upcall_info);
	else
		err = queue_gso_packets(dp, skb, key, upcall_info);
	if (err)
		goto err;

	return 0;

err:
	stats = this_cpu_ptr(dp->stats_percpu);

	u64_stats_update_begin(&stats->syncp);
	stats->n_lost++;
	u64_stats_update_end(&stats->syncp);

	return err;
}

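/* Segments a GSO skb and queues each segment to userspace as a separate
 * upcall, fixing up the flow key for later IP fragments. */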
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info)
{
	unsigned short gso_type = skb_shinfo(skb)->gso_type;
	struct sw_flow_key later_key;
	struct sk_buff *segs, *nskb;
	struct ovs_skb_cb ovs_cb;
	int err;

	ovs_cb = *OVS_CB(skb);
	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
	*OVS_CB(skb) = ovs_cb;
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	if (gso_type & SKB_GSO_UDP) {
		/* The initial flow key extracted by ovs_flow_key_extract()
		 * in this case is for a first fragment, so we need to
		 * properly mark later fragments.
		 */
		later_key = *key;
		later_key.ip.frag = OVS_FRAG_TYPE_LATER;
	}

	/* Queue all of the segments. */
	skb = segs;
	do {
		*OVS_CB(skb) = ovs_cb;
		if (gso_type & SKB_GSO_UDP && skb != segs)
			key = &later_key;

		err = queue_userspace_packet(dp, skb, key, upcall_info);
		if (err)
			break;

	} while ((skb = skb->next));

	/* Free all of the segments. */
	skb = segs;
	do {
		nskb = skb->next;
		if (err)
			kfree_skb(skb);
		else
			consume_skb(skb);
	} while ((skb = nskb));
	return err;
}

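/* Upper bound on the Netlink message size needed for an upcall carrying
 * 'hdrlen' bytes of packet data plus the flow key and the optional userdata
 * and egress tunnel key attributes. */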
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
		+ nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

	/* OVS_PACKET_ATTR_USERDATA */
	if (upcall_info->userdata)
		size += NLA_ALIGN(upcall_info->userdata->nla_len);

	/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
	if (upcall_info->egress_tun_info)
		size += nla_total_size(ovs_tun_key_attr_size());

	return size;
}

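/* Builds a Netlink message around 'skb' (flow key, optional attributes,
 * then the packet itself, by zerocopy where possible) and unicasts it to
 * the userspace socket identified by 'upcall_info'. */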
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info)
{
	struct ovs_header *upcall;
	struct sk_buff *nskb = NULL;
	struct sk_buff *user_skb = NULL; /* to be queued to userspace */
	struct nlattr *nla;
	struct genl_info info = {
		.dst_sk = ovs_dp_get_net(dp)->genl_sock,
		.snd_portid = upcall_info->portid,
	};
	size_t len;
	unsigned int hlen;
	int err, dp_ifindex;

	dp_ifindex = get_dpifindex(dp);
	if (!dp_ifindex)
		return -ENODEV;

	if (skb_vlan_tag_present(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		nskb = __vlan_hwaccel_push_inside(nskb);
		if (!nskb)
			return -ENOMEM;

		skb = nskb;
	}

	if (nla_attr_size(skb->len) > USHRT_MAX) {
		err = -EFBIG;
		goto out;
	}

	/* Complete checksum if needed */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto out;

	/* Older versions of OVS user space enforce alignment of the last
	 * Netlink attribute to NLA_ALIGNTO which would require extensive
	 * padding logic. Only perform zerocopy if padding is not required.
	 */
	if (dp->user_features & OVS_DP_F_UNALIGNED)
		hlen = skb_zerocopy_headlen(skb);
	else
		hlen = skb->len;

	len = upcall_msg_size(upcall_info, hlen);
	user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
	if (!user_skb) {
		err = -ENOMEM;
		goto out;
	}

	upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
			     0, upcall_info->cmd);
	upcall->dp_ifindex = dp_ifindex;

	nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
	err = ovs_nla_put_flow(key, key, user_skb);
	BUG_ON(err);
	nla_nest_end(user_skb, nla);

	if (upcall_info->userdata)
		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
			  nla_len(upcall_info->userdata),
			  nla_data(upcall_info->userdata));

	if (upcall_info->egress_tun_info) {
		nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
		err = ovs_nla_put_egress_tunnel_key(user_skb,
						    upcall_info->egress_tun_info);
		BUG_ON(err);
		nla_nest_end(user_skb, nla);
	}

	/* Only reserve room for the attribute header; the packet data is
	 * added in skb_zerocopy(). */
	if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
		err = -ENOBUFS;
		goto out;
	}
	nla->nla_len = nla_attr_size(skb->len);

	err = skb_zerocopy(user_skb, skb, skb->len, hlen);
	if (err)
		goto out;

	/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
	if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
		size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

		if (plen > 0)
			memset(skb_put(user_skb, plen), 0, plen);
	}

	((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

	err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
	user_skb = NULL;
out:
	if (err)
		skb_tx_error(skb);
	kfree_skb(user_skb);
	kfree_skb(nskb);
	return err;
}

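/* Handler for OVS_PACKET_CMD_EXECUTE: userspace supplies a packet, a flow
 * key and an action list, and the datapath executes the actions on the
 * packet. */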
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct ovs_header *ovs_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sw_flow_actions *acts;
	struct sk_buff *packet;
	struct sw_flow *flow;
	struct sw_flow_actions *sf_acts;
	struct datapath *dp;
	struct ethhdr *eth;
	struct vport *input_vport;
	int len;
	int err;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	err = -EINVAL;
	if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
	    !a[OVS_PACKET_ATTR_ACTIONS])
		goto err;

	len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
	packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto err;
	skb_reserve(packet, NET_IP_ALIGN);

	nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	/* Build an sw_flow for sending this packet. */
	flow = ovs_flow_alloc();
	err = PTR_ERR(flow);
	if (IS_ERR(flow))
		goto err_kfree_skb;

	err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
					     &flow->key, log);
	if (err)
		goto err_flow_free;

	err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
				   &flow->key, &acts, log);
	if (err)
		goto err_flow_free;

	rcu_assign_pointer(flow->sf_acts, acts);
	OVS_CB(packet)->egress_tun_info = NULL;
	packet->priority = flow->key.phy.priority;
	packet->mark = flow->key.phy.skb_mark;

	rcu_read_lock();
	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto err_unlock;

	input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
	if (!input_vport)
		input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

	if (!input_vport)
		goto err_unlock;

	OVS_CB(packet)->input_vport = input_vport;
	sf_acts = rcu_dereference(flow->sf_acts);

	local_bh_disable();
	err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
	local_bh_enable();
	rcu_read_unlock();

	ovs_flow_free(flow, false);
	return err;

err_unlock:
	rcu_read_unlock();
err_flow_free:
	ovs_flow_free(flow, false);
err_kfree_skb:
	kfree_skb(packet);
err:
	return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
	[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
	[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
	[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = OVS_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = ovs_packet_cmd_execute
	}
};

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_PACKET_FAMILY,
	.version = OVS_PACKET_VERSION,
	.maxattr = OVS_PACKET_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
	.ops = dp_packet_genl_ops,
	.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

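/* Sums the datapath's per-CPU counters into 'stats' and 'mega_stats',
 * using the u64_stats seqcount to get consistent snapshots. */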
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{
	int i;

	memset(mega_stats, 0, sizeof(*mega_stats));

	stats->n_flows = ovs_flow_tbl_count(&dp->table);
	mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

	stats->n_hit = stats->n_missed = stats->n_lost = 0;

	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
		mega_stats->n_mask_hit += local_stats.n_mask_hit;
	}
}

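/* Upper bound on the Netlink message size needed to dump one flow. */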
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
	return NLMSG_ALIGN(sizeof(struct ovs_header))
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
		+ nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
		+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
		+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
		+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
		+ nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

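/* Emits OVS_FLOW_ATTR_KEY and OVS_FLOW_ATTR_MASK for 'flow' into 'skb'. */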
/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	/* Fill flow key. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);

	/* Fill flow mask. */
	nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
	if (!nla)
		return -EMSGSIZE;

	err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{
	struct ovs_flow_stats stats;
	__be16 tcp_flags;
	unsigned long used;

	ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

	if (used &&
	    nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
		return -EMSGSIZE;

	if (stats.n_packets &&
	    nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
		return -EMSGSIZE;

	if ((u8)ntohs(tcp_flags) &&
	     nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
		return -EMSGSIZE;

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{
	struct nlattr *start;
	int err;

	/* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
	 * this is the first flow to be dumped into 'skb'.  This is unusual for
	 * Netlink but individual action lists can be longer than
	 * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
	 * The userspace caller can always fetch the actions separately if it
	 * really wants them.  (Most userspace callers in fact don't care.)
	 *
	 * This can only fail for dump operations because the skb is always
	 * properly sized for single flows.
	 */
	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (start) {
		const struct sw_flow_actions *sf_acts;

		sf_acts = rcu_dereference_ovsl(flow->sf_acts);
		err = ovs_nla_put_actions(sf_acts->actions,
					  sf_acts->actions_len, skb);

		if (!err)
			nla_nest_end(skb, start);
		else {
			if (skb_orig_len)
				return err;

			nla_nest_cancel(skb, start);
		}
	} else if (skb_orig_len) {
		return -EMSGSIZE;
	}

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd)
{
	const int skb_orig_len = skb->len;
	struct ovs_header *ovs_header;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = dp_ifindex;

	err = ovs_flow_cmd_fill_match(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_stats(flow, skb);
	if (err)
		goto error;

	err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
	if (err)
		goto error;

	return genlmsg_end(skb, ovs_header);

error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

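/* Allocates a reply skb sized for 'acts'; returns NULL when no reply is
 * needed (no NLM_F_ECHO and no multicast listeners) and an ERR_PTR on
 * allocation failure. */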
/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       struct genl_info *info,
					       bool always)
{
	struct sk_buff *skb;

	if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always)
{
	struct sk_buff *skb;
	int retval;

	skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
				      always);
	if (IS_ERR_OR_NULL(skb))
		return skb;

	retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
					info->snd_portid, info->snd_seq, 0,
					cmd);
	BUG_ON(retval < 0);
	return skb;
}

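/* Handler for OVS_FLOW_CMD_NEW: builds a flow from the key, mask and
 * action attributes and inserts it into the datapath's flow table, or
 * updates the actions of an existing flow with the same unmasked key. */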
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sw_flow *flow, *new_flow;
	struct sw_flow_mask mask;
	struct sk_buff *reply;
	struct datapath *dp;
	struct sw_flow_actions *acts;
	struct sw_flow_match match;
	int error;
	bool log = !a[OVS_FLOW_ATTR_PROBE];

	/* Must have key and actions. */
	error = -EINVAL;
	if (!a[OVS_FLOW_ATTR_KEY]) {
		OVS_NLERR(log, "Flow key attr not present in new flow.");
		goto error;
	}
	if (!a[OVS_FLOW_ATTR_ACTIONS]) {
		OVS_NLERR(log, "Flow actions attr not present in new flow.");
		goto error;
	}

	/* Most of the time we need to allocate a new flow, do it before
	 * locking.
	 */
	new_flow = ovs_flow_alloc();
	if (IS_ERR(new_flow)) {
		error = PTR_ERR(new_flow);
		goto error;
	}

	/* Extract key. */
	ovs_match_init(&match, &new_flow->unmasked_key, &mask);
	error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
				  a[OVS_FLOW_ATTR_MASK], log);
	if (error)
		goto err_kfree_flow;

	ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);

	/* Validate actions. */
	error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
				     &acts, log);
	if (error) {
		OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
		goto err_kfree_flow;
	}

	reply = ovs_flow_cmd_alloc_info(acts, info, false);
	if (IS_ERR(reply)) {
		error = PTR_ERR(reply);
		goto err_kfree_acts;
	}

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (unlikely(!dp)) {
		error = -ENODEV;
		goto err_unlock_ovs;
	}
	/* Check if this is a duplicate flow */
	flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
	if (likely(!flow)) {
		rcu_assign_pointer(new_flow->sf_acts, acts);

		/* Put flow in bucket. */
		error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
		if (unlikely(error)) {
			acts = NULL;
			goto err_unlock_ovs;
		}

		if (unlikely(reply)) {
			error = ovs_flow_cmd_fill_info(new_flow,
						       ovs_header->dp_ifindex,
						       reply, info->snd_portid,
						       info->snd_seq, 0,
						       OVS_FLOW_CMD_NEW);
			BUG_ON(error < 0);
		}
		ovs_unlock();
	} else {
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed.
		 */
		if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
							 | NLM_F_EXCL))) {
			error = -EEXIST;
			goto err_unlock_ovs;
		}