/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Routing netlink socket interface: protocol independent part.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Vitaly E. Lavrov		RTA_OK arithmetics was wrong.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>

#include <asm/uaccess.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>

struct rtnl_link {
	rtnl_doit_func		doit;
	rtnl_dumpit_func	dumpit;
	rtnl_calcit_func	calcit;
};

static DEFINE_MUTEX(rtnl_mutex);

void rtnl_lock(void)
{
	mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);

void __rtnl_unlock(void)
{
	mutex_unlock(&rtnl_mutex);
}

void rtnl_unlock(void)
{
	/* This fellow will unlock it for us. */
	netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);

int rtnl_trylock(void)
{
	return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);

int rtnl_is_locked(void)
{
	return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);

#ifdef CONFIG_PROVE_LOCKING
bool lockdep_rtnl_is_held(void)
{
	return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */

static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];

static inline int rtm_msgindex(int msgtype)
{
	int msgindex = msgtype - RTM_BASE;

	/*
	 * msgindex < 0 implies someone tried to register a netlink
	 * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
	 * the message type has not been added to linux/rtnetlink.h
	 */
	BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);

	return msgindex;
}
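
/*
 * Worked example (informational): with the values from linux/rtnetlink.h,
 * RTM_BASE == 16 and RTM_NEWLINK == 16, so rtm_msgindex(RTM_NEWLINK) == 0,
 * while RTM_GETROUTE == 26 maps to index 10 of a protocol's handler table.
 */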

static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].doit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].doit;
}

static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].dumpit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].dumpit;
}

static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
{
	struct rtnl_link *tab;

	if (protocol <= RTNL_FAMILY_MAX)
		tab = rtnl_msg_handlers[protocol];
	else
		tab = NULL;

	if (tab == NULL || tab[msgindex].calcit == NULL)
		tab = rtnl_msg_handlers[PF_UNSPEC];

	return tab[msgindex].calcit;
}

/**
 * __rtnl_register - Register a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 * @doit: Function pointer called for each request message
 * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
 * @calcit: Function pointer to calc size of dump message
 *
 * Registers the specified function pointers (at least one of them has
 * to be non-NULL) to be called whenever a request message for the
 * specified protocol family and message type is received.
 *
 * The special protocol family PF_UNSPEC may be used to define fallback
 * function pointers for the case when no entry for the specific protocol
 * family exists.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_register(int protocol, int msgtype,
		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		    rtnl_calcit_func calcit)
{
	struct rtnl_link *tab;
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	tab = rtnl_msg_handlers[protocol];
	if (tab == NULL) {
		tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
		if (tab == NULL)
			return -ENOBUFS;

		rtnl_msg_handlers[protocol] = tab;
	}

	if (doit)
		tab[msgindex].doit = doit;

	if (dumpit)
		tab[msgindex].dumpit = dumpit;

	if (calcit)
		tab[msgindex].calcit = calcit;

	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);

/**
 * rtnl_register - Register a rtnetlink message type
 *
 * Identical to __rtnl_register() but panics on failure. This is useful
 * as failure of this function is very unlikely, it can only happen due
 * to lack of memory when allocating the chain to store all message
 * handlers for a protocol. Meant for use in init functions where lack
 * of memory implies no sense in continuing.
 */
void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
		   rtnl_calcit_func calcit)
{
	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
		panic("Unable to register rtnetlink message handler, "
		      "protocol = %d, message type = %d\n",
		      protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
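
/*
 * Usage sketch (illustrative; the callback names below are placeholders,
 * not functions defined in this file): a protocol module typically wires
 * up its handlers from an init function that must not fail, e.g.
 *
 *	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
 *	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
 *
 * PF_UNSPEC entries registered the same way act as fallbacks for families
 * without a handler of their own.
 */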

/**
 * rtnl_unregister - Unregister a rtnetlink message type
 * @protocol: Protocol family or PF_UNSPEC
 * @msgtype: rtnetlink message type
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_unregister(int protocol, int msgtype)
{
	int msgindex;

	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
	msgindex = rtm_msgindex(msgtype);

	if (rtnl_msg_handlers[protocol] == NULL)
		return -ENOENT;

	rtnl_msg_handlers[protocol][msgindex].doit = NULL;
	rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);

/**
 * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
 * @protocol: Protocol family or PF_UNSPEC
 *
 * Identical to calling rtnl_unregister() for all registered message types
 * of a certain protocol family.
 */
void rtnl_unregister_all(int protocol)
{
	BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);

	kfree(rtnl_msg_handlers[protocol]);
	rtnl_msg_handlers[protocol] = NULL;
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
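
/*
 * Illustrative counterpart to the registration sketch above: a family
 * module's exit path can drop every message type it registered with a
 * single call, e.g. rtnl_unregister_all(PF_DECnet).
 */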

static LIST_HEAD(link_ops);

static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
	const struct rtnl_link_ops *ops;

	list_for_each_entry(ops, &link_ops, list) {
		if (!strcmp(ops->kind, kind))
			return ops;
	}
	return NULL;
}

/**
 * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * The caller must hold the rtnl_mutex. This function should be used
 * by drivers that create devices during module initialization. It
 * must be called before registering the devices.
 *
 * Returns 0 on success or a negative error code.
 */
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
	if (rtnl_link_ops_get(ops->kind))
		return -EEXIST;

	/* The check for setup is here because if ops
	 * does not have that filled up, it is not possible
	 * to use the ops for creating device. So do not
	 * fill up dellink as well. That disables rtnl_dellink.
	 */
	if (ops->setup && !ops->dellink)
		ops->dellink = unregister_netdevice_queue;

	list_add_tail(&ops->list, &link_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);

/**
 * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
 * @ops: struct rtnl_link_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
int rtnl_link_register(struct rtnl_link_ops *ops)
{
	int err;

	rtnl_lock();
	err = __rtnl_link_register(ops);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
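
/*
 * Usage sketch (illustrative, modelled on a simple virtual-device driver;
 * "dummy_setup" is a placeholder for that driver's setup callback): a
 * driver declares its ops and registers them from module init:
 *
 *	static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 *		.kind	= "dummy",
 *		.setup	= dummy_setup,
 *	};
 *
 *	err = rtnl_link_register(&dummy_link_ops);
 *
 * Because .setup is set and .dellink is not, __rtnl_link_register() fills
 * in unregister_netdevice_queue() as the default dellink above.
 */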

static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
	struct net_device *dev;
	LIST_HEAD(list_kill);

	for_each_netdev(net, dev) {
		if (dev->rtnl_link_ops == ops)
			ops->dellink(dev, &list_kill);
	}
	unregister_netdevice_many(&list_kill);
}

/**
 * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	struct net *net;

	for_each_net(net) {
		__rtnl_kill_links(net, ops);
	}
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);

/* Return with the rtnl_lock held when there are no network
 * devices unregistering in any network namespace.
 */
static void rtnl_lock_unregistering_all(void)
{
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		for_each_net(net) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

/**
 * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
 * @ops: struct rtnl_link_ops * to unregister
 */
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
	/* Close the race with cleanup_net() */
	mutex_lock(&net_mutex);
	rtnl_lock_unregistering_all();
	__rtnl_link_unregister(ops);
	rtnl_unlock();
	mutex_unlock(&net_mutex);
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);

static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops || !ops->get_slave_size)
		return 0;
	/* IFLA_INFO_SLAVE_DATA + nested data */
	return nla_total_size(sizeof(struct nlattr)) +
	       ops->get_slave_size(master_dev, dev);
}

static size_t rtnl_link_get_size(const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	size_t size;

	if (!ops)
		return 0;

	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */

	if (ops->get_size)
		/* IFLA_INFO_DATA + nested data */
		size += nla_total_size(sizeof(struct nlattr)) +
			ops->get_size(dev);

	if (ops->get_xstats_size)
		/* IFLA_INFO_XSTATS */
		size += nla_total_size(ops->get_xstats_size(dev));

	size += rtnl_link_get_slave_info_data_size(dev);

	return size;
}

static LIST_HEAD(rtnl_af_ops);

static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
	const struct rtnl_af_ops *ops;

	list_for_each_entry(ops, &rtnl_af_ops, list) {
		if (ops->family == family)
			return ops;
	}

	return NULL;
}

/**
 * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
 * @ops: struct rtnl_af_ops * to register
 *
 * Returns 0 on success or a negative error code.
 */
void rtnl_af_register(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	list_add_tail(&ops->list, &rtnl_af_ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
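
/*
 * Usage sketch (illustrative; inet_fill_link_af and friends stand in for
 * an address family's real callbacks): an AF module registers one ops
 * struct keyed by its family so its attributes ride along in IFLA_AF_SPEC:
 *
 *	static struct rtnl_af_ops inet_af_ops __read_mostly = {
 *		.family		  = AF_INET,
 *		.fill_link_af	  = inet_fill_link_af,
 *		.get_link_af_size = inet_get_link_af_size,
 *	};
 *
 *	rtnl_af_register(&inet_af_ops);
 */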

/**
 * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 *
 * The caller must hold the rtnl_mutex.
 */
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);

/**
 * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
 * @ops: struct rtnl_af_ops * to unregister
 */
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
	rtnl_lock();
	__rtnl_af_unregister(ops);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);

static size_t rtnl_link_get_af_size(const struct net_device *dev,
				    u32 ext_filter_mask)
{
	struct rtnl_af_ops *af_ops;
	size_t size;

	/* IFLA_AF_SPEC */
	size = nla_total_size(sizeof(struct nlattr));

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->get_link_af_size) {
			/* AF_* + nested data */
			size += nla_total_size(sizeof(struct nlattr)) +
				af_ops->get_link_af_size(dev, ext_filter_mask);
		}
	}

	return size;
}

static bool rtnl_have_link_slave_info(const struct net_device *dev)
{
	struct net_device *master_dev;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (master_dev && master_dev->rtnl_link_ops)
		return true;
	return false;
}

static int rtnl_link_slave_info_fill(struct sk_buff *skb,
				     const struct net_device *dev)
{
	struct net_device *master_dev;
	const struct rtnl_link_ops *ops;
	struct nlattr *slave_data;
	int err;

	master_dev = netdev_master_upper_dev_get((struct net_device *) dev);
	if (!master_dev)
		return 0;
	ops = master_dev->rtnl_link_ops;
	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_SLAVE_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_slave_info) {
		slave_data = nla_nest_start(skb, IFLA_INFO_SLAVE_DATA);
		if (!slave_data)
			return -EMSGSIZE;
		err = ops->fill_slave_info(skb, master_dev, dev);
		if (err < 0)
			goto err_cancel_slave_data;
		nla_nest_end(skb, slave_data);
	}
	return 0;

err_cancel_slave_data:
	nla_nest_cancel(skb, slave_data);
	return err;
}

static int rtnl_link_info_fill(struct sk_buff *skb,
			       const struct net_device *dev)
{
	const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
	struct nlattr *data;
	int err;

	if (!ops)
		return 0;
	if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
		return -EMSGSIZE;
	if (ops->fill_xstats) {
		err = ops->fill_xstats(skb, dev);
		if (err < 0)
			return err;
	}
	if (ops->fill_info) {
		data = nla_nest_start(skb, IFLA_INFO_DATA);
		if (data == NULL)
			return -EMSGSIZE;
		err = ops->fill_info(skb, dev);
		if (err < 0)
			goto err_cancel_data;
		nla_nest_end(skb, data);
	}
	return 0;

err_cancel_data:
	nla_nest_cancel(skb, data);
	return err;
}

static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
	struct nlattr *linkinfo;
	int err = -EMSGSIZE;

	linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
	if (linkinfo == NULL)
		goto out;

	err = rtnl_link_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	err = rtnl_link_slave_info_fill(skb, dev);
	if (err < 0)
		goto err_cancel_link;

	nla_nest_end(skb, linkinfo);
	return 0;

err_cancel_link:
	nla_nest_cancel(skb, linkinfo);
out:
	return err;
}

int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
	struct sock *rtnl = net->rtnl;
	int err = 0;

	NETLINK_CB(skb).dst_group = group;
	if (echo)
		atomic_inc(&skb->users);
	netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
	if (echo)
		err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
	return err;
}

int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
	struct sock *rtnl = net->rtnl;

	return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);

void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
		 struct nlmsghdr *nlh, gfp_t flags)
{
	struct sock *rtnl = net->rtnl;
	int report = 0;

	if (nlh)
		report = nlmsg_report(nlh);

	nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);

void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
	struct sock *rtnl = net->rtnl;

	netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);

int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
	struct nlattr *mx;
	int i, valid = 0;

	mx = nla_nest_start(skb, RTA_METRICS);
	if (mx == NULL)
		return -ENOBUFS;

	for (i = 0; i < RTAX_MAX; i++) {
		if (metrics[i]) {
			if (i == RTAX_CC_ALGO - 1) {
				char tmp[TCP_CA_NAME_MAX], *name;

				name = tcp_ca_get_name_by_key(metrics[i], tmp);
				if (!name)
					continue;
				if (nla_put_string(skb, i + 1, name))
					goto nla_put_failure;
			} else if (i == RTAX_FEATURES - 1) {
				u32 user_features = metrics[i] & RTAX_FEATURE_MASK;

				BUILD_BUG_ON(RTAX_FEATURE_MASK & DST_FEATURE_MASK);
				if (nla_put_u32(skb, i + 1, user_features))
					goto nla_put_failure;
			} else {
				if (nla_put_u32(skb, i + 1, metrics[i]))
					goto nla_put_failure;
			}
			valid++;
		}
	}

	if (!valid) {
		nla_nest_cancel(skb, mx);
		return 0;
	}

	return nla_nest_end(skb, mx);

nla_put_failure:
	nla_nest_cancel(skb, mx);
	return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
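
/*
 * Note on the "i + 1" above (informational): metrics[] is indexed from 0
 * while the RTAX_* attribute types start at 1, so metrics[RTAX_MTU - 1]
 * is emitted as an attribute of type RTAX_MTU, and so on.
 */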

int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
		       long expires, u32 error)
{
	struct rta_cacheinfo ci = {
		.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
		.rta_used = dst->__use,
		.rta_clntref = atomic_read(&(dst->__refcnt)),
		.rta_error = error,
		.rta_id =  id,
	};

	if (expires) {
		unsigned long clock;

		clock = jiffies_to_clock_t(abs(expires));
		clock = min_t(unsigned long, clock, INT_MAX);
		ci.rta_expires = (expires > 0) ? clock : -clock;
	}
	return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);

static void set_operstate(struct net_device *dev, unsigned char transition)
{
	unsigned char operstate = dev->operstate;

	switch (transition) {
	case IF_OPER_UP:
		if ((operstate == IF_OPER_DORMANT ||
		     operstate == IF_OPER_UNKNOWN) &&
		    !netif_dormant(dev))
			operstate = IF_OPER_UP;
		break;

	case IF_OPER_DORMANT:
		if (operstate == IF_OPER_UP ||
		    operstate == IF_OPER_UNKNOWN)
			operstate = IF_OPER_DORMANT;
		break;
	}

	if (dev->operstate != operstate) {
		write_lock_bh(&dev_base_lock);
		dev->operstate = operstate;
		write_unlock_bh(&dev_base_lock);
		netdev_state_change(dev);
	}
}

static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
	return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
	       (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}

static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
					   const struct ifinfomsg *ifm)
{
	unsigned int flags = ifm->ifi_flags;

	/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
	if (ifm->ifi_change)
		flags = (flags & ifm->ifi_change) |
			(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);

	return flags;
}
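
/*
 * Worked example (informational): with device flags IFF_UP|IFF_BROADCAST
 * and a request carrying ifi_flags == 0, ifi_change == IFF_UP, only the
 * IFF_UP bit is taken from the request, so the result drops IFF_UP but
 * keeps IFF_BROADCAST; ifi_change == 0 instead takes every bit from the
 * request.
 */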

static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
				 const struct rtnl_link_stats64 *b)
{
	a->rx_packets = b->rx_packets;
	a->tx_packets = b->tx_packets;
	a->rx_bytes = b->rx_bytes;
	a->tx_bytes = b->tx_bytes;
	a->rx_errors = b->rx_errors;
	a->tx_errors = b->tx_errors;
	a->rx_dropped = b->rx_dropped;
	a->tx_dropped = b->tx_dropped;

	a->multicast = b->multicast;
	a->collisions = b->collisions;

	a->rx_length_errors = b->rx_length_errors;
	a->rx_over_errors = b->rx_over_errors;
	a->rx_crc_errors = b->rx_crc_errors;
	a->rx_frame_errors = b->rx_frame_errors;
	a->rx_fifo_errors = b->rx_fifo_errors;
	a->rx_missed_errors = b->rx_missed_errors;

	a->tx_aborted_errors = b->tx_aborted_errors;
	a->tx_carrier_errors = b->tx_carrier_errors;
	a->tx_fifo_errors = b->tx_fifo_errors;
	a->tx_heartbeat_errors = b->tx_heartbeat_errors;
	a->tx_window_errors = b->tx_window_errors;

	a->rx_compressed = b->rx_compressed;
	a->tx_compressed = b->tx_compressed;
}

static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
	memcpy(v, b, sizeof(*b));
}

/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
				   u32 ext_filter_mask)
{
	if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
	    (ext_filter_mask & RTEXT_FILTER_VF)) {
		int num_vfs = dev_num_vf(dev->dev.parent);
		size_t size = nla_total_size(sizeof(struct nlattr));
		size += nla_total_size(num_vfs * sizeof(struct nlattr));
		size += num_vfs *
			(nla_total_size(sizeof(struct ifla_vf_mac)) +
			 nla_total_size(sizeof(struct ifla_vf_vlan)) +
			 nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
			 nla_total_size(sizeof(struct ifla_vf_rate)) +
			 nla_total_size(sizeof(struct ifla_vf_link_state)) +
			 nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
			 /* IFLA_VF_STATS_RX_PACKETS */
			 nla_total_size(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_PACKETS */
			 nla_total_size(sizeof(__u64)) +
			 /* IFLA_VF_STATS_RX_BYTES */
			 nla_total_size(sizeof(__u64)) +
			 /* IFLA_VF_STATS_TX_BYTES */
			 nla_total_size(sizeof(__u64)) +
			 /* IFLA_VF_STATS_BROADCAST */
			 nla_total_size(sizeof(__u64)) +
			 /* IFLA_VF_STATS_MULTICAST */
			 nla_total_size(sizeof(__u64)) +
			 nla_total_size(sizeof(struct ifla_vf_trust)));
		return size;
	} else
		return 0;
}

static size_t rtnl_port_size(const struct net_device *dev,
			     u32 ext_filter_mask)
{
	size_t port_size = nla_total_size(4)		/* PORT_VF */
		+ nla_total_size(PORT_PROFILE_MAX)	/* PORT_PROFILE */
		+ nla_total_size(sizeof(struct ifla_port_vsi))
							/* PORT_VSI_TYPE */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_INSTANCE_UUID */
		+ nla_total_size(PORT_UUID_MAX)		/* PORT_HOST_UUID */
		+ nla_total_size(1)			/* PROT_VDP_REQUEST */
		+ nla_total_size(2);			/* PORT_VDP_RESPONSE */
	size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
	size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;
	size_t port_self_size = nla_total_size(sizeof(struct nlattr))
		+ port_size;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;
	if (dev_num_vf(dev->dev.parent))
		return port_self_size + vf_ports_size +
			vf_port_size * dev_num_vf(dev->dev.parent);
	else
		return port_self_size;
}

static noinline size_t if_nlmsg_size(const struct net_device *dev,
				     u32 ext_filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
	       + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
	       + nla_total_size(sizeof(struct rtnl_link_ifmap))
	       + nla_total_size(sizeof(struct rtnl_link_stats))
	       + nla_total_size(sizeof(struct rtnl_link_stats64))
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
	       + nla_total_size(4) /* IFLA_TXQLEN */
	       + nla_total_size(4) /* IFLA_WEIGHT */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(1) /* IFLA_CARRIER */
	       + nla_total_size(4) /* IFLA_PROMISCUITY */
	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
	       + nla_total_size(1) /* IFLA_OPERSTATE */
	       + nla_total_size(1) /* IFLA_LINKMODE */
	       + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
	       + nla_total_size(4) /* IFLA_LINK_NETNSID */
	       + nla_total_size(ext_filter_mask
			        & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
	       + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
	       + rtnl_port_size(dev, ext_filter_mask) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
	       + rtnl_link_get_af_size(dev, ext_filter_mask) /* IFLA_AF_SPEC */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
	       + nla_total_size(1); /* IFLA_PROTO_DOWN */

}
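
/*
 * (informational) nla_total_size(len) is NLA_ALIGN(NLA_HDRLEN + len), i.e.
 * one attribute header plus the padded payload, so the sum above is an
 * upper bound on the space one link message can consume; the dump code
 * sizes its skb from this estimate.
 */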

static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *vf_ports;
	struct nlattr *vf_port;
	int vf;
	int err;

	vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
	if (!vf_ports)
		return -EMSGSIZE;

	for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
		vf_port = nla_nest_start(skb, IFLA_VF_PORT);
		if (!vf_port)
			goto nla_put_failure;
		if (nla_put_u32(skb, IFLA_PORT_VF, vf))
			goto nla_put_failure;
		err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
		if (err == -EMSGSIZE)
			goto nla_put_failure;
		if (err) {
			nla_nest_cancel(skb, vf_port);
			continue;
		}
		nla_nest_end(skb, vf_port);
	}

	nla_nest_end(skb, vf_ports);

	return 0;

nla_put_failure:
	nla_nest_cancel(skb, vf_ports);
	return -EMSGSIZE;
}

static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
	struct nlattr *port_self;
	int err;

	port_self = nla_nest_start(skb, IFLA_PORT_SELF);
	if (!port_self)
		return -EMSGSIZE;

	err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
	if (err) {
		nla_nest_cancel(skb, port_self);
		return (err == -EMSGSIZE) ? err : 0;
	}

	nla_nest_end(skb, port_self);

	return 0;
}

static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev,
			  u32 ext_filter_mask)
{
	int err;

	if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent ||
	    !(ext_filter_mask & RTEXT_FILTER_VF))
		return 0;

	err = rtnl_port_self_fill(skb, dev);
	if (err)
		return err;

	if (dev_num_vf(dev->dev.parent)) {
		err = rtnl_vf_ports_fill(skb, dev);
		if (err)
			return err;
	}

	return 0;
}

static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct netdev_phys_item_id ppid;

	err = dev_get_phys_port_id(dev, &ppid);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_port_name_fill(struct sk_buff *skb, struct net_device *dev)
{
	char name[IFNAMSIZ];
	int err;

	err = dev_get_phys_port_name(dev, name, sizeof(name));
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_phys_switch_id_fill(struct sk_buff *skb, struct net_device *dev)
{
	int err;
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
		.flags = SWITCHDEV_F_NO_RECURSE,
	};

	err = switchdev_port_attr_get(dev, &attr);
	if (err) {
		if (err == -EOPNOTSUPP)
			return 0;
		return err;
	}

	if (nla_put(skb, IFLA_PHYS_SWITCH_ID, attr.u.ppid.id_len,
		    attr.u.ppid.id))
		return -EMSGSIZE;

	return 0;
}

static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
					      struct net_device *dev)
{
	const struct rtnl_link_stats64 *stats;
	struct rtnl_link_stats64 temp;
	struct nlattr *attr;

	stats = dev_get_stats(dev, &temp);

	attr = nla_reserve(skb, IFLA_STATS,
			   sizeof(struct rtnl_link_stats));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats(nla_data(attr), stats);

	attr = nla_reserve(skb, IFLA_STATS64,
			   sizeof(struct rtnl_link_stats64));
	if (!attr)
		return -EMSGSIZE;

	copy_rtnl_link_stats64(nla_data(attr), stats);

	return 0;
}
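
/*
 * (informational) Both IFLA_STATS and IFLA_STATS64 are filled above: the
 * legacy 32-bit struct rtnl_link_stats is kept for old userspace, and
 * copy_rtnl_link_stats() silently truncates each 64-bit counter when
 * filling it, while IFLA_STATS64 carries the full values.
 */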

static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
					       struct net_device *dev,
					       int vfs_num,
					       struct nlattr *vfinfo)
{
	struct ifla_vf_rss_query_en vf_rss_query_en;
	struct ifla_vf_link_state vf_linkstate;
	struct ifla_vf_spoofchk vf_spoofchk;
	struct ifla_vf_tx_rate vf_tx_rate;
	struct ifla_vf_stats vf_stats;
	struct ifla_vf_trust vf_trust;
	struct ifla_vf_vlan vf_vlan;
	struct ifla_vf_rate vf_rate;
	struct nlattr *vf, *vfstats;
	struct ifla_vf_mac vf_mac;
	struct ifla_vf_info ivi;

	/* Not all SR-IOV capable drivers support the
	 * spoofcheck and "RSS query enable" query.  Preset to
	 * -1 so the user space tool can detect that the driver
	 * didn't report anything.
	 */
	ivi.spoofchk = -1;
	ivi.rss_query_en = -1;
	ivi.trusted = -1;
	memset(ivi.mac, 0, sizeof(ivi.mac));
	/* The default value for VF link state is "auto"
	 * IFLA_VF_LINK_STATE_AUTO which equals zero
	 */
	ivi.linkstate = 0;
	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
		return 0;

	vf_mac.vf =
		vf_vlan.vf =
		vf_rate.vf =
		vf_tx_rate.vf =
		vf_spoofchk.vf =
		vf_linkstate.vf =
		vf_rss_query_en.vf =
		vf_trust.vf = ivi.vf;

	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
	vf_vlan.vlan = ivi.vlan;
	vf_vlan.qos = ivi.qos;
	vf_tx_rate.rate = ivi.max_tx_rate;
	vf_rate.min_tx_rate = ivi.min_tx_rate;
	vf_rate.max_tx_rate = ivi.max_tx_rate;
	vf_spoofchk.setting = ivi.spoofchk;
	vf_linkstate.link_state = ivi.linkstate;
	vf_rss_query_en.setting = ivi.rss_query_en;
	vf_trust.setting = ivi.trusted;
	vf = nla_nest_start(skb, IFLA_VF_INFO);
	if (!vf) {
		nla_nest_cancel(skb, vfinfo);
		return -EMSGSIZE;
	}
	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
		    &vf_rate) ||
	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
		    &vf_tx_rate) ||
	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
		    &vf_spoofchk) ||
	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
		    &vf_linkstate) ||
	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
		    sizeof(vf_rss_query_en),
		    &vf_rss_query_en) ||
	    nla_put(skb, IFLA_VF_TRUST,
		    sizeof(vf_trust), &vf_trust))
		return -EMSGSIZE;
	memset(&vf_stats, 0, sizeof(vf_stats));
	if (dev->netdev_ops->ndo_get_vf_stats)
		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
						&vf_stats);
	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
	if (!vfstats) {
		nla_nest_cancel(skb, vf);
		nla_nest_cancel(skb, vfinfo);
		return -EMSGSIZE;
	}
	if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
			vf_stats.rx_packets) ||
	    nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
			vf_stats.tx_packets) ||
	    nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
			vf_stats.rx_bytes) ||
	    nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
			vf_stats.tx_bytes) ||
	    nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
			vf_stats.broadcast) ||
	    nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
			vf_stats.multicast))
		return -EMSGSIZE;
	nla_nest_end(skb, vfstats);
	nla_nest_end(skb, vf);
	return 0;
}

static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
{
	struct rtnl_link_ifmap map = {
		.mem_start   = dev->mem_start,
		.mem_end     = dev->mem_end,
		.base_addr   = dev->base_addr,
		.irq         = dev->irq,
		.dma         = dev->dma,
		.port        = dev->if_port,
	};
	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
		return -EMSGSIZE;

	return 0;
}

static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
			    int type, u32 pid, u32 seq, u32 change,
			    unsigned int flags, u32 ext_filter_mask)
{
	struct ifinfomsg *ifm;
	struct nlmsghdr *nlh;
	struct nlattr *af_spec;
	struct rtnl_af_ops *af_ops;
	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);

	ASSERT_RTNL();
	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ifm = nlmsg_data(nlh);
	ifm->ifi_family = AF_UNSPEC;
	ifm->__ifi_pad = 0;
	ifm->ifi_type = dev->type;
	ifm->ifi_index = dev->ifindex;
	ifm->ifi_flags = dev_get_flags(dev);
	ifm->ifi_change = change;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
	    nla_put_u8(skb, IFLA_OPERSTATE,
		       netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
	    nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
#ifdef CONFIG_RPS
	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
	    (upper_dev &&
	     nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
	    nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
	    (dev->qdisc &&
	     nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
	    (dev->ifalias &&
	     nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
	    nla_put_u32(skb, IFLA_CARRIER_CHANGES,
			atomic_read(&dev->carrier_changes)) ||
	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
		goto nla_put_failure;

	if (rtnl_fill_link_ifmap(skb, dev))
		goto nla_put_failure;

	if (dev->addr_len) {
		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
		    nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
			goto nla_put_failure;
	}

	if (rtnl_phys_port_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_port_name_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_phys_switch_id_fill(skb, dev))
		goto nla_put_failure;

	if (rtnl_fill_stats(skb, dev))
		goto nla_put_failure;

	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
		goto nla_put_failure;

	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
	    ext_filter_mask & RTEXT_FILTER_VF) {
		int i;
		struct nlattr *vfinfo;
		int num_vfs = dev_num_vf(dev->dev.parent);

		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
		if (!vfinfo)
			goto nla_put_failure;
		for (i = 0; i < num_vfs; i++) {
			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
				goto nla_put_failure;
		}

		nla_nest_end(skb, vfinfo);
	}

	if (rtnl_port_fill(skb, dev, ext_filter_mask))
		goto nla_put_failure;

	if (dev->rtnl_link_ops || rtnl_have_link_slave_info(dev)) {
		if (rtnl_link_fill(skb, dev) < 0)
			goto nla_put_failure;
	}

	if (dev->rtnl_link_ops &&
	    dev->rtnl_link_ops->get_link_net) {
		struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);

		if (!net_eq(dev_net(dev), link_net)) {
			int id = peernet2id_alloc(dev_net(dev), link_net);

			if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
				goto nla_put_failure;
		}
	}

	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
		goto nla_put_failure;

	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
		if (af_ops->fill_link_af) {
			struct nlattr *af;
			int err;

			if (!(af = nla_nest_start(skb, af_ops->family)))
				goto nla_put_failure;

			err = af_ops->fill_link_af(skb, dev, ext_filter_mask);

			/*
			 * Caller may return ENODATA to indicate that there
			 * was no data to be dumped. This is not an error, it
			 * means we should trim the attribute header and
			 * continue.
			 */
			if (err == -ENODATA)
				nla_nest_cancel(skb, af);
			else if (err < 0)
				goto nla_put_failure;

			nla_nest_end(skb, af);
		}
	}

	nla_nest_end(skb, af_spec);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
	[IFLA_IFNAME]		= { .type = NLA_STRING, .len = IFNAMSIZ-1 },
	[IFLA_ADDRESS]		= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_BROADCAST]	= { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
	[IFLA_MTU]		= { .type = NLA_U32 },
	[IFLA_LINK]		= { .type = NLA_U32 },
	[IFLA_MASTER]		= { .type = NLA_U32 },
	[IFLA_CARRIER]		= { .type = NLA_U8 },
	[IFLA_TXQLEN]		= { .type = NLA_U32 },
	[IFLA_WEIGHT]		= { .type = NLA_U32 },
	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
	[IFLA_LINKMODE]		= { .type = NLA_U8 },
	[IFLA_LINKINFO]		= { .type = NLA_NESTED },
	[IFLA_NET_NS_PID]	= { .type = NLA_U32 },