sock.c 70.7 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
4
5
6
7
8
9
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
10
 * Authors:	Ross Biro
Linus Torvalds's avatar
Linus Torvalds committed
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	: 	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
35
 *					code. The ACK stuff can wait and needs major
Linus Torvalds's avatar
Linus Torvalds committed
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	: 	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *              Steve Whitehouse:       Added default destructor to free
 *                                      protocol private data.
 *              Steve Whitehouse:       Added various other default routines
 *                                      common to several socket families.
 *              Chris Evans     :       Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :       cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

Joe Perches's avatar
Joe Perches committed
92
93
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

94
#include <linux/capability.h>
Linus Torvalds's avatar
Linus Torvalds committed
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
114
#include <linux/highmem.h>
115
#include <linux/user_namespace.h>
116
#include <linux/static_key.h>
117
#include <linux/memcontrol.h>
118
#include <linux/prefetch.h>
Linus Torvalds's avatar
Linus Torvalds committed
119
120
121
122
123
124

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
125
#include <net/net_namespace.h>
126
#include <net/request_sock.h>
Linus Torvalds's avatar
Linus Torvalds committed
127
#include <net/sock.h>
128
#include <linux/net_tstamp.h>
Linus Torvalds's avatar
Linus Torvalds committed
129
130
#include <net/xfrm.h>
#include <linux/ipsec.h>
131
#include <net/cls_cgroup.h>
132
#include <net/netprio_cgroup.h>
Linus Torvalds's avatar
Linus Torvalds committed
133
134
135

#include <linux/filter.h>

136
137
#include <trace/events/sock.h>

Linus Torvalds's avatar
Linus Torvalds committed
138
139
140
141
#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

142
#include <net/busy_poll.h>
143

144
static DEFINE_MUTEX(proto_list_mutex);
Glauber Costa's avatar
Glauber Costa committed
145
146
static LIST_HEAD(proto_list);

Andrew Morton's avatar
Andrew Morton committed
147
#ifdef CONFIG_MEMCG_KMEM
148
/*
 * Call every registered protocol's init_cgroup() hook for @memcg.
 *
 * Walks the global proto_list under proto_list_mutex.  On the first hook
 * failure, unwinds by calling destroy_cgroup() on every protocol that was
 * already initialised (in reverse registration order), then returns the
 * failing hook's error code.  Returns 0 on success.
 */
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	/* All hooks succeeded (ret == 0 here). */
	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	/* Roll back only the protocols initialised before the failure. */
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

172
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
Glauber Costa's avatar
Glauber Costa committed
173
174
175
{
	struct proto *proto;

176
	mutex_lock(&proto_list_mutex);
Glauber Costa's avatar
Glauber Costa committed
177
178
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
179
			proto->destroy_cgroup(memcg);
180
	mutex_unlock(&proto_list_mutex);
Glauber Costa's avatar
Glauber Costa committed
181
182
183
}
#endif

184
185
186
187
/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
188
189
190
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

191
#if defined(CONFIG_MEMCG_KMEM)
192
struct static_key memcg_socket_limit_enabled;
Glauber Costa's avatar
Glauber Costa committed
193
EXPORT_SYMBOL(memcg_socket_limit_enabled);
194
#endif
Glauber Costa's avatar
Glauber Costa committed
195

196
197
198
199
200
/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
201
static const char *const af_family_key_strings[AF_MAX+1] = {
202
203
204
205
206
207
208
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
Andy Grover's avatar
Andy Grover committed
209
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
210
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
211
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
212
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
213
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
214
  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
215
  "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
216
};
217
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
218
219
220
221
222
223
224
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
Andy Grover's avatar
Andy Grover committed
225
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
226
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
227
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
228
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
229
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
230
  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
231
  "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_MAX"
232
};
233
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
234
235
236
237
238
239
240
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
Andy Grover's avatar
Andy Grover committed
241
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
242
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
243
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
244
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
245
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
246
  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
247
  "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
248
};
249
250
251
252
253
254
255

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

Linus Torvalds's avatar
Linus Torvalds committed
256
257
258
259
260
261
/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
Eric Dumazet's avatar
Eric Dumazet committed
262
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
Linus Torvalds's avatar
Linus Torvalds committed
263
264
265
266
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)

/* Run time adjustable parameters. */
267
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
268
EXPORT_SYMBOL(sysctl_wmem_max);
269
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
270
EXPORT_SYMBOL(sysctl_rmem_max);
271
272
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
Linus Torvalds's avatar
Linus Torvalds committed
273

Lucas De Marchi's avatar
Lucas De Marchi committed
274
/* Maximal space eaten by iovec or ancillary data plus some space */
275
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
Eric Dumazet's avatar
Eric Dumazet committed
276
EXPORT_SYMBOL(sysctl_optmem_max);
Linus Torvalds's avatar
Linus Torvalds committed
277

278
279
280
struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

281
282
283
284
285
286
287
288
289
290
291
292
/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	/* Allow this socket's allocations to dip into reserves. */
	sk->sk_allocation |= __GFP_MEMALLOC;
	/* Enable the global memalloc fast path (see sk_clear_memalloc). */
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

/*
 * Undo sk_set_memalloc(): clear %SOCK_MEMALLOC, stop using emergency
 * reserves for this socket's allocations, and drop the global static-key
 * reference that was taken when the flag was set.
 */
void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/*
 * Slow-path backlog receive for %SOCK_MEMALLOC sockets: run the protocol's
 * sk_backlog_rcv handler with PF_MEMALLOC set on the current task so its
 * allocations may use emergency reserves, then restore the task's previous
 * PF_MEMALLOC state.  Returns whatever the protocol handler returns.
 */
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	/* Snapshot task flags so PF_MEMALLOC can be restored exactly. */
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

Linus Torvalds's avatar
Linus Torvalds committed
333
334
335
336
337
338
339
340
/*
 * Parse a user-supplied struct timeval for SO_RCVTIMEO/SO_SNDTIMEO and
 * store the result in *timeo_p as a timeout in jiffies.
 *
 * Returns 0 on success, -EINVAL if optlen is too small, -EFAULT on a bad
 * user pointer, -EDOM if tv_usec is out of range.  A zero timeval means
 * "no timeout" (MAX_SCHEDULE_TIMEOUT); a negative tv_sec is clamped to 0
 * with a rate-limited warning.
 */
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		/* Treat a negative timeout as "poll" (zero timeout). */
		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	/* Only convert to jiffies when the result cannot overflow;
	 * otherwise leave the sentinel MAX_SCHEDULE_TIMEOUT in place.
	 * The usec part is rounded up to the next whole jiffy.
	 */
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
367
368
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm,  current->comm);
Joe Perches's avatar
Joe Perches committed
369
370
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
Linus Torvalds's avatar
Linus Torvalds committed
371
372
373
374
		warned++;
	}
}

375
376
377
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
378
{
379
380
381
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
382
			net_disable_timestamp();
Linus Torvalds's avatar
Linus Torvalds committed
383
384
385
386
	}
}


387
388
/*
 * Queue @skb on @sk's receive queue, charging it against the socket's
 * receive buffer, and wake the socket's readers.
 *
 * Returns 0 on success, -ENOMEM if sk_rmem_alloc already meets sk_rcvbuf,
 * a negative sk_filter() error if the socket filter rejects the packet,
 * or -ENOBUFS if receive-memory scheduling fails.  On failure the caller
 * still owns @skb.
 */
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	/* Charge the skb to this socket's receive-memory accounting. */
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from rcu protected region, make sure we dont leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	/* Receive queue is touched from interrupt context: irq-safe lock. */
	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

435
/*
 * Deliver @skb to @sk: run the socket filter, then either process the
 * packet immediately via sk_backlog_rcv() (when the socket is not owned
 * by a user context) or append it to the socket backlog.
 *
 * @nested selects bh_lock_sock_nested() for callers that may already
 * hold another socket lock.  Always drops one reference on @sk via
 * sock_put(); frees @skb on any discard path.  Returns the backlog
 * handler's verdict, or NET_RX_SUCCESS when queued/dropped here.
 */
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		/* Backlog full: drop the packet and account it. */
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

477
478
479
480
481
482
/* Exported wrapper: forget the cached transmit queue mapping for @sk. */
void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

483
484
/*
 * Validate the socket's cached dst (lockless read side; caller holds the
 * socket such that sk_dst_cache may be written).  Returns the dst when it
 * is still usable; otherwise drops the cache entry and returns NULL.
 */
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	/* Keep the cache when there is no dst, it is not obsolete,
	 * or its ops->check() still validates it for this cookie.
	 */
	if (!dst || !dst->obsolete || dst->ops->check(dst, cookie))
		return dst;

	sk_tx_queue_clear(sk);
	RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
	dst_release(dst);
	return NULL;
}
EXPORT_SYMBOL(__sk_dst_check);

/*
 * Like __sk_dst_check() but takes its own reference via sk_dst_get() and
 * resets the cache with sk_dst_reset() when the dst is stale.
 */
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (!dst || !dst->obsolete || dst->ops->check(dst, cookie))
		return dst;

	sk_dst_reset(sk);
	dst_release(dst);
	return NULL;
}
EXPORT_SYMBOL(sk_dst_check);

512
513
/*
 * SO_BINDTODEVICE: bind @sk to the interface named in user memory at
 * @optval (at most IFNAMSIZ-1 bytes used).  An empty name or zero optlen
 * unbinds the socket.  Requires CAP_NET_RAW in the socket's network
 * namespace.  Returns 0 on success; -EPERM, -EINVAL, -EFAULT or -ENODEV
 * on failure, and -ENOPROTOOPT when built without CONFIG_NETDEVICES.
 */
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		/* Look up the device by name without taking a reference;
		 * only its ifindex is needed.
		 */
		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	/* Cached routes may no longer be valid for the new device. */
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
/*
 * SO_BINDTODEVICE (get side): copy the name of the device @sk is bound
 * to into user memory at @optval and store the name length (including
 * the NUL) through @optlen.  An unbound socket reports length 0 with no
 * name copied.  Returns 0 on success; -EINVAL if the user buffer is
 * smaller than IFNAMSIZ, -EFAULT on bad user pointers, an error from
 * netdev_get_name() if the lookup fails, and -ENOPROTOOPT when built
 * without CONFIG_NETDEVICES.
 */
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		/* Not bound: report a zero-length name only. */
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

610
611
612
613
614
615
616
617
/* Set or clear socket flag @bit according to the boolean @valbool. */
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (!valbool)
		sock_reset_flag(sk, bit);
	else
		sock_set_flag(sk, bit);
}

Linus Torvalds's avatar
Linus Torvalds committed
618
619
620
621
622
623
/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

/*
 * Generic setsockopt() handler for SOL_SOCKET options, shared by all
 * protocol families.  Copies the int-sized option value from user space
 * (options with larger payloads re-read from @optval themselves), then
 * dispatches on @optname under lock_sock().  Returns 0 or a negative
 * errno; unknown options yield -ENOPROTOOPT.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	/* SO_BINDTODEVICE takes a string, not an int: handle it first. */
	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		/* Enabling is privileged; anyone may clear it. */
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		/* Read-only options: reject sets. */
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* SO_SNDBUFFORCE jumps here, bypassing the sysctl cap. */
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this BSD doesn't and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		/* SO_RCVBUFFORCE jumps here, bypassing the sysctl cap. */
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.   Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		/* Priorities 0..6 are unprivileged; higher need CAP_NET_ADMIN. */
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			/* On 32-bit, l_linger * HZ could overflow a long. */
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool)  {
			/* The two options are mutually exclusive in the
			 * nanosecond flag; the last one set wins.
			 */
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		/* Zero means the default low-water mark of one byte. */
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		/* Once locked, the filter lock cannot be cleared. */
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_LL_RX_POLL
	case SO_LL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
Linus Torvalds's avatar
Linus Torvalds committed
924
925


926
927
928
929
930
931
932
933
/*
 * Translate a kernel pid/cred pair into the struct ucred layout handed
 * out to user space (used e.g. by SO_PEERCRED below).  A NULL cred
 * leaves uid/gid reported as -1.
 */
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *ns = current_user_ns();

		/* Map the effective ids into the caller's user namespace. */
		ucred->uid = from_kuid_munged(ns, cred->euid);
		ucred->gid = from_kgid_munged(ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);
939

Linus Torvalds's avatar
Linus Torvalds committed
940
941
942
943
/*
 * sock_getsockopt - report generic (protocol-independent) socket options
 * back to user space.  Returns 0 on success or a negative errno.
 */
int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	/* Scratch space big enough for any option value we produce. */
	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} u;

	int lv = sizeof(int);	/* size of the value we produced */
	int len;		/* size the caller asked for */

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&u, 0, sizeof(u));

	switch (optname) {
	case SO_DEBUG:
		u.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		u.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		u.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		u.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		u.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		u.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		u.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		u.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		u.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		u.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		u.val = sk->sk_family;
		break;

	case SO_ERROR:
		/* Hard error first; fall back to the soft error if none. */
		u.val = -sock_error(sk);
		if (!u.val)
			u.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		u.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		u.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		u.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(u.ling);
		u.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		u.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		/* Obsolete: warn, then report the zeroed scratch value. */
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		u.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		u.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		/* Rebuild the SOF_TIMESTAMPING_* mask from the sock flags. */
		u.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			u.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			u.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			u.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			u.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			u.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			u.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			u.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			/* "no timeout" is reported as all-zero */
			u.tm.tv_sec = 0;
			u.tm.tv_usec = 0;
		} else {
			u.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			u.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			u.tm.tv_sec = 0;
			u.tm.tv_usec = 0;
		} else {
			u.tm.tv_sec = sk->sk_sndtimeo / HZ;
			u.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		u.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		/* SO_SNDLOWAT is not settable; it always reads back as 1. */
		u.val = 1;
		break;

	case SO_PASSCRED:
		u.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		u.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		u.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		u.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		u.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		u.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		/* Only meaningful for protocols providing set_peek_off. */
		if (sock->ops->set_peek_off)
			u.val = sk->sk_peek_off;
		else
			return -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		u.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		u.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_SELECT_ERR_QUEUE:
		u.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_LL_RX_POLL
	case SO_LL:
		u.val = sk->sk_ll_usec;
		break;
#endif

	default:
		return -ENOPROTOOPT;
	}

	/* Never copy out more bytes than we actually produced. */
	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &u, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}

1193
1194
1195
1196
1197
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
Dave Jones's avatar
Dave Jones committed
1198
static inline void sock_lock_init(struct sock *sk)
1199
{
1200
1201
1202
1203
1204
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
1205
1206
}

Eric Dumazet's avatar
Eric Dumazet committed
1207
1208
1209
/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarly, because of RCU lookups. sk_node should also be left as is.
1210
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
Eric Dumazet's avatar
Eric Dumazet committed
1211
 */
1212
1213
1214
1215
1216
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
1217
1218
1219
1220
1221
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

1222
1223
1224
1225
1226
1227
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
/*
 * Zero a struct sock of @size bytes while leaving intact the two
 * embedded nulls-list 'next' pointers (skc_node and skc_portaddr_node),
 * which RCU lookups may still be traversing.
 */
void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	char *base = (char *)sk;
	unsigned long lo = offsetof(struct sock, __sk_common.skc_node.next);
	unsigned long hi = offsetof(struct sock, __sk_common.skc_portaddr_node.next);

	/* Order the two offsets so we can clear three disjoint spans. */
	if (lo > hi)
		swap(lo, hi);

	if (lo != 0)
		memset(base, 0, lo);
	memset(base + lo + sizeof(void *), 0, hi - lo - sizeof(void *));
	memset(base + hi + sizeof(void *), 0, size - hi - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

1246
1247
static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
1248
1249
1250
1251
1252
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
1253
1254
1255
1256
1257
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
1258
1259
1260
1261
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);