/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

struct cgroup;
struct cgroup_subsys;
int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss);
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
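
/*
 * Example (illustrative sketch, not part of this header): SOCK_DEBUG()
 * takes printk-style arguments and only produces output when the socket
 * has the SOCK_DBG flag set via the SO_DEBUG socket option:
 *
 *	SOCK_DEBUG(sk, "%s: ack backlog %u\n", __func__,
 *		   (unsigned int)sk->sk_ack_backlog);
 */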

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
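
/*
 * Sketch of how the two halves of socket_lock_t are typically used
 * (lock_sock()/release_sock() and bh_lock_sock()/bh_unlock_sock() are
 * provided further down in this header and in net/core/sock.c):
 *
 *	lock_sock(sk);		(process context; may sleep, sets "owned")
 *	...
 *	release_sock(sk);	(also processes the backlog queue)
 *
 *	bh_lock_sock(sk);	(softirq context; takes only slock)
 *	...
 *	bh_unlock_sock(sk);
 */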

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped :
	 * cf INET_MATCH() and INET_TW_MATCH()
	 */
	__be32			skc_daddr;
	__be32			skc_rcv_saddr;

	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net	 	*skc_net;
#endif
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	/* public: */
};

struct cg_proto;
/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g. NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_sndmsg_page: cached page for sendmsg
  *	@sk_sndmsg_off: cached offset for sendmsg
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_classid: this socket's cgroup classid
  *	@sk_cgrp: this socket's cgroup-specific proto data
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer sending space available
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it's logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long 		sk_flags;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
#ifdef CONFIG_CGROUPS
	__u32			sk_cgrp_prioidx;
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	struct cg_proto		*sk_cgrp;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
  	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);  
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this holds in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}
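
/*
 * Sketch of the intended refcounting pattern (the lookup helper and the
 * hash lock are hypothetical; sock_put() is declared further down in
 * this header):
 *
 *	read_lock(&hash_lock);
 *	sk = my_hash_lookup(hash);	(hypothetical, runs under hash_lock)
 *	if (sk)
 *		sock_hold(sk);		(valid: refcnt is already > 0)
 *	read_unlock(&hash_lock);
 *	...
 *	sock_put(sk);			(drop our reference when done)
 */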

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
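
/*
 * Example (sketch): a typical lookup walks one hash chain under its lock
 * with these iterators; "head", "hash" and the match condition are
 * illustrative only:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	sk_for_each(sk, node, &head->chain) {
 *		if (sk->sk_hash == hash && sk->sk_bound_dev_if == dif)
 *			return sk;
 *	}
 *	return NULL;
 */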

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
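
/*
 * Example (sketch): the flag helpers pair with the sock_flags enum above;
 * e.g. handling SO_LINGER in a setsockopt() implementation, roughly as
 * net/core/sock.c does:
 *
 *	if (!linger.l_onoff)
 *		sock_reset_flag(sk, SOCK_LINGER);
 *	else {
 *		sk->sk_lingertime = linger.l_linger * HZ;
 *		sock_set_flag(sk, SOCK_LINGER);
 *	}
 */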

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
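
/*
 * Example (sketch): a stream sendmsg() path typically gates on these
 * helpers before queueing more data (error handling trimmed):
 *
 *	if (!sk_stream_memory_free(sk))
 *		goto wait_for_sndbuf;			(sk_sndbuf exhausted)
 *
 *	copy = min_t(int, size, sk_stream_wspace(sk));
 */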

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* force the skb dst to be refcounted, as we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > sk->sk_rcvbuf;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
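
/*
 * Sketch of the canonical receive-path use of the backlog helpers
 * (sock_owned_by_user() is defined further down in this header; error
 * handling trimmed):
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);		(process now)
 *	else if (sk_add_backlog(sk, skb)) {
 *		bh_unlock_sock(sk);
 *		kfree_skb(skb);				(queues full: drop)
 *		goto out;
 *	}
 *	bh_unlock_sock(sk);
 */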

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = skb->rxhash;
	}
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sock_rps_reset_flow(sk);
	sk->sk_rxhash = 0;
#endif
}
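
/*
 * Example (sketch): a protocol's receive handler records the flow hash of
 * each inbound packet so Receive Flow Steering can follow the flow
 * ("foo_do_rcv" and "foo_queue_rcv_skb" are hypothetical):
 *
 *	static int foo_do_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		sock_rps_save_rxhash(sk, skb);
 *		return foo_queue_rcv_skb(sk, skb);
 *	}
 */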

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
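
/*
 * Example (sketch): sk_wait_event() is called with the socket lock held;
 * it releases the lock, sleeps for up to *timeo, re-acquires the lock and
 * re-tests the condition (compare sk_wait_data() below):
 *
 *	long timeo = sock_rcvtimeo(sk, noblock);
 *
 *	while (timeo && skb_queue_empty(&sk->sk_receive_queue))
 *		sk_wait_event(sk, &timeo,
 *			      !skb_queue_empty(&sk->sk_receive_queue));
 */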

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk, 
					long timeout);
	int			(*connect)(struct sock *sk,
				        struct sockaddr *uaddr, 
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level, 
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level, 
					int optname, char __user *optval, 
					int __user *option);  	 
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags, 
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk, 
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk, 
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	/*
	 * cgroup specific init/deinit functions. Called once for all
	 * protocols that implement it, from cgroups populate function.
	 * This function has to set up any files the protocol wants to
	 * appear in the kmem cgroup filesystem.
	 */
	int			(*init_cgroup)(struct cgroup *cgrp,
					       struct cgroup_subsys *ss);
	void			(*destroy_cgroup)(struct cgroup *cgrp,
						  struct cgroup_subsys *ss);
	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};

struct cg_proto {
	void			(*enter_memory_pressure)(struct sock *sk);
	struct res_counter	*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	int			*memory_pressure;
	long			*sysctl_mem;
	/*
	 * memcg field is used to find which memcg we belong to directly.
	 * Each memcg struct can hold more than one cg_proto, so container_of
	 * won't really cut it.
	 *
	 * The elegant solution would be having an inverse function to
	 * proto_cgroup in struct proto, but that means polluting the structure
	 * for everybody, instead of just for memcg users.
	 */
	struct mem_cgroup	*memcg;
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
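
/*
 * Sketch (hypothetical protocol): a minimal struct proto fills in only the
 * handlers its socket type exercises, plus name/owner/obj_size, and is
 * then handed to proto_register():
 *
 *	static struct proto foo_proto = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *		.close	  = foo_close,
 *		.sendmsg  = foo_sendmsg,
 *		.recvmsg  = foo_recvmsg,
 *	};
 *
 *	err = proto_register(&foo_proto, 1);	(1: allocate a slab cache)
 */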

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
extern struct jump_label_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return NULL;
}
#endif


static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return !!*sk->sk_cgrp->memory_pressure;

	return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
	int *memory_pressure = sk->sk_prot->memory_pressure;

	if (!memory_pressure)
		return;

	if (*memory_pressure)
		*memory_pressure = 0;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			if (*cg_proto->memory_pressure)
				*cg_proto->memory_pressure = 0;
	}

}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			cg_proto->enter_memory_pressure(sk);
	}

	sk->sk_prot->enter_memory_pressure(sk);
}

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long *prot = sk->sk_prot->sysctl_mem;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		prot = sk->sk_cgrp->sysctl_mem;
	return prot[index];
}
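
/*
 * Example (sketch): sysctl_mem holds three limits, indexed 0 (min),
 * 1 (pressure) and 2 (max); __sk_mem_schedule()-style accounting compares
 * the current allocation against them roughly like this:
 *
 *	allocated = sk_memory_allocated_add(sk, amt, &parent_status);
 *
 *	if (allocated > sk_prot_mem_limits(sk, 2))
 *		goto suppress_allocation;	(over hard limit)
 *	if (allocated > sk_prot_mem_limits(sk, 1))
 *		sk_enter_memory_pressure(sk);	(entering pressure)
 */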

static inline void memcg_memory_allocated_add(struct cg_proto *prot,
					      unsigned long amt,
					      int *parent_status)
{
	struct res_counter *fail;
	int ret;

	ret = res_counter_charge(prot->memory_allocated,
				 amt << PAGE_SHIFT, &fail);

	if (ret < 0)
		*parent_status = OVER_LIMIT;
}

static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
					      unsigned long amt)
{
	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
}

static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
{
	u64 ret;
	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
	return ret >> PAGE_SHIFT;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	struct proto *prot = sk->sk_prot;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return memcg_memory_allocated_read(sk->sk_cgrp);

	return atomic_long_read(prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
		/* update the root cgroup regardless */
		atomic_long_add_return(amt, prot->memory_allocated);
		return memcg_memory_allocated_read(sk->sk_cgrp);
	}

	return atomic_long_add_return(amt, prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
	    parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
		memcg_memory_allocated_sub(sk->sk_cgrp, amt);

	atomic_long_sub(amt, prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_dec(cg_proto->sockets_allocated);
	}

	percpu_counter_dec(prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_inc(cg_proto->sockets_allocated);
	}

	percpu_counter_inc(prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated);

	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not-atomic, so that
 * this version is not worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
