/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols but is
 *	    able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM - loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does
 *			  IPv6 instead.
 *
 *	Any questions? No questions, good. 		--ANK
 */
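
/*
 * Illustrative sketch (not part of the original header): how a transmit
 * path typically requests CHECKSUM_PARTIAL. The members used here
 * (ip_summed, csum_start, csum_offset) are the real sk_buff fields
 * documented below; "skb" is an assumed, already-built buffer.
 *
 *	skb->ip_summed = CHECKSUM_PARTIAL;
 *	skb->csum_start = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct tcphdr, check);
 *
 * The device (or the software fallback) then checksums from
 * skb->head + skb->csum_start to the end of the packet and stores the
 * result at skb->head + skb->csum_start + skb->csum_offset.
 */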

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u32 page_offset;
	__u32 size;
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/**
 * struct skb_shared_tx - instructions for time stamping of outgoing packets
 * @hardware:		generate hardware time stamp
 * @software:		generate software time stamp
 * @in_progress:	device driver is going to provide
 *			hardware time stamp
 * @flags:		all shared_tx flags
 *
 * These flags are attached to packets as part of the
 * &skb_shared_info. Use skb_tx() to get a pointer.
 */
union skb_shared_tx {
	struct {
		__u8	hardware:1,
			software:1,
			in_progress:1;
	};
	__u8 flags;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	__be32          ip6_frag_id;
	union skb_shared_tx tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	skb_frag_t	frags[MAX_SKB_FRAGS];
	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until the skb destructor runs */
	void *		destructor_arg;
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
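
/*
 * Illustrative sketch (not part of the original header): how the two
 * halves of dataref are read back. This is the same split that
 * skb_header_cloned() below performs; the local names are arbitrary.
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 */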


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/** 
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	kmemcheck_bitfield_begin(flags2);
	__u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2,
				deliver_no_wcard:1;
#else
	__u8			deliver_no_wcard:1;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check that we are still in an
	 * rcu_read_lock section.
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets the skb dst, assuming a reference was taken on dst, which should
 * be released by skb_dst_drop().
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets the skb dst, assuming a reference was not taken on dst;
 * skb_dst_drop() should not dst_release() this dst.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
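
/*
 * Illustrative sketch (not part of the original header): the noref form
 * is only legal inside an RCU read-side section, e.g. in a hypothetical
 * input-path helper:
 *
 *	rcu_read_lock();
 *	skb_dst_set_noref(skb, dst);
 *	... use skb_dst(skb) ...
 *	rcu_read_unlock();
 *
 * A dst set this way must not outlive the RCU section; take a real
 * reference and use skb_dst_set() if it has to.
 */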

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
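
/*
 * Illustrative sketch (not part of the original header): a typical
 * allocation pattern. Headroom is reserved up front so later skb_push()
 * calls for protocol headers need no reallocation; "hlen", "dlen" and
 * "data" are assumed inputs.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, dlen), data, dlen);
 */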

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->tx_flags;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff *) list);
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return (skb->prev == (struct sk_buff *) list);
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
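
/*
 * Illustrative sketch (not part of the original header): walking a queue
 * with these helpers, assuming the caller holds whatever lock protects
 * "list" and that handle() is a hypothetical per-packet function:
 *
 *	struct sk_buff *skb = skb_peek(list);
 *
 *	while (skb) {
 *		handle(skb);
 *		skb = skb_queue_is_last(list, skb) ? NULL :
 *		      skb_queue_next(list, skb);
 *	}
 */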

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	called from interrupt context or with spinlocks held, @pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
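
/*
 * Illustrative sketch (not part of the original header): a receive
 * handler that may modify the skb typically unshares it first; the
 * error handling shown is an assumption, not mandated by this API.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */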

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
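
/*
 * Illustrative sketch (not part of the original header): draining one
 * queue into another in a single O(1) operation, assuming the caller
 * holds the locks protecting both queues:
 *
 *	skb_queue_splice_tail_init(&tmp_queue, &sk->sk_receive_queue);
 *
 * Afterwards tmp_queue is empty and reinitialised, and all of its
 * buffers sit at the tail of the receive queue.
 */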

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
985
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frags(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}
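
/*
 * Illustrative sketch (not part of the original header): the classic
 * data-area dance. Headroom is reserved first, the payload is appended
 * with skb_put(), and each header is then prepended with skb_push();
 * "hdr_room", "payload" and "plen" are assumed inputs.
 *
 *	skb_reserve(skb, hdr_room);
 *	memcpy(skb_put(skb, plen), payload, plen);
 *	struct udphdr *uh = (struct udphdr *)skb_push(skb, sizeof(*uh));
 *
 * skb_pull() walks the other way and is what receive-path parsing uses
 * to step over each header it has consumed.
 */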

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
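
/*
 * Illustrative sketch (not part of the original header): receive-path
 * parsing must make a header linear before dereferencing it, e.g. in a
 * hypothetical handler reading a UDP header:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 *		goto drop;
 *	uh = (struct udphdr *)skb->data;
 */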

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
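
/*
 * Illustrative sketch (not part of the original header): once the
 * receive path has set the offsets, a protocol handler can recover
 * typed header pointers without caring how they are stored:
 *
 *	struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
 *	int tlen = skb->len - skb_transport_offset(skb);
 */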

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * With RPS, we raised NET_SKB_PAD to 64 so that get_rps_cpus() fetches span
 * a 64 bytes aligned block to fit modern (>= 64 bytes) cache line sizes
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	64
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);