tcp.c 86 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
2
3
4
5
6
7
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
8
 * Authors:	Ross Biro
Linus Torvalds's avatar
Linus Torvalds committed
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith  :	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	: 	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen 	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

248
#include <linux/kernel.h>
Linus Torvalds's avatar
Linus Torvalds committed
249
250
251
252
253
254
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
Jens Axboe's avatar
Jens Axboe committed
255
#include <linux/skbuff.h>
256
#include <linux/scatterlist.h>
Jens Axboe's avatar
Jens Axboe committed
257
258
259
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
Linus Torvalds's avatar
Linus Torvalds committed
260
261
#include <linux/random.h>
#include <linux/bootmem.h>
262
263
#include <linux/highmem.h>
#include <linux/swap.h>
264
#include <linux/cache.h>
Herbert Xu's avatar
Herbert Xu committed
265
#include <linux/err.h>
266
#include <linux/crypto.h>
267
#include <linux/time.h>
268
#include <linux/slab.h>
Linus Torvalds's avatar
Linus Torvalds committed
269
270
271
272
273

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
274
#include <net/netdma.h>
Jens Axboe's avatar
Jens Axboe committed
275
#include <net/sock.h>
Linus Torvalds's avatar
Linus Torvalds committed
276
277
278
279

#include <asm/uaccess.h>
#include <asm/ioctls.h>

280
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
Linus Torvalds's avatar
Linus Torvalds committed
281

282
struct percpu_counter tcp_orphan_count;
283
284
EXPORT_SYMBOL_GPL(tcp_orphan_count);

Eric Dumazet's avatar
Eric Dumazet committed
285
long sysctl_tcp_mem[3] __read_mostly;
286
287
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;
Linus Torvalds's avatar
Linus Torvalds committed
288
289
290
291
292

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

Eric Dumazet's avatar
Eric Dumazet committed
293
atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
Linus Torvalds's avatar
Linus Torvalds committed
294
EXPORT_SYMBOL(tcp_memory_allocated);
295
296
297
298
299

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
Linus Torvalds's avatar
Linus Torvalds committed
300
301
EXPORT_SYMBOL(tcp_sockets_allocated);

Jens Axboe's avatar
Jens Axboe committed
302
303
304
305
306
307
308
309
310
/*
 * TCP splice context, carried through tcp_read_sock() via the
 * read_descriptor_t while servicing tcp_splice_read().
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;	/* destination pipe for spliced pages */
	size_t len;			/* bytes still left to splice */
	unsigned int flags;		/* splice modifier flags from the caller */
};

Linus Torvalds's avatar
Linus Torvalds committed
311
312
313
/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
314
 * All the __sk_mem_schedule() is of this nature: accounting
Linus Torvalds's avatar
Linus Torvalds committed
315
316
 * is strict, actions are advisory and have some latency.
 */
317
int tcp_memory_pressure __read_mostly;
Linus Torvalds's avatar
Linus Torvalds committed
318
319
EXPORT_SYMBOL(tcp_memory_pressure);

320
/*
 * Enter global TCP memory pressure: set the (deliberately non-atomic,
 * see the comment above tcp_memory_pressure) pressure flag and count
 * the transition.  Calls made while the flag is already set do nothing.
 */
void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		/* Count only the 0 -> 1 transition, not every caller. */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
/* Convert a timeout expressed in seconds into the equivalent number of
 * retransmissions, assuming exponential backoff that starts at @timeout
 * and is clamped to @rto_max.  The result saturates at 255 so it fits
 * in a u8; zero or negative @seconds maps to zero retransmits.
 */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 count;
	int elapsed;

	if (seconds <= 0)
		return 0;

	/* One transmission always happens; accumulate doubled timeouts
	 * until the requested span is covered or the counter saturates.
	 */
	count = 1;
	elapsed = timeout;
	while (seconds > elapsed && count < 255) {
		count++;
		timeout <<= 1;
		if (timeout > rto_max)
			timeout = rto_max;
		elapsed += timeout;
	}
	return count;
}

/* Inverse of secs_to_retrans(): given a retransmission count, compute
 * how many seconds the exponential backoff (initial @timeout, clamped
 * to @rto_max) would span in total.  Zero retransmits -> zero seconds.
 */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int total;

	if (!retrans)
		return 0;

	/* First attempt costs the initial timeout; each further attempt
	 * doubles it, capped at rto_max.
	 */
	total = timeout;
	while (--retrans) {
		timeout <<= 1;
		if (timeout > rto_max)
			timeout = rto_max;
		total += timeout;
	}
	return total;
}

Linus Torvalds's avatar
Linus Torvalds committed
366
367
368
369
370
371
372
373
374
375
376
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	/* Listening sockets have their own, much simpler, readiness test. */
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Readable once at least rcvlowat bytes are queued. */
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		/* An out-of-band byte sitting at the read pointer (and not
		 * delivered inline) must not count toward the low-water
		 * mark, so require one extra byte.
		 */
		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
Linus Torvalds's avatar
Linus Torvalds committed
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483

/*
 * ioctl handler for TCP sockets:
 *   SIOCINQ     - bytes available to read (minus 1 for an un-inlined
 *                 urgent byte at the read pointer, minus 1 for a FIN
 *                 sitting at the queue tail); -EINVAL on listeners.
 *   SIOCATMARK  - nonzero iff the read pointer is at the urgent mark.
 *   SIOCOUTQ    - bytes written but not yet acked (write_seq - snd_una).
 *   SIOCOUTQNSD - bytes written but not yet sent (write_seq - snd_nxt).
 * The answer is copied to user memory at @arg; unknown commands return
 * -ENOIOCTLCMD.
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		/* Lock held so copied_seq/rcv_nxt and the receive queue
		 * stay consistent while we compute the answer.
		 */
		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			struct sk_buff *skb;

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
			if (answ && skb)
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds's avatar
Linus Torvalds committed
524
525
526

/* Tag @skb so the PSH flag goes out on the wire with it, and record
 * write_seq as the last pushed byte (consulted later by forced_push()).
 */
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

531
static inline int forced_push(const struct tcp_sock *tp)
Linus Torvalds's avatar
Linus Torvalds committed
532
533
534
535
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

536
/* Queue a brand-new skb at the tail of the send queue.
 *
 * The skb carries no payload yet: seq == end_seq == write_seq, only the
 * ACK flag set, no SACK state, zero checksum.  Its truesize is charged
 * to the socket's send-buffer accounting, and any pending one-shot
 * TCP_NAGLE_PUSH override is consumed.
 */
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum    = 0;
	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked  = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

553
/* If the caller passed MSG_OOB, advance snd_up (the send urgent
 * pointer) to the end of everything queued so far.
 */
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

559
560
/* Push any pending frames out of the send queue.  The tail skb gets a
 * PSH mark unless the caller signalled MSG_MORE (forced_push() can
 * override that when too much has accumulated unpushed); MSG_MORE also
 * substitutes TCP_NAGLE_CORK for the caller's nonagle mode so the
 * stream stays corked.  A no-op when nothing is queued to send.
 */
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

574
575
/* Actor callback for tcp_read_sock(): splice at most
 * min(rd_desc->count, len) bytes of @skb, starting at @offset, into the
 * destination pipe recorded in the tcp_splice_state, then debit the
 * remaining per-call budget by the amount actually moved.  Returns the
 * skb_splice_bits() result (bytes spliced, 0, or a negative error).
 */
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

/* Run one pass of tcp_read_sock() with tcp_splice_data_recv as the
 * actor, budgeted at the bytes still outstanding in @tss.  Returns
 * whatever tcp_read_sock() returns (bytes consumed or an error).
 */
static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *    Returns the number of bytes spliced, or a negative error when
 *    nothing at all could be spliced.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			/* Nothing spliced this round: decide whether to
			 * return what we have, report an error/EOF
			 * condition, or wait for more data.
			 */
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		/* Drop and re-take the socket lock between chunks so
		 * deferred work on the socket can run, then re-check the
		 * conditions that should end the splice.
		 */
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	/* Partial success wins over a late error. */
	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
Jens Axboe's avatar
Jens Axboe committed
693

694
/* Allocate a fast-clone send skb with exactly @size bytes of tail room
 * for payload plus headroom for the protocol's worst-case header.
 *
 * If memory-quota accounting (sk_wmem_schedule) rejects the skb it is
 * freed again; if the allocation itself fails the protocol's
 * memory-pressure handler runs and the send buffer is moderated.
 * Returns the skb, or NULL in either failure case.
 */
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

Ilpo Järvinen's avatar
Ilpo Järvinen committed
719
720
721
722
/* Decide how large an skb we should build before pushing it.
 *
 * Without GSO (or when @large_allowed is 0, e.g. MSG_OOB) the goal is
 * simply the MSS.  With GSO the goal is the device's gso_max_size less
 * all header overhead, bounded via tcp_bound_to_half_wnd(), and rounded
 * to a whole number of MSS-sized segments.  The previously cached
 * segment count is reused while it still lands in the same MSS bucket,
 * avoiding a divide on the common path.
 */
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	/* Never report a goal smaller than a single MSS. */
	return max(xmit_size_goal, mss_now);
}

/* Compute the current MSS and, through @size_goal, the preferred skb
 * payload size for this send (large GSO-sized goals are disabled for
 * MSG_OOB sends).  Returns the MSS.
 */
static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now = tcp_current_mss(sk);

	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
	return mss_now;
}

Linus Torvalds's avatar
Linus Torvalds committed
760
761
762
763
/* Zero-copy send worker: attach up to @psize bytes, taken from the
 * @pages array starting at absolute byte offset @poffset, to the send
 * queue as page fragments (no data copy; pages are referenced), pushing
 * segments as they fill.  Caller must hold the socket lock.  Returns
 * the number of bytes queued, or a negative error if nothing was
 * queued.
 */
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
			 size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		/* Start a fresh skb when the queue is empty or the tail
		 * skb already reached the size goal.
		 */
		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		/* Either grow the last fragment or add a new one; a full
		 * fragment table forces a new segment.
		 */
		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		/* First chunk of this call: clear any stale PSH mark. */
		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		/* Push what we have before sleeping for buffer space,
		 * then recompute MSS/goal (they may have changed).
		 */
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	/* Partial progress wins over a late error. */
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

874
875
/* Zero-copy sendpage entry point for TCP.
 *
 * If the route's device cannot do scatter-gather (NETIF_F_SG) or cannot
 * checksum the payload in hardware (NETIF_F_ALL_CSUM), we must not attach
 * the caller's page to the skb as a fragment; fall back to the generic
 * sock_no_sendpage() path, which copies the data through sendmsg().
 * Otherwise take the socket lock and hand off to do_tcp_sendpages().
 *
 * Returns the number of bytes queued, or a negative errno.
 */
int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t sent;
	bool can_attach_page = (sk->sk_route_caps & NETIF_F_SG) &&
			       (sk->sk_route_caps & NETIF_F_ALL_CSUM);

	if (!can_attach_page)
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	sent = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return sent;
}
EXPORT_SYMBOL(tcp_sendpage);
Linus Torvalds's avatar
Linus Torvalds committed
890
891
892
893

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

894
/* Choose how much linear (header) area to reserve when allocating a new
 * skb for sendmsg.
 *
 * @sk: socket being written to
 * @sg: true when the route's device supports scatter-gather, so payload
 *      may live in page fragments rather than the linear area
 *
 * Without SG everything must fit in the linear area, so reserve a full
 * MSS.  With SG and GSO, keep the linear part small (payload follows the
 * TCP header inside a 2KB allocation) and let fragments carry the bulk.
 * With SG but no GSO, avoid an allocation that slightly overflows the
 * largest head that still fits cleanly before page fragments take over.
 */
static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int linear = tp->mss_cache;

	if (!sg)
		return linear;

	if (sk_can_gso(sk)) {
		/* Small frames wont use a full page:
		 * Payload will immediately follow tcp header.
		 */
		return SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
	} else {
		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		/* Clamp to the largest head size when the MSS would
		 * otherwise straddle the head/fragment boundary.
		 */
		if (linear >= pgbreak &&
		    linear <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
			linear = pgbreak;
	}

	return linear;
}

917
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
Linus Torvalds's avatar
Linus Torvalds committed
918
919
920
921
922
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
923
	int iovlen, flags, err, copied;
924
	int mss_now, size_goal;
925
	bool sg;
Linus Torvalds's avatar
Linus Torvalds committed
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

Ilpo Järvinen's avatar
Ilpo Järvinen committed
941
	mss_now = tcp_send_mss(sk, &size_goal, flags);
Linus Torvalds's avatar
Linus Torvalds committed
942
943
944
945
946
947
948
949

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
950
		goto out_err;
Linus Torvalds's avatar
Linus Torvalds committed
951

952
	sg = !!(sk->sk_route_caps & NETIF_F_SG);
953

Linus Torvalds's avatar
Linus Torvalds committed
954
	while (--iovlen >= 0) {
955
		size_t seglen = iov->iov_len;
Linus Torvalds's avatar
Linus Torvalds committed
956
957
958
959
960
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
961
962
			int copy = 0;
			int max = size_goal;
Linus Torvalds's avatar
Linus Torvalds committed
963

964
			skb = tcp_write_queue_tail(sk);
965
966
967
968
969
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}
Linus Torvalds's avatar
Linus Torvalds committed
970

971
			if (copy <= 0) {
Linus Torvalds's avatar
Linus Torvalds committed
972
973
974
975
976
977
978
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

979
980
981
				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
Linus Torvalds's avatar
Linus Torvalds committed
982
983
984
985
986
987
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
988
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
989
					skb->ip_summed = CHECKSUM_PARTIAL;
Linus Torvalds's avatar
Linus Torvalds committed
990

991
				skb_entail(sk, skb);
992
				copy = size_goal;
993
				max = size_goal;
Linus Torvalds's avatar
Linus Torvalds committed
994
995
996
997
998
999
1000
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
For faster browsing, not all history is shown. View entire blame