/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic INET transport hashtables
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/ip.h>

/*
 * Allocate and initialize a new local port bind bucket.
 * The bindhash mutex for snum's hash chain must be held here.
 */
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
						 struct net *net,
						 struct inet_bind_hashbucket *head,
						 const unsigned short snum)
{
	struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);

	if (tb != NULL) {
		tb->ib_net       = hold_net(net);
		tb->port      = snum;
		tb->fastreuse = 0;
		INIT_HLIST_HEAD(&tb->owners);
		hlist_add_head(&tb->node, &head->chain);
	}
	return tb;
}
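
/*
 * Usage sketch (a minimal illustration, not code from this file): a caller
 * such as inet_csk_get_port() walks the port's chain under head->lock and
 * only creates a bucket on a miss.  "bind_bucket_find_or_create" is a
 * hypothetical helper; real callers keep head->lock held until
 * inet_bind_hash() has added the socket to tb->owners, rather than
 * dropping it as done here for brevity.
 */
static struct inet_bind_bucket *
bind_bucket_find_or_create(struct inet_hashinfo *hinfo, struct net *net,
			   const unsigned short snum)
{
	struct inet_bind_hashbucket *head =
		&hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	struct inet_bind_bucket *tb;
	struct hlist_node *node;

	spin_lock(&head->lock);
	inet_bind_bucket_for_each(tb, node, &head->chain)
		if (tb->ib_net == net && tb->port == snum)
			goto found;
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net, head, snum);
found:
	spin_unlock(&head->lock);
	return tb;
}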

/*
 * Caller must hold hashbucket lock for this tb with local BH disabled
 */
void inet_bind_bucket_destroy(struct kmem_cache *cachep, struct inet_bind_bucket *tb)
{
	if (hlist_empty(&tb->owners)) {
		__hlist_del(&tb->node);
		release_net(tb->ib_net);
		kmem_cache_free(cachep, tb);
	}
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum)
{
	inet_sk(sk)->num = snum;
	sk_add_bind_node(sk, &tb->owners);
	inet_csk(sk)->icsk_bind_hash = tb;
}

/*
 * Get rid of any references to a local port held by the given sock.
 */
static void __inet_put_port(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->num,
			hashinfo->bhash_size);
	struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	__sk_del_bind_node(sk);
	inet_csk(sk)->icsk_bind_hash = NULL;
	inet_sk(sk)->num = 0;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&head->lock);
}

void inet_put_port(struct sock *sk)
{
	local_bh_disable();
	__inet_put_port(sk);
	local_bh_enable();
}

EXPORT_SYMBOL(inet_put_port);

void __inet_inherit_port(struct sock *sk, struct sock *child)
{
	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(child)->num,
			table->bhash_size);
	struct inet_bind_hashbucket *head = &table->bhash[bhash];
	struct inet_bind_bucket *tb;

	spin_lock(&head->lock);
	tb = inet_csk(sk)->icsk_bind_hash;
	sk_add_bind_node(child, &tb->owners);
	inet_csk(child)->icsk_bind_hash = tb;
	spin_unlock(&head->lock);
}

EXPORT_SYMBOL_GPL(__inet_inherit_port);
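
/*
 * Usage sketch: the accept path hands a newly created child the listener's
 * bind bucket; e.g. tcp_v4_syn_recv_sock() does (roughly):
 *
 *	__inet_inherit_port(sk, newsk);
 *
 * so the child holds a reference on the local port without re-running bind.
 */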

/*
 * Waiting without WQ_FLAG_EXCLUSIVE is fine on UP but can be very bad on
 * SMP: when several writers sleep and the reader wakes them up, all but one
 * immediately hit the write lock and grab all the CPUs. Exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines
 * (a wakeup on each exclusive lock release). It should really be ifdef'd.
 */
void inet_listen_wlock(struct inet_hashinfo *hashinfo)
	__acquires(hashinfo->lhash_lock)
{
	write_lock(&hashinfo->lhash_lock);

	if (atomic_read(&hashinfo->lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&hashinfo->lhash_wait,
						  &wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&hashinfo->lhash_users))
				break;
			write_unlock_bh(&hashinfo->lhash_lock);
			schedule();
			write_lock_bh(&hashinfo->lhash_lock);
		}

		finish_wait(&hashinfo->lhash_wait, &wait);
	}
}
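
/*
 * The matching reader side lives in <net/inet_hashtables.h>: readers bump
 * lhash_users under a brief read_lock and wake any waiting writer once the
 * count drops to zero.  A rough sketch of those helpers (paraphrased; see
 * the header for the authoritative version):
 *
 *	static inline void inet_listen_lock(struct inet_hashinfo *hashinfo)
 *	{
 *		read_lock(&hashinfo->lhash_lock);
 *		atomic_inc(&hashinfo->lhash_users);
 *		read_unlock(&hashinfo->lhash_lock);
 *	}
 *
 *	static inline void inet_listen_unlock(struct inet_hashinfo *hashinfo)
 *	{
 *		if (atomic_dec_and_test(&hashinfo->lhash_users))
 *			wake_up(&hashinfo->lhash_wait);
 *	}
 */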

/*
 * Don't inline this cruft. There are some nice properties to exploit here.
 * The BSD API does not allow a listening sock to specify the remote port or
 * the remote address for the connection, so always assume both are
 * wildcarded during the search, since they can never be anything else.
 */
static struct sock *inet_lookup_listener_slow(struct net *net,
					      const struct hlist_head *head,
					      const __be32 daddr,
					      const unsigned short hnum,
					      const int dif)
{
	struct sock *result = NULL, *sk;
	const struct hlist_node *node;
	int hiscore = -1;

	sk_for_each(sk, node, head) {
		const struct inet_sock *inet = inet_sk(sk);

		if (net_eq(sock_net(sk), net) && inet->num == hnum &&
				!ipv6_only_sock(sk)) {
			const __be32 rcv_saddr = inet->rcv_saddr;
			int score = sk->sk_family == PF_INET ? 1 : 0;

			if (rcv_saddr) {
				if (rcv_saddr != daddr)
					continue;
				score += 2;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score += 2;
			}
			if (score == 5)
				return sk;
			if (score > hiscore) {
				hiscore	= score;
				result	= sk;
			}
		}
	}
	return result;
}
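
/*
 * Worked example of the scoring above: an IPv4 listener bound to both the
 * packet's destination address and the inbound device scores
 * 1 (PF_INET) + 2 (rcv_saddr match) + 2 (bound_dev_if match) = 5, the
 * maximum, so the walk returns immediately.  A fully wildcard AF_INET6
 * listener scores 0 and only wins when nothing more specific exists, while
 * a listener bound to a different address or device is skipped outright.
 */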

/* Optimize the common listener case. */
struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    const __be32 daddr, const unsigned short hnum,
				    const int dif)
{
	struct sock *sk = NULL;
	const struct hlist_head *head;

	read_lock(&hashinfo->lhash_lock);
	head = &hashinfo->listening_hash[inet_lhashfn(hnum)];
	if (!hlist_empty(head)) {
		const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));

		if (inet->num == hnum && !sk->sk_node.next &&
		    (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
		    (sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
		    !sk->sk_bound_dev_if && net_eq(sock_net(sk), net))
			goto sherry_cache;
		sk = inet_lookup_listener_slow(net, head, daddr, hnum, dif);
	}
	if (sk) {
sherry_cache:
		sock_hold(sk);
	}
	read_unlock(&hashinfo->lhash_lock);
	return sk;
}
EXPORT_SYMBOL_GPL(__inet_lookup_listener);

struct sock * __inet_lookup_established(struct net *net,
				  struct inet_hashinfo *hashinfo,
				  const __be32 saddr, const __be16 sport,
				  const __be32 daddr, const u16 hnum,
				  const int dif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	const struct hlist_node *node;
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(lock);
	sk_for_each(sk, node, &head->chain) {
		if (INET_MATCH(sk, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}

	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &head->twchain) {
		if (INET_TW_MATCH(sk, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto hit;
	}
	sk = NULL;
out:
	read_unlock(lock);
	return sk;
hit:
	sock_hold(sk);
	goto out;
}
EXPORT_SYMBOL_GPL(__inet_lookup_established);
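
/*
 * Demux-order sketch: receive paths try the established table first and
 * fall back to the listener lookup, much as the __inet_lookup() wrapper in
 * <net/inet_hashtables.h> does (roughly):
 *
 *	struct sock *sk = __inet_lookup_established(net, hashinfo, saddr,
 *						    sport, daddr, ntohs(dport),
 *						    dif);
 *	return sk ? : __inet_lookup_listener(net, hashinfo, daddr,
 *					     ntohs(dport), dif);
 */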

/* called with local bh disabled */
static int __inet_check_established(struct inet_timewait_death_row *death_row,
				    struct sock *sk, __u16 lport,
				    struct inet_timewait_sock **twp)
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr = inet->rcv_saddr;
	__be32 saddr = inet->daddr;
	int dif = sk->sk_bound_dev_if;
	INET_ADDR_COOKIE(acookie, saddr, daddr)
	const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
	unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
	rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
	struct sock *sk2;
	const struct hlist_node *node;
	struct inet_timewait_sock *tw;
	struct net *net = sock_net(sk);

	prefetch(head->chain.first);
	write_lock(lock);

	/* Check TIME-WAIT sockets first. */
	sk_for_each(sk2, node, &head->twchain) {
		tw = inet_twsk(sk2);

		if (INET_TW_MATCH(sk2, net, hash, acookie,
					saddr, daddr, ports, dif)) {
			if (twsk_unique(sk, sk2, twp))
				goto unique;
			else
				goto not_unique;
		}
	}
	tw = NULL;

	/* And established part... */
	sk_for_each(sk2, node, &head->chain) {
		if (INET_MATCH(sk2, net, hash, acookie,
					saddr, daddr, ports, dif))
			goto not_unique;
	}

unique:
	/* Must record num and sport now. Otherwise we will see
	 * a socket with a funny identity in the hash table. */
	inet->num = lport;
	inet->sport = htons(lport);
	sk->sk_hash = hash;
	BUG_TRAP(sk_unhashed(sk));
	__sk_add_node(sk, &head->chain);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);

	if (twp) {
		*twp = tw;
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
	} else if (tw) {
		/* Silly. Should hash-dance instead... */
		inet_twsk_deschedule(tw, death_row);
		NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);

		inet_twsk_put(tw);
	}

	return 0;

not_unique:
	write_unlock(lock);
	return -EADDRNOTAVAIL;
}

static inline u32 inet_sk_port_offset(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	return secure_ipv4_port_ephemeral(inet->rcv_saddr, inet->daddr,
					  inet->dport);
}
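
/*
 * This per-socket offset seeds the ephemeral-port walk in
 * __inet_hash_connect() below: starting at a point derived from
 * (rcv_saddr, daddr, dport) spreads concurrent connects to different
 * destinations across the port range instead of having them all contend
 * for the same first candidates.
 */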

void __inet_hash_nolisten(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;
	struct inet_ehash_bucket *head;

	BUG_TRAP(sk_unhashed(sk));

	sk->sk_hash = inet_sk_ehashfn(sk);
	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	list = &head->chain;
	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	write_lock(lock);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_hash_nolisten);

static void __inet_hash(struct sock *sk)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct hlist_head *list;
	rwlock_t *lock;

	if (sk->sk_state != TCP_LISTEN) {
		__inet_hash_nolisten(sk);
		return;
	}

	BUG_TRAP(sk_unhashed(sk));
	list = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	lock = &hashinfo->lhash_lock;

	inet_listen_wlock(hashinfo);
	__sk_add_node(sk, list);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock(lock);
	wake_up(&hashinfo->lhash_wait);
}

void inet_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		local_bh_disable();
		__inet_hash(sk);
		local_bh_enable();
	}
}
EXPORT_SYMBOL_GPL(inet_hash);

void inet_unhash(struct sock *sk)
{
	rwlock_t *lock;
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;

	if (sk_unhashed(sk))
		goto out;

	if (sk->sk_state == TCP_LISTEN) {
		local_bh_disable();
		inet_listen_wlock(hashinfo);
		lock = &hashinfo->lhash_lock;
	} else {
		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
		write_lock_bh(lock);
	}

	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(lock);
out:
	if (sk->sk_state == TCP_LISTEN)
		wake_up(&hashinfo->lhash_wait);
}
EXPORT_SYMBOL_GPL(inet_unhash);

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
		struct sock *sk, u32 port_offset,
		int (*check_established)(struct inet_timewait_death_row *,
			struct sock *, __u16, struct inet_timewait_sock **),
		void (*hash)(struct sock *sk))
{
	struct inet_hashinfo *hinfo = death_row->hashinfo;
	const unsigned short snum = inet_sk(sk)->num;
	struct inet_bind_hashbucket *head;
	struct inet_bind_bucket *tb;
	int ret;
	struct net *net = sock_net(sk);

	if (!snum) {
		int i, remaining, low, high, port;
		static u32 hint;
		u32 offset = hint + port_offset;
		struct hlist_node *node;
		struct inet_timewait_sock *tw = NULL;

		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;

		local_bh_disable();
		for (i = 1; i <= remaining; i++) {
			port = low + (i + offset) % remaining;
			head = &hinfo->bhash[inet_bhashfn(net, port,
					hinfo->bhash_size)];
			spin_lock(&head->lock);

			/* Does not bother with rcv_saddr checks,
			 * because the established check is already
			 * unique enough.
			 */
			inet_bind_bucket_for_each(tb, node, &head->chain) {
				if (tb->ib_net == net && tb->port == port) {
					BUG_TRAP(!hlist_empty(&tb->owners));
					if (tb->fastreuse >= 0)
						goto next_port;
					if (!check_established(death_row, sk,
								port, &tw))
						goto ok;
					goto next_port;
				}
			}

			tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
					net, head, port);
			if (!tb) {
				spin_unlock(&head->lock);
				break;
			}
			tb->fastreuse = -1;
			goto ok;

		next_port:
			spin_unlock(&head->lock);
		}
		local_bh_enable();

		return -EADDRNOTAVAIL;

ok:
		hint += i;

		/* Head lock still held and bh's disabled */
		inet_bind_hash(sk, tb, port);
		if (sk_unhashed(sk)) {
			inet_sk(sk)->sport = htons(port);
			hash(sk);
		}
		spin_unlock(&head->lock);

		if (tw) {
			inet_twsk_deschedule(tw, death_row);
			inet_twsk_put(tw);
		}

		ret = 0;
		goto out;
	}

	head = &hinfo->bhash[inet_bhashfn(net, snum, hinfo->bhash_size)];
	tb  = inet_csk(sk)->icsk_bind_hash;
	spin_lock_bh(&head->lock);
	if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
		hash(sk);
		spin_unlock_bh(&head->lock);
		return 0;
	} else {
		spin_unlock(&head->lock);
		/* No definite answer... Walk to established hash table */
		ret = check_established(death_row, sk, snum, NULL);
out:
		local_bh_enable();
		return ret;
	}
}
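
/*
 * Worked example of the search above: with the default local port range of
 * [32768, 61000], remaining = 28233.  If hint + port_offset == 70000, the
 * first candidate is low + (1 + 70000) % 28233 = 32768 + 13535 = 46303,
 * and successive values of i visit every port in the range exactly once
 * before -EADDRNOTAVAIL is returned.
 */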

/*
 * Bind a port for a connect operation and hash it.
 */
int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk)
{
	return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
			__inet_check_established, __inet_hash_nolisten);
}

EXPORT_SYMBOL_GPL(inet_hash_connect);
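
/*
 * Usage sketch: a connect()-path caller such as tcp_v4_connect() binds the
 * source port and hashes the socket in one step, roughly:
 *
 *	err = inet_hash_connect(&tcp_death_row, sk);
 *	if (err)
 *		goto failure;
 *
 * where tcp_death_row is TCP's inet_timewait_death_row; other transports
 * (e.g. DCCP) pass their own.
 */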