/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	: 	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 * 		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 * 	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 * 	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define RT_FL_TOS(oldflp) \
	((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int ip_rt_secret_interval __read_mostly	= 10 * 60 * HZ;
static int rt_chain_length_max __read_mostly	= 20;

static struct delayed_work expires_work;
static unsigned long expires_ljiffies;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
static void rt_emergency_hash_rebuild(struct net *net);


static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
	.entries =		ATOMIC_INIT(0),
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
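
/*
 * Editorial sketch (not part of the original file): the table above is
 * indexed by the four TOS bits of the IPv4 header, the way the
 * rt_tos2priority() helper in <net/route.h> does it.  The helper name
 * below is hypothetical and the block is compiled out.
 */
#if 0
static inline char tos2prio_example(u8 tos)
{
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];	/* 4 TOS bits -> index 0..15 */
}
#endif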


/*
 * Route cache.
 */

/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks.
 * The size of this table is a power of two and depends on the number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
			GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;
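
/*
 * Editorial sketch (not part of the original file): a writer combines
 * rt_hash_lock_addr() with the chain layout above -- take the per-chain
 * spinlock, unlink entries in place, and let RCU defer the actual free
 * via rt_free().  rt_prune_chain_example() is a hypothetical helper
 * (rt_is_expired() and rt_free() are defined later in this file), and
 * the block is compiled out.
 */
#if 0
static void rt_prune_chain_example(unsigned int slot)
{
	struct rtable *rth, **rthp;

	spin_lock_bh(rt_hash_lock_addr(slot));	/* writers serialize per chain */
	rthp = &rt_hash_table[slot].chain;
	while ((rth = *rthp) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->u.dst.rt_next;	/* unlink under the lock */
			rt_free(rth);			/* freed after an RCU grace period */
		} else
			rthp = &rth->u.dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(slot));
}
#endif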

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
		int genid)
{
	return jhash_3words((__force u32)(__be32)(daddr),
			    (__force u32)(__be32)(saddr),
			    idx, genid)
		& rt_hash_mask;
}
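
/*
 * Editorial note (not part of the original file): callers hash a flow by
 * destination, source, interface index and the current generation id,
 * roughly:
 *
 *	hash = rt_hash(daddr, saddr, iif, rt_genid(net));
 *	rth  = rcu_dereference(rt_hash_table[hash].chain);
 *
 * Mixing the genid into the hash means a genid bump scatters stale
 * entries across buckets, where lookups done with the new genid no
 * longer find them.
 */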

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}

#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rt_hash_table[st->bucket].chain)
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference(r->u.dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = r->u.dst.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rt_hash_table[st->bucket].chain);
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return rcu_dereference(r);
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			r->u.dst.dev ? r->u.dst.dev->name : "*",
			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			r->u.dst.__use, 0, (unsigned long)r->rt_src,
			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->u.dst, RTAX_WINDOW),
			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{

}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}
	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_NET_CLS_ROUTE
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *dst = (u32 *) buffer;

		*start = buffer;
		memset(dst, 0, length);

		for_each_possible_cpu(i) {
			unsigned int j;
			u32 *src;

			src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
			ip_rt_acct_read, NULL);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
	remove_proc_entry("rt_acct", net->proc_net);
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->u.dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		goto out;

	age = jiffies - rth->u.dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}

static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct flowi *fl1,
					const struct flowi *fl2)
{
	return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
		(fl1->iif ^ fl2->iif)) == 0);
}
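
/*
 * Editorial note (not part of the original file): the XOR/OR idiom above
 * is a branchless equality test -- the OR of the XORed field pairs is
 * zero if and only if every pair of fields matches.
 */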

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}

static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
}

/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to be rescheduled if necessary
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable * tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rt_hash_table[i].chain;
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable ** prev, * p;

		rth = rt_hash_table[i].chain;

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail; tail = tail->u.dst.rt_next)
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = *prev; p; p = next) {
			next = p->u.dst.rt_next;
			if (!rt_is_expired(p)) {
				prev = &p->u.dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rt_hash_table[i].chain;
		rt_hash_table[i].chain = NULL;
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rth->u.dst.rt_next;
			rt_free(rth);
		}
	}
}

/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This gives an estimate of rt_chain_length_max:
 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
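
/*
 * Editorial example (not part of the original file): with FRACT_BITS = 3,
 * a chain of length 5 contributes 5 * ONE = 40 to "sum", so avg and sd
 * computed in rt_check_expire() stay scaled by ONE; shifting
 * (avg + 4*sd) right by FRACT_BITS converts the result back to whole
 * entries.
 */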

static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, *aux, **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	unsigned long delta;
	u64 mult;

	delta = jiffies - expires_ljiffies;
	expires_ljiffies = jiffies;
	mult = ((u64)delta) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (*rthp == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			prefetch(rth->u.dst.rt_next);
			if (rt_is_expired(rth)) {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->u.dst.expires)) {
nofree:
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					/*
					 * We only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					for (aux = rt_hash_table[i].chain;;) {
						if (aux == rth) {
							length += ONE;
							break;
						}
						if (compare_hash_inputs(&aux->fl, &rth->fl))
							break;
						aux = aux->u.dst.rt_next;
					}
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
				goto nofree;

			/* Cleanup aged off entries. */
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					ip_rt_gc_elasticity,
					(avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}

/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without repeating a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
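
/*
 * Editorial example (not part of the original file): rt_cache_flush(net, -1)
 * only bumps rt_genid, so stale entries die lazily via gc, while
 * rt_cache_flush(net, 0) additionally walks the whole table through
 * rt_do_flush().
 */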
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}

/*
 * We change rt_genid and let gc do the cleanup
 */
static void rt_secret_rebuild(unsigned long __net)
{
	struct net *net = (struct net *)__net;
	rt_cache_invalidate(net);
	mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
}

static void rt_secret_rebuild_oneshot(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
	rt_cache_invalidate(net);
	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit()) {
		printk(KERN_WARNING "Route hash chain too long!\n");
		printk(KERN_WARNING "Adjust your secret_interval!\n");
	}

	rt_secret_rebuild_oneshot(net);
}

/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, when the number of aged-off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle, expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */

static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire is reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}

static int rt_intern_hash(unsigned hash, struct rtable *rt,
			  struct rtable **rp, struct sk_buff *skb)
{
	struct rtable	*rth, **rthp;
	unsigned long	now;
	struct rtable *cand, **candp;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->u.dst.dev))) {
		/*
		 * If we're not caching, just tell the caller we
		 * were successful and don't touch the route.  The
		 * caller holds the sole reference to the cache entry, and
		 * it will be released when the caller is done with it.
		 * If we drop it here, the callers have no way to resolve routes
		 * when we're not caching.  Instead, just point *rp at rt, so
		 * the caller gets a single use out of the route
		 * Note that we do rt_free on this new route entry, so that
		 * once its refcount hits zero, we are still able to reap it
		 * (Thanks Alexey)
		 * Note also the rt_free uses call_rcu.  We don't actually
		 * need rcu protection here, this is just our path to get
		 * on the route gc list.
		 */

		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
			int err = arp_bind_neighbour(&rt->u.dst);
			if (err) {
				if (net_ratelimit())
					printk(KERN_WARNING
					    "Neighbour table failure & not caching routes.\n");
				rt_drop(rt);
				return err;
			}
		}

		rt_free(rt);
		goto skip_hashing;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->u.dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->u.dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (rp)
				*rp = rth;
			else
				skb_dst_set(skb, &rth->u.dst);
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max) {
			struct net *net = dev_net(rt->u.dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(dev_net(rt->u.dst.dev))) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->u.dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->u.dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
		       hash, &rt->rt_dst);
		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUs.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));

skip_hashing:
	if (rp)
		*rp = rt;
	else
		skb_dst_set(skb, &rt->u.dst);
	return 0;
}

void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}

/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If the peer is attached to the destination, it is never detached,
		   so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}

static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp, *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));