/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *  	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 *	At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 *	Modified the slab allocator to be node aware on NUMA systems.
 *	Each node has its own list of partial, free and full slabs.
 *	All object allocations for a node occur from node specific slab lists.
 */
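
/*
 * Illustrative use of the cache API implemented here (a minimal sketch;
 * "struct foo" and "foo_cachep" are hypothetical names, not part of this
 * file).  A cache is created once per object type, then objects are
 * allocated from and returned to it:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */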

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

/*
 * An alien_cache holds objects that were freed on this node but belong to
 * slab pages of a different (remote) node; the lock protects the embedded
 * array_cache, which may be touched by several CPUs of this node.
 */
struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}
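
/*
 * Note on the helpers above: objects handed out by the allocator are at
 * least word aligned, so bit 0 of an object pointer is normally clear.
 * SLAB_OBJ_PFMEMALLOC borrows that bit to tag pointers while they sit in
 * an array_cache; the tag must be cleared with clear_obj_pfmemalloc()
 * before the pointer is dereferenced or handed back to a caller.
 */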

/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static int slab_early_init = 1;

#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))

static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long*) (objp + obj_offset(cachep) -
				      sizeof(unsigned long long));
}

static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#define OBJECT_FREE (0)
#define OBJECT_ACTIVE (1)

#ifdef CONFIG_DEBUG_SLAB_LEAK

static void set_obj_status(struct page *page, int idx, int val)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;
	status[idx] = val;
}

static inline unsigned int get_obj_status(struct page *page, int idx)
{
	int freelist_size;
	char *status;
	struct kmem_cache *cachep = page->slab_cache;

	freelist_size = cachep->num * sizeof(freelist_idx_t);
	status = (char *)page->freelist + freelist_size;

	return status[idx];
}

#else
static inline void set_obj_status(struct page *page, int idx, int val) {}

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * the limit has been overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 *   Using the fact that size is a constant for a particular cache,
 *   we can replace (offset / cache->size) by
 *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
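
/*
 * index_to_obj() and obj_to_index() are inverses: for any valid object
 * index i on a slab page,
 *	obj_to_index(cache, page, index_to_obj(cache, page, i)) == i.
 * reciprocal_buffer_size is precomputed from cache->size (see
 * linux/reciprocal_div.h), so the per-object division costs only a
 * multiply and a shift on the hot path.
 */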

/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

#define BAD_ALIEN_MAGIC 0x01020304ul

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

static size_t calculate_freelist_size(int nr_objs, size_t align)
{
	size_t freelist_size;

	freelist_size = nr_objs * sizeof(freelist_idx_t);
	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		freelist_size += nr_objs * sizeof(char);

	if (align)
		freelist_size = ALIGN(freelist_size, align);

	return freelist_size;
}

static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
				size_t idx_size, size_t align)
{
	int nr_objs;
	size_t remained_size;
	size_t freelist_size;
	int extra_space = 0;

	if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
		extra_space = sizeof(char);
	/*
	 * Ignore padding for the initial guess. The padding
	 * is at most @align-1 bytes, and @buffer_size is at
	 * least @align. In the worst case, this result will
	 * be one greater than the number of objects that fit
	 * into the memory allocation when taking the padding
	 * into account.
	 */
	nr_objs = slab_size / (buffer_size + idx_size + extra_space);

	/*
	 * This calculated number will be either the right
	 * amount, or one greater than what we want.
	 */
	remained_size = slab_size - nr_objs * buffer_size;
	freelist_size = calculate_freelist_size(nr_objs, align);
	if (remained_size < freelist_size)
		nr_objs--;

	return nr_objs;
}
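
/*
 * Worked example for the estimate above (illustrative numbers only): for a
 * 4096-byte slab, 120-byte objects, 1-byte freelist indexes and no leak-debug
 * bytes, the initial guess is 4096 / 121 = 33 objects.  The remaining
 * 4096 - 33 * 120 = 136 bytes comfortably hold the 33 freelist index bytes
 * even after alignment, so the guess stands and 33 objects fit per slab.
 */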

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
			   size_t align, int flags, size_t *left_over,
			   unsigned int *num)
{
	int nr_objs;
	size_t mgmt_size;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - One unsigned int for each object
	 * - Padding to respect alignment of @align
	 * - @buffer_size bytes for each object
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & CFLGS_OFF_SLAB) {
		mgmt_size = 0;
		nr_objs = slab_size / buffer_size;

	} else {
		nr_objs = calculate_nr_objs(slab_size, buffer_size,
					sizeof(freelist_idx_t), align);
		mgmt_size = calculate_freelist_size(nr_objs, align);
	}
	*num = nr_objs;
	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			char *msg)
{
	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line.
 */

static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}
__setup("noaliencache", noaliencache_setup);

static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}
__setup("slab_max_order=", slab_max_order_setup);

#ifdef CONFIG_NUMA
/*
 * Special reaping functions for NUMA systems called from cache_reap().
 * These take care of doing round robin flushing of alien caches (containing
 * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
 */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

static void init_reap_node(int cpu)
{
	int node;

	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);

	per_cpu(slab_reap_node, cpu) = node;
}

static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node(node, node_online_map);
	if (unlikely(node >= MAX_NUMNODES))
		node = first_node(node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

static inline bool is_slab_pfmemalloc(struct page *page)
{
	return PageSlabPfmemalloc(page);
}

/* Clears pfmemalloc_active if no slabs have pfmalloc set */
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
						struct array_cache *ac)
{
	struct kmem_cache_node *n = get_node(cachep, numa_mem_id());
	struct page *page;
	unsigned long flags;

	if (!pfmemalloc_active)
		return;

	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->slabs_full, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_partial, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	list_for_each_entry(page, &n->slabs_free, lru)
		if (is_slab_pfmemalloc(page))
			goto out;

	pfmemalloc_active = false;
out:
	spin_unlock_irqrestore(&n->list_lock, flags);
}

static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
						gfp_t flags, bool force_refill)
{
	int i;
	void *objp = ac->entry[--ac->avail];

	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
	if (unlikely(is_obj_pfmemalloc(objp))) {
		struct kmem_cache_node *n;

		if (gfp_pfmemalloc_allowed(flags)) {
			clear_obj_pfmemalloc(&objp);
			return objp;
		}

		/* The caller cannot use PFMEMALLOC objects, find another one */
		for (i = 0; i < ac->avail; i++) {
			/* If a !PFMEMALLOC object is found, swap them */
			if (!is_obj_pfmemalloc(ac->entry[i])) {
				objp = ac->entry[i];
				ac->entry[i] = ac->entry[ac->avail];
				ac->entry[ac->avail] = objp;
				return objp;
			}
		}

		/*
		 * If there are empty slabs on the slabs_free list and we are
		 * being forced to refill the cache, mark this one !pfmemalloc.
		 */
		n = get_node(cachep, numa_mem_id());
		if (!list_empty(&n->slabs_free) && force_refill) {
			struct page *page = virt_to_head_page(objp);
			ClearPageSlabPfmemalloc(page);
			clear_obj_pfmemalloc(&objp);
			recheck_pfmemalloc_active(cachep, ac);
			return objp;
		}

		/* No !PFMEMALLOC objects available */
		ac->avail++;
		objp = NULL;
	}

	return objp;
}

static inline void *ac_get_obj(struct kmem_cache *cachep,
			struct array_cache *ac, gfp_t flags, bool force_refill)
{
	void *objp;

	if (unlikely(sk_memalloc_socks()))
		objp = __ac_get_obj(cachep, ac, flags, force_refill);
	else
		objp = ac->entry[--ac->avail];

	return objp;
}

static noinline void *__ac_put_obj(struct kmem_cache *cachep,
			struct array_cache *ac, void *objp)
{
	if (unlikely(pfmemalloc_active)) {
		/* Some pfmemalloc slabs exist, check if this is one */
		struct page *page = virt_to_head_page(objp);
		if (PageSlabPfmemalloc(page))
			set_obj_pfmemalloc(&objp);
	}

	return objp;
}

static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
								void *objp)
{
	if (unlikely(sk_memalloc_socks()))
		objp = __ac_put_obj(cachep, ac, objp);

	ac->entry[ac->avail++] = objp;
}

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
			sizeof(void *) *nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}
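
/*
 * transfer_objects() is the bulk counterpart of ac_get_obj()/ac_put_obj():
 * callers such as __drain_alien_cache() below use it to move a whole batch
 * of object pointers into a node's shared array with one memcpy(), instead
 * of pushing them through free_block() and the slab lists one at a time.
 */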

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return (struct alien_cache **)BAD_ALIEN_MAGIC;
}

static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		 gfp_t flags, int nodeid)
{
	return NULL;
}

static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags;
}

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
	    kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote node's shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac = &alien->ac;
		spin_lock(&alien->lock);
		if (unlikely(ac->avail == ac->limit)) {
			STATS_INC_ACOVERFLOW(cachep);
			__drain_alien_cache(cachep, ac, page_node, &list);
		}
		ac_put_obj(cachep, ac, objp);
		spin_unlock(&alien->lock);
		slabs_destroy(cachep, &list);
	} else {
		n = get_node(cachep, page_node);
		spin_lock(&n->list_lock);
		free_block(cachep, &objp, 1, page_node, &list);
		spin_unlock(&n->list_lock);
		slabs_destroy(cachep, &list);
	}
	return 1;
}

static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	int page_node = page_to_nid(virt_to_page(objp));
	int node = numa_mem_id();
	/*
	 * Make sure we are not freeing an object from another node to the array
	 * cache on this cpu.
	 */
	if (likely(node == page_node))
		return 0;

	return __cache_free_alien(cachep, objp, node, page_node);
}

/*
 * Construct gfp mask to allocate from a specific node but do not invoke reclaim
 * or warn about failures.
 */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
}
#endif

/*
 * Allocates and initializes a kmem_cache_node for the given node on each
 * slab cache, used for either memory or cpu hotplug.  If memory is being
 * hot-added, the kmem_cache_node will be allocated off-node since memory
 * is not yet online for the new node.  When hotplugging memory or a cpu,
 * existing nodes are not replaced if already in use.
 *
 * Must hold slab_mutex.
 */
static int init_cache_node_node(int node)
{
	struct kmem_cache *cachep;
	struct kmem_cache_node *n;
	const size_t memsize = sizeof(struct kmem_cache_node);

	list_for_each_entry(cachep, &slab_caches, list) {
		/*
		 * Set up the kmem_cache_node for cpu before we can
		 * begin anything. Make sure some other cpu on this
		 * node has not already allocated it.
		 */
		n = get_node(cachep, node);
		if (!n) {
			n = kmalloc_node(memsize, GFP_KERNEL, node);
			if (!n)
				return -ENOMEM;
			kmem_cache_node_init(n);
			n->next_reap = jiffies + REAPTIMEOUT_NODE +
			    ((unsigned long)cachep) % REAPTIMEOUT_NODE;

			/*
			 * The kmem_cache_nodes don't come and go as CPUs
			 * come and go.  slab_mutex is sufficient
			 * protection here.
			 */
			cachep->node[node] = n;
		}

		spin_lock_irq(&n->list_lock);
		n->free_limit =
			(1 + nr_cpus_node(node)) *
			cachep->batchcount + cachep->num;
		spin_unlock_irq(&n->list_lock);