#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all the fields
 * provided here in its own definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition into these allocators so that the
 * separate declarations of these fields in the kmem_cache structures
 * of SLAB and SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
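
/*
 * Illustrative sketch: allocator code typically gates early allocations on
 * slab_state while bootstrapping, e.g.
 *
 *	if (slab_state >= UP)
 *		p = kmalloc(size, GFP_NOWAIT);	(kmalloc caches are usable)
 *	else
 *		p = bootstrap_alloc(size);	(bootstrap_alloc() is hypothetical)
 *
 * The exact checks differ per allocator; the state is advanced by the
 * allocators' init code (e.g. create_kmalloc_caches() brings it to UP).
 */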

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
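
/*
 * Descriptive note: these declarations back the /proc/slabinfo interface.
 * get_slabinfo() and slabinfo_show_stats() are filled in by the individual
 * allocators, while the seq_file plumbing lives in slab_common.c.
 */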

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the listed objects
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
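
/*
 * Sketch of what the generic fallback does (objects are handled one at a
 * time; allocators with faster bulk paths provide their own versions):
 *
 *	for (i = 0; i < nr; i++) {
 *		void *x = kmem_cache_alloc(s, flags);
 *		if (!x) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *		p[i] = x;
 *	}
 *	return i;
 */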

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
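
/*
 * Usage sketch (process_cache() is just a placeholder):
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root_cache)
 *		process_cache(c);
 */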

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * Memcg caches get a suffix appended to their name because two caches in
 * the system cannot share the same name. When we print them locally,
 * however, it is better to refer to them by the base (root cache) name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * The caller must therefore ensure the memcg's cache won't go away, by either
 * taking a css reference to the owner cgroup or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = __memcg_kmem_charge_memcg(page, gfp, order,
					s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}
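
/*
 * Sketch of the intended call sites: the allocators charge a freshly
 * allocated slab page to the owning memcg and uncharge it again on release,
 * roughly (out_free_page is a hypothetical error label):
 *
 *	if (memcg_charge_slab(page, gfp, order, s))
 *		goto out_free_page;
 *	...
 *	memcg_uncharge_slab(page, order, s);
 */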

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
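
/*
 * Usage sketch: the kmem_cache_free() paths call cache_from_obj() to map the
 * passed-in cache (possibly a per-memcg child) to the cache the object was
 * actually allocated from, and to warn when an object is freed to the wrong
 * cache:
 *
 *	s = cache_from_obj(s, x);
 *	... free x into s ...
 */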

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc. for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	return memcg_kmem_get_cache(s, flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}
	memcg_kmem_put_cache(s);
}
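
/*
 * Sketch of how an allocator pairs the two hooks above around its fast path
 * (illustrative only):
 *
 *	s = slab_pre_alloc_hook(s, flags);
 *	if (!s)
 *		return NULL;
 *	... allocate size objects into p[] ...
 *	slab_post_alloc_hook(s, flags, size, p);
 */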

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
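
/*
 * Usage sketch (count_objects() is just a placeholder):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		total += count_objects(n);
 */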

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#endif /* MM_SLAB_H */