/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
 * these two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory but reclaims memory from all available zones, so the
 * "where I want memory from" bits of gfp_mask have no meaning. Any bits of
 * that field could be used, but having a rule is better: charge functions'
 * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
 * avoid ambiguous code.
 * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
 */
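/*
 * Illustrative sketch (not part of this header): a page-cache caller would
 * charge a page roughly like this before inserting it into the mapping,
 * passing only the reclaim bits of its own allocation mask:
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		return error;
 */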

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
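/*
 * Illustrative sketch of the try/commit/cancel swap-in protocol above
 * (hypothetical, simplified fault-handler style caller):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	... map the page ...
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * Error paths taken after a successful try_charge must instead call
 * mem_cgroup_cancel_charge_swapin(memcg).
 */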

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharge for reducing memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
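/*
 * Illustrative sketch (an assumption, not taken from this header): callers
 * freeing many pages can batch the uncharges by bracketing the loop:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */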

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
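/*
 * Illustrative sketch of how a user-space fault path is expected to use the
 * OOM hooks above (simplified; handle_the_fault() is a hypothetical stand-in
 * for the real fault handler):
 *
 *	if (flags & FAULT_FLAG_USER)
 *		mem_cgroup_oom_enable();
 *	ret = handle_the_fault(mm, vma, address, flags);
 *	if (flags & FAULT_FLAG_USER) {
 *		mem_cgroup_oom_disable();
 *		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *			mem_cgroup_oom_synchronize(false);
 *	}
 */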

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
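/*
 * Illustrative sketch (an assumption, modeled on a file-rmap style update):
 * the begin/end pair above protects the stat update against charge moving:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */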

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					  struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			      int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

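/*
 * Illustrative sketch (assumes a lookup helper along the lines of
 * cache_from_memcg_idx() in mm/slab.h): each slot must be re-checked for
 * NULL, with slab_mutex held around the whole walk:
 *
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg_idx(s, i);
 *		if (!c)
 *			continue;
 *		... operate on the per-memcg child cache ...
 *	}
 *	mutex_unlock(&slab_mutex);
 */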
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users for the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly.  If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
			     struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);
void memcg_register_cache(struct kmem_cache *s);
void memcg_unregister_cache(struct kmem_cache *s);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * there is no need to specify memcg here, since it is embedded in page_cgroup
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
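/*
 * Illustrative sketch of the charge/commit pairing documented above
 * (simplified page-allocator style caller, not part of this header):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 *
 * memcg_kmem_commit_charge() reverts the charge if the allocation failed
 * (page == NULL), and the matching free side calls
 * memcg_kmem_uncharge_pages(page, order).
 */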

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process.  Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
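/*
 * Illustrative sketch (an assumption, simplified slab hot path; the
 * allocation step itself is left abstract): an allocator swaps in the
 * per-memcg cache before carving out an object:
 *
 *	s = memcg_kmem_get_cache(s, gfpflags);
 *	object = allocate_object_from(s);
 */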
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}

static inline void memcg_register_cache(struct kmem_cache *s)
{
}

static inline void memcg_unregister_cache(struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */