/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * Native page reclaim
 * Charge lifetime sanitation
 * Lockless page tracking & accounting
 * Unified hierarchy configuration model
 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on eventfd to send notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set
	 * if you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* Stuff for move charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)
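
/*
 * Example (illustrative): the OOM control file can encode its private
 * value as MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL); a handler then
 * recovers the two halves with MEMFILE_TYPE(cft->private), which yields
 * _OOM_TYPE from the upper 16 bits, and MEMFILE_ATTR(cft->private),
 * which yields OOM_CONTROL from the lower 16 bits.
 */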

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this:
 *  this works better in sparse environments, where we have a lot of memcgs,
 *  but only a few are kmem-limited. Or also, if we have, for instance, 200
 *  memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *  200-entry array for that.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids. It
 * will double each time we have to increase it.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}
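
/*
 * Usage sketch: code that walks the per-memcg cache slots takes the ids
 * semaphore for reading so that memcg_nr_cache_ids stays stable across
 * the walk:
 *
 *	memcg_get_cache_ids();
 *	for (i = 0; i < memcg_nr_cache_ids; i++)
 *		(inspect the i-th memcg cache slot)
 *	memcg_put_cache_ids();
 */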

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * cgrp_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional on this static branch, we have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged to
 * and return its inode number or 0 if @page is not charged to any cgroup. It
 * is safe to call this function without holding a reference to @page.
 *
 * Note, this function is inherently racy, because there is nothing to prevent
 * the cgroup inode from getting torn down and potentially reallocated a moment
 * after page_cgroup_ino() returns, so it only should be used by callers that
 * do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
					 struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
				       struct mem_cgroup_tree_per_zone *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
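
/*
 * Worked example: with page_counter_read() returning 1536 pages and a
 * soft_limit of 1024 pages, soft_limit_excess() reports 512; a memcg at
 * or below its soft limit reports 0.
 */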

static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_zoneinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_zone *mctz;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;

	for_each_node(nid) {
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			mctz = soft_limit_tree_node_zone(nid, zid);
			mem_cgroup_remove_exceeded(mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return page count for single (non recursive) @memcg.
 *
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of the value. We may have a chance to implement
 * a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for the user interface now. The user
 * accounts memory usage by memory cgroup and _always_ requires an exact value
 * because he accounts memory. Even if we provided a quick-and-fuzzy read, we
 * would always have to visit all online cpus and sum them up. So, for now,
 * unnecessary synchronization is not implemented. (It is just implemented for
 * cpu hotplug.)
 *
 * If there are kernel internal actions which can make use of a not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid exposing
	 * transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	unsigned long nr = 0;
	int zid;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct mem_cgroup_per_zone *mz;
		enum lru_list lru;

		for_each_lru(lru) {
			if (!(BIT(lru) & lru_mask))
				continue;
			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
			nr += mz->lru_size[lru];
		}
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_zone *mz;

		mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * css reference reached zero, so iter->position will
			 * be cleared by ->css_released. However, we should not
			 * rely on this happening soon, because ->css_released
			 * is called from a work queue, and by busy-waiting we
			 * might block it. So we clear iter->position right
			 * away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a competing
		 * thread, so check that the value hasn't changed since we read
		 * it to avoid reclaiming from the same cgroup twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_zone *mz;
	int nid, zid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
				for (i = 0; i <= DEF_PRIORITY; i++) {
					iter = &mz->iter[i];
					cmpxchg(&iter->position,
						dead_memcg, NULL);
				}
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))
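
/*
 * Usage sketch (should_stop() below is only a placeholder predicate): a
 * walk that runs to completion needs no extra cleanup, but one that bails
 * out early must drop the reference held on the current position:
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */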

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @memcg.  This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @zone: zone of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */