
Commit c4a25635 authored by Mel Gorman, committed by Linus Torvalds

mm: move vmscan writes and file write accounting to the node

As reclaim is now node-based, it follows that page write activity due to
page reclaim should also be accounted for on the node.  For consistency,
also account page writes and page dirtying on a per-node basis.
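For illustration, the mechanical shape of the change at each call site is a single helper swap, from the per-zone accounting API to the per-node one. The sketch below is not part of the patch; the wrapper name account_vmscan_write is hypothetical, but both helpers appear in the diff and are declared in include/linux/vmstat.h.

#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Hypothetical wrapper, for illustration only: a reclaim-driven write
 * used to be charged to the zone the page sits in; after this patch it
 * is charged to the page's owning node (pgdat) instead.
 */
static void account_vmscan_write(struct page *page)
{
	/* Before: inc_zone_page_state(page, NR_VMSCAN_WRITE); */
	inc_node_page_state(page, NR_VMSCAN_WRITE);
}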

After this patch, there are a few remaining zone counters that may appear
strange but are fine.  NUMA stats are still per-zone as this is a
user-space interface that tools consume.  NR_MLOCK, NR_SLAB_*,
NR_PAGETABLE, NR_KERNEL_STACK and NR_BOUNCE are all allocations that
potentially pin low memory and cannot trivially be reclaimed on demand.
This information is still useful for debugging a page allocation failure
warning.
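As a user-space illustration of where the moved counters now surface, the sketch below reads them from the per-node vmstat file. It assumes a kernel with this series applied, that node0 exists on the machine, and that the per-node sysfs vmstat file lists these counter names; none of this is part of the patch itself.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Per-node counters; node0 is assumed to exist on this machine. */
	FILE *f = fopen("/sys/devices/system/node/node0/vmstat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Print only the counters this patch moved to the node. */
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "nr_vmscan_write") ||
		    strstr(line, "nr_vmscan_immediate_reclaim") ||
		    strstr(line, "nr_dirtied") ||
		    strstr(line, "nr_written"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}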

Link: http://lkml.kernel.org/r/1467970510-21195-21-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 11fb9989
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -122,10 +122,6 @@ enum zone_stat_item {
 	NR_KERNEL_STACK,
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
-	NR_VMSCAN_WRITE,
-	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
-	NR_DIRTIED,		/* page dirtyings since bootup */
-	NR_WRITTEN,		/* page writings since bootup */
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	NR_ZSPAGES,		/* allocated in zsmalloc */
 #endif
@@ -165,6 +161,10 @@ enum node_stat_item {
 	NR_SHMEM_PMDMAPPED,
 	NR_ANON_THPS,
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_VMSCAN_WRITE,
+	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
+	NR_DIRTIED,		/* page dirtyings since bootup */
+	NR_WRITTEN,		/* page writings since bootup */
 	NR_VM_NODE_STAT_ITEMS
 };
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -415,8 +415,8 @@ TRACE_EVENT(global_dirty_state,
 		__entry->nr_dirty	= global_node_page_state(NR_FILE_DIRTY);
 		__entry->nr_writeback	= global_node_page_state(NR_WRITEBACK);
 		__entry->nr_unstable	= global_node_page_state(NR_UNSTABLE_NFS);
-		__entry->nr_dirtied	= global_page_state(NR_DIRTIED);
-		__entry->nr_written	= global_page_state(NR_WRITTEN);
+		__entry->nr_dirtied	= global_node_page_state(NR_DIRTIED);
+		__entry->nr_written	= global_node_page_state(NR_WRITTEN);
 		__entry->background_thresh = background_thresh;
 		__entry->dirty_thresh	= dirty_thresh;
 		__entry->dirty_limit	= global_wb_domain.dirty_limit;
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2461,7 +2461,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
 		__inc_node_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		__inc_zone_page_state(page, NR_DIRTIED);
+		__inc_node_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
 		task_io_account_write(PAGE_SIZE);
@@ -2550,7 +2550,7 @@ void account_page_redirty(struct page *page)
 	wb = unlocked_inode_to_wb_begin(inode, &locked);
 	current->nr_dirtied--;
-	dec_zone_page_state(page, NR_DIRTIED);
+	dec_node_page_state(page, NR_DIRTIED);
 	dec_wb_stat(wb, WB_DIRTIED);
 	unlocked_inode_to_wb_end(inode, locked);
 }
@@ -2787,7 +2787,7 @@ int test_clear_page_writeback(struct page *page)
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
 		dec_node_page_state(page, NR_WRITEBACK);
 		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
-		inc_zone_page_state(page, NR_WRITTEN);
+		inc_node_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
 	return ret;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -612,7 +612,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 			ClearPageReclaim(page);
 		}
 		trace_mm_vmscan_writepage(page);
-		inc_zone_page_state(page, NR_VMSCAN_WRITE);
+		inc_node_page_state(page, NR_VMSCAN_WRITE);
 		return PAGE_SUCCESS;
 	}
@@ -1117,7 +1117,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * except we already have the page isolated
 				 * and know it's dirty
 				 */
-				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
 				SetPageReclaim(page);
 				goto keep_locked;
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -931,10 +931,6 @@ const char * const vmstat_text[] = {
 	"nr_page_table_pages",
 	"nr_kernel_stack",
 	"nr_bounce",
-	"nr_vmscan_write",
-	"nr_vmscan_immediate_reclaim",
-	"nr_dirtied",
-	"nr_written",
 #if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
 #endif
@@ -971,6 +967,10 @@ const char * const vmstat_text[] = {
 	"nr_shmem_pmdmapped",
 	"nr_anon_transparent_hugepages",
 	"nr_unstable",
+	"nr_vmscan_write",
+	"nr_vmscan_immediate_reclaim",
+	"nr_dirtied",
+	"nr_written",
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",