Commit 4db7548c authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: defer debugging checks of freed pages until a PCP drain

Every page free checks a number of page fields for validity.  This
catches premature frees and corruption, but it is also expensive.  This
patch weakens the check on the free fast path by deferring it for PCP
pages until they are drained from the PCP list.  A corrupt page still
triggers the bad_page() report, but the site that freed it is lost.  To
get the full context, rebuild the kernel with CONFIG_DEBUG_VM, which
keeps the full checks at free time.
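To illustrate the pattern outside the kernel, here is a minimal,
self-contained C sketch (all names such as struct object, free_object
and drain_cache are hypothetical, not kernel code): the expensive
validity check moves off the per-object free path and runs only when a
small free cache is drained in bulk, so corruption is still detected,
just later and without the freeing call site.

/*
 * Toy model of deferring free-time checks to a bulk drain.
 * Hypothetical names; this is a sketch, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SIZE 4
#define GOOD_MAGIC 0xC0FFEE

struct object {
	int magic;			/* stand-in for the page fields */
};

static struct object *cache[CACHE_SIZE];	/* toy free cache */
static int cached;

/* The expensive validity check, now run only at drain time. */
static int object_is_sane(const struct object *obj)
{
	return obj->magic == GOOD_MAGIC;
}

static void drain_cache(void)
{
	for (int i = 0; i < cached; i++) {
		if (!object_is_sane(cache[i])) {
			/* Bug still fires, but the freeing site is gone. */
			fprintf(stderr, "bad object %p found at drain\n",
				(void *)cache[i]);
			continue;	/* skip it, as bulkfree does */
		}
		free(cache[i]);		/* hand back to the real allocator */
	}
	cached = 0;
}

/* Fast path: no validity check here any more, just queue the object. */
static void free_object(struct object *obj)
{
	cache[cached++] = obj;
	if (cached == CACHE_SIZE)
		drain_cache();
}

int main(void)
{
	for (int i = 0; i < CACHE_SIZE; i++) {
		struct object *obj = malloc(sizeof(*obj));
		obj->magic = GOOD_MAGIC;
		if (i == 2)
			obj->magic = 0;	/* simulate corruption */
		free_object(obj);	/* cheap; check happens later */
	}
	return 0;
}

The trade-off is the same one the patch makes: the common case gets
cheaper, and the diagnostic degrades from "this caller freed a bad
page" to "a bad page was freed at some point since the last drain".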

[akpm@linux-foundation.org: fix build]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 002f2906
@@ -941,6 +941,103 @@ static inline int free_pages_check(struct page *page)
 	return 1;
 }
 
+static int free_tail_pages_check(struct page *head_page, struct page *page)
+{
+	int ret = 1;
+
+	/*
+	 * We rely page->lru.next never has bit 0 set, unless the page
+	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
+	 */
+	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
+
+	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+		ret = 0;
+		goto out;
+	}
+	switch (page - head_page) {
+	case 1:
+		/* the first tail page: ->mapping is compound_mapcount() */
+		if (unlikely(compound_mapcount(page))) {
+			bad_page(page, "nonzero compound_mapcount", 0);
+			goto out;
+		}
+		break;
+	case 2:
+		/*
+		 * the second tail page: ->mapping is
+		 * page_deferred_list().next -- ignore value.
+		 */
+		break;
+	default:
+		if (page->mapping != TAIL_MAPPING) {
+			bad_page(page, "corrupted mapping in tail page", 0);
+			goto out;
+		}
+		break;
+	}
+	if (unlikely(!PageTail(page))) {
+		bad_page(page, "PageTail not set", 0);
+		goto out;
+	}
+	if (unlikely(compound_head(page) != head_page)) {
+		bad_page(page, "compound_head not consistent", 0);
+		goto out;
+	}
+	ret = 0;
+out:
+	page->mapping = NULL;
+	clear_compound_head(page);
+	return ret;
+}
+
+static bool free_pages_prepare(struct page *page, unsigned int order);
+
+#ifdef CONFIG_DEBUG_VM
+static inline bool free_pcp_prepare(struct page *page)
+{
+	return free_pages_prepare(page, 0);
+}
+
+static inline bool bulkfree_pcp_prepare(struct page *page)
+{
+	return false;
+}
+#else
+static bool free_pcp_prepare(struct page *page)
+{
+	VM_BUG_ON_PAGE(PageTail(page), page);
+
+	trace_mm_page_free(page, 0);
+	kmemcheck_free_shadow(page, 0);
+	kasan_free_pages(page, 0);
+
+	if (PageAnonHead(page))
+		page->mapping = NULL;
+
+	reset_page_owner(page, 0);
+
+	if (!PageHighMem(page)) {
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE);
+		debug_check_no_obj_freed(page_address(page),
+					   PAGE_SIZE);
+	}
+	arch_free_page(page, 0);
+	kernel_poison_pages(page, 0, 0);
+	kernel_map_pages(page, 0, 0);
+
+	page_cpupid_reset_last(page);
+	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+
+	return true;
+}
+
+static bool bulkfree_pcp_prepare(struct page *page)
+{
+	return free_pages_check(page);
+}
+#endif /* CONFIG_DEBUG_VM */
+
 /*
  * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
@@ -1002,6 +1099,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 			if (unlikely(isolated_pageblocks))
 				mt = get_pageblock_migratetype(page);
 
+			if (bulkfree_pcp_prepare(page))
+				continue;
+
 			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
 			trace_mm_page_pcpu_drain(page, 0, mt);
 		} while (--count && --batch_free && !list_empty(list));
@@ -1028,56 +1128,6 @@ static void free_one_page(struct zone *zone,
 	spin_unlock(&zone->lock);
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
-{
-	int ret = 1;
-
-	/*
-	 * We rely page->lru.next never has bit 0 set, unless the page
-	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
-	 */
-	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
-
-	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
-		ret = 0;
-		goto out;
-	}
-	switch (page - head_page) {
-	case 1:
-		/* the first tail page: ->mapping is compound_mapcount() */
-		if (unlikely(compound_mapcount(page))) {
-			bad_page(page, "nonzero compound_mapcount", 0);
-			goto out;
-		}
-		break;
-	case 2:
-		/*
-		 * the second tail page: ->mapping is
-		 * page_deferred_list().next -- ignore value.
-		 */
-		break;
-	default:
-		if (page->mapping != TAIL_MAPPING) {
-			bad_page(page, "corrupted mapping in tail page", 0);
-			goto out;
-		}
-		break;
-	}
-	if (unlikely(!PageTail(page))) {
-		bad_page(page, "PageTail not set", 0);
-		goto out;
-	}
-	if (unlikely(compound_head(page) != head_page)) {
-		bad_page(page, "compound_head not consistent", 0);
-		goto out;
-	}
-	ret = 0;
-out:
-	page->mapping = NULL;
-	clear_compound_head(page);
-	return ret;
-}
-
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 					unsigned long zone, int nid)
 {
@@ -2339,7 +2389,7 @@ void free_hot_cold_page(struct page *page, bool cold)
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
 
-	if (!free_pages_prepare(page, 0))
+	if (!free_pcp_prepare(page))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);