Commit 7ca43e75 authored by Akinobu Mita, committed by Linus Torvalds
Browse files

mm: use debug_kmap_atomic



Use debug_kmap_atomic in kmap_atomic, kmap_atomic_pfn, and
iomap_atomic_prot_pfn.
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f4112de6
...@@ -42,6 +42,7 @@ void *__kmap_atomic(struct page *page, enum km_type type) ...@@ -42,6 +42,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
...@@ -88,6 +89,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) ...@@ -88,6 +89,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
pagefault_disable(); pagefault_disable();
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot)); set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/page.h> #include <asm/page.h>
...@@ -94,6 +95,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro ...@@ -94,6 +95,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM #ifdef CONFIG_DEBUG_HIGHMEM
......
...@@ -39,6 +39,7 @@ void *kmap_atomic(struct page *page, enum km_type type) ...@@ -39,6 +39,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
if (!PageHighMem(page)) if (!PageHighMem(page))
return page_address(page); return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
......
...@@ -40,6 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) ...@@ -40,6 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
debug_kmap_atomic(type); debug_kmap_atomic(type);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR*smp_processor_id(); idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx))); BUG_ON(!pte_none(*(kmap_pte-idx)));
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <asm/iomap.h> #include <asm/iomap.h>
#include <asm/pat.h> #include <asm/pat.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/highmem.h>
int is_io_mapping_possible(resource_size_t base, unsigned long size) int is_io_mapping_possible(resource_size_t base, unsigned long size)
{ {
...@@ -71,6 +72,7 @@ iounmap_atomic(void *kvaddr, enum km_type type) ...@@ -71,6 +72,7 @@ iounmap_atomic(void *kvaddr, enum km_type type)
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
debug_kmap_atomic(type);
/* /*
* Force other mappings to Oops if they'll try to access this pte * Force other mappings to Oops if they'll try to access this pte
* without first remap it. Keeping stale mappings around is a bad idea * without first remap it. Keeping stale mappings around is a bad idea
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/init.h> #include <linux/init.h>
#include <linux/highmem.h>
#include <asm/mem-layout.h> #include <asm/mem-layout.h>
#include <asm/spr-regs.h> #include <asm/spr-regs.h>
#include <asm/mb-regs.h> #include <asm/mb-regs.h>
...@@ -116,6 +117,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type) ...@@ -116,6 +117,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
unsigned long paddr; unsigned long paddr;
pagefault_disable(); pagefault_disable();
debug_kmap_atomic(type);
paddr = page_to_phys(page); paddr = page_to_phys(page);
switch (type) { switch (type) {
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/highmem.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -77,6 +78,7 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type) ...@@ -77,6 +78,7 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
if (page < highmem_start_page) if (page < highmem_start_page)
return page_address(page); return page_address(page);
debug_kmap_atomic(type);
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#if HIGHMEM_DEBUG #if HIGHMEM_DEBUG
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment