/* highmem.c */

#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

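/*
 * Usage sketch (illustrative, not part of the original file): clearing a
 * highmem page through a short-lived atomic mapping.  KM_USER0 is just one
 * of the per-CPU kmap slots and the function name is made up for the
 * example; nothing between __kmap_atomic() and __kunmap_atomic() may sleep.
 *
 *	static void example_clear_highpage(struct page *page)
 *	{
 *		void *vaddr = __kmap_atomic(page, KM_USER0);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		__kunmap_atomic(vaddr, KM_USER0);
 *	}
 */
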
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}

void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}
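
/*
 * Usage sketch (illustrative, not part of the original file): reading a
 * word from a physical address that has no struct page behind it, e.g. a
 * firmware or device region.  The helper name is made up and KM_PTE0 is
 * just one of the per-CPU kmap slots.
 *
 *	static u32 example_read_phys_word(unsigned long phys)
 *	{
 *		void *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT, KM_PTE0);
 *		u32 val = *(u32 *)((char *)vaddr + (phys & ~PAGE_MASK));
 *
 *		__kunmap_atomic(vaddr, KM_PTE0);
 *		return val;
 *	}
 */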

struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);