/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "asm/ldt.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
         * checks that the number of page tables freed is the same as had
         * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */

57
        mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
Jeff Dike's avatar
Jeff Dike committed
58 59 60
#ifdef CONFIG_3_LEVEL_PGTABLES
        mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif
61 62

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
Jeff Dike's avatar
Jeff Dike committed
63 64 65 66
	/* This is wrong for the code page, but it doesn't matter since the
	 * stub is mapped by hand with the correct permissions.
	 */
	*pte = pte_mkwrite(*pte);
67 68 69 70 71 72 73 74 75 76
	return(0);

 out_pmd:
	pud_free(pud);
 out_pte:
	pmd_free(pmd);
 out:
	return(-ENOMEM);
}

Linus Torvalds's avatar
Linus Torvalds committed
77 78
int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
79 80
 	struct mmu_context_skas *from_mm = NULL;
	struct mmu_context_skas *to_mm = &mm->context.skas;
81
	unsigned long stack = 0;
82
	int ret = -ENOMEM;
Linus Torvalds's avatar
Linus Torvalds committed
83

84
	if(skas_needs_stub){
85 86 87
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;
Linus Torvalds's avatar
Linus Torvalds committed
88

89 90 91 92 93
		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
Linus Torvalds's avatar
Linus Torvalds committed
94

95 96 97
		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
98
			goto out_free;
99 100 101 102 103 104

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

		mm->nr_ptes--;
105
	}
106 107 108 109

	to_mm->id.stack = stack;
	if(current->mm != NULL && current->mm != &init_mm)
		from_mm = &current->mm->context.skas;
110

111
	if(proc_mm){
112
		ret = new_mm(stack);
113 114 115 116 117
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			goto out_free;
		}
118
		to_mm->id.u.mm_fd = ret;
119 120
	}
	else {
121 122 123 124 125 126 127 128 129 130 131
		if(from_mm)
			to_mm->id.u.pid = copy_context_skas0(stack,
							     from_mm->id.u.pid);
		else to_mm->id.u.pid = start_userspace(stack);
	}

	ret = init_new_ldt(to_mm, from_mm);
	if(ret < 0){
		printk("init_new_context_skas - init_ldt"
		       " failed, errno = %d\n", ret);
		goto out_free;
132 133 134 135 136
	}

	return 0;

 out_free:
137 138
	if(to_mm->id.stack != 0)
		free_page(to_mm->id.stack);
139 140
 out:
	return ret;
Linus Torvalds's avatar
Linus Torvalds committed
141 142 143 144
}

void destroy_context_skas(struct mm_struct *mm)
{
145
	struct mmu_context_skas *mmu = &mm->context.skas;
Linus Torvalds's avatar
Linus Torvalds committed
146

147 148
	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
149
	else
150
		os_kill_ptraced_process(mmu->id.u.pid, 1);
151 152

	if(!proc_mm || !ptrace_faultinfo){
153
		free_page(mmu->id.stack);
154
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
Jeff Dike's avatar
Jeff Dike committed
155
		pte_free_kernel((pte_t *) mmu->last_page_table);
156
		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
Jeff Dike's avatar
Jeff Dike committed
157 158 159
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
160 161
	}
}