/* 
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/sched.h"
#include "linux/list.h"
#include "linux/spinlock.h"
#include "linux/slab.h"
#include "linux/errno.h"
#include "linux/mm.h"
#include "asm/current.h"
#include "asm/segment.h"
#include "asm/mmu.h"
#include "asm/pgalloc.h"
#include "asm/pgtable.h"
#include "os.h"
#include "skas.h"

extern int __syscall_stub_start;

/* Install one kernel page into a process address space at address "proc",
 * present, executable, and write-protected - used for the skas0 syscall
 * stub pages.  Walks (and allocates, as needed) the pgd/pud/pmd/pte chain
 * under mm->page_table_lock.
 *
 * Returns 0 on success, -ENOMEM if any page table level cannot be
 * allocated.
 */
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	spin_lock(&mm->page_table_lock);
	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.  To
	 * avoid having to know where the stack is, or if the process mapped
	 * something at the top of its address space for some other reason,
	 * we set TASK_SIZE to end at the start of the last page table.
	 * This keeps exit_mmap off the last page, but introduces a leak
	 * of that page.  So, we hang onto it here and free it in
	 * destroy_context_skas.
	 */
	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkexec(*pte);
	*pte = pte_wrprotect(*pte);
	spin_unlock(&mm->page_table_lock);
	return(0);

	/* BUG FIX: these two label blocks were previously in the opposite
	 * order, so a pmd_alloc failure fell through from pud_free() into
	 * pmd_free(NULL), and a pte_alloc_map failure freed the pmd but
	 * leaked the pud.  Unwind in reverse order of allocation; the
	 * fallthrough from out_pte into out_pmd is deliberate.
	 */
 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	spin_unlock(&mm->page_table_lock);
	return(-ENOMEM);
}

int init_new_context_skas(struct task_struct *task, struct mm_struct *mm)
{
77 78 79 80
	struct mm_struct *cur_mm = current->mm;
	struct mm_id *mm_id = &mm->context.skas.id;
	unsigned long stack;
	int from, ret;
Linus Torvalds's avatar
Linus Torvalds committed
81

82 83 84 85
	if(proc_mm){
		if((cur_mm != NULL) && (cur_mm != &init_mm))
			from = cur_mm->context.skas.id.u.mm_fd;
		else from = -1;
Linus Torvalds's avatar
Linus Torvalds committed
86

87 88 89 90 91 92 93
		ret = new_mm(from);
		if(ret < 0){
			printk("init_new_context_skas - new_mm failed, "
			       "errno = %d\n", ret);
			return ret;
		}
		mm_id->u.mm_fd = ret;
Linus Torvalds's avatar
Linus Torvalds committed
94
	}
95 96 97 98 99 100
	else {
		/* This zeros the entry that pgd_alloc didn't, needed since
		 * we are about to reinitialize it, and want mm.nr_ptes to
		 * be accurate.
		 */
		mm->pgd[USER_PTRS_PER_PGD] = __pgd(0);
Linus Torvalds's avatar
Linus Torvalds committed
101

102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126
		ret = init_stub_pte(mm, CONFIG_STUB_CODE,
				    (unsigned long) &__syscall_stub_start);
		if(ret)
			goto out;

		ret = -ENOMEM;
		stack = get_zeroed_page(GFP_KERNEL);
		if(stack == 0)
			goto out;
		mm_id->stack = stack;

		ret = init_stub_pte(mm, CONFIG_STUB_DATA, stack);
		if(ret)
			goto out_free;

		mm->nr_ptes--;
		mm_id->u.pid = start_userspace(stack);
	}

	return 0;

 out_free:
	free_page(mm_id->stack);
 out:
	return ret;
Linus Torvalds's avatar
Linus Torvalds committed
127 128 129 130
}

void destroy_context_skas(struct mm_struct *mm)
{
131
	struct mmu_context_skas *mmu = &mm->context.skas;
Linus Torvalds's avatar
Linus Torvalds committed
132

133 134 135 136 137 138 139 140
	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else {
		os_kill_ptraced_process(mmu->id.u.pid, 1);
		free_page(mmu->id.stack);
		free_page(mmu->last_page_table);
	}
}