Commit c6e8b587 authored Feb 10, 2005 by Ralf Baechle
    Update MIPS to use the 4-level pagetable code thereby getting rid of
    the compacrapability headers.

    Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 57f0060b

Changes: 17 files
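Every hunk below makes the same mechanical change: page-table walks that used to go pgd -> pmd -> pte gain an intermediate pud step, as the generic 4-level pagetable code expects. A minimal sketch of the new lookup pattern (the helper name is hypothetical and not part of this commit; where the pud or pmd level is folded away by the asm-generic headers, the extra call compiles down to a pointer cast):

        /* sketch: resolve a virtual address to its pte, 4-level style */
        static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
        {
                pgd_t *pgd = pgd_offset(mm, addr);      /* top level             */
                pud_t *pud = pud_offset(pgd, addr);     /* new intermediate step */
                pmd_t *pmd = pmd_offset(pud, addr);     /* now takes a pud       */

                return pte_offset(pmd, addr);           /* leaf entry            */
        }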
arch/mips/lib-32/dump_tlb.c
@@ -139,6 +139,7 @@ void dump_tlb_nonwired(void)
 void dump_list_process(struct task_struct *t, void *address)
 {
         pgd_t   *page_dir, *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte, page;
         unsigned long addr, val;
@@ -162,7 +163,10 @@ void dump_list_process(struct task_struct *t, void *address)
         pgd = pgd_offset(t->mm, addr);
         printk("pgd == %08x, ", (unsigned int) pgd);

-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        printk("pud == %08x, ", (unsigned int) pud);
+
+        pmd = pmd_offset(pud, addr);
         printk("pmd == %08x, ", (unsigned int) pmd);

         pte = pte_offset(pmd, addr);
@@ -195,13 +199,15 @@ void dump_list_current(void *address)
 unsigned int vtop(void *address)
 {
         pgd_t   *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte;
         unsigned int addr, paddr;

         addr = (unsigned long) address;
         pgd = pgd_offset(current->mm, addr);
-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         pte = pte_offset(pmd, addr);
         paddr = (KSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK;
         paddr |= (addr & ~PAGE_MASK);
arch/mips/lib-32/r3k_dump_tlb.c
@@ -105,6 +105,7 @@ void dump_tlb_nonwired(void)
 void dump_list_process(struct task_struct *t, void *address)
 {
         pgd_t   *page_dir, *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte, page;
         unsigned int addr;
@@ -121,7 +122,10 @@ void dump_list_process(struct task_struct *t, void *address)
         pgd = pgd_offset(t->mm, addr);
         printk("pgd == %08x, ", (unsigned int) pgd);

-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        printk("pud == %08x, ", (unsigned int) pud);
+
+        pmd = pmd_offset(pud, addr);
         printk("pmd == %08x, ", (unsigned int) pmd);

         pte = pte_offset(pmd, addr);
@@ -149,13 +153,15 @@ void dump_list_current(void *address)
 unsigned int vtop(void *address)
 {
         pgd_t   *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte;
         unsigned int addr, paddr;

         addr = (unsigned long) address;
         pgd = pgd_offset(current->mm, addr);
-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         pte = pte_offset(pmd, addr);
         paddr = (KSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK;
         paddr |= (addr & ~PAGE_MASK);
arch/mips/lib-64/dump_tlb.c
@@ -140,6 +140,7 @@ void dump_tlb_nonwired(void)
 void dump_list_process(struct task_struct *t, void *address)
 {
         pgd_t   *page_dir, *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte, page;
         unsigned long addr, val;
@@ -155,7 +156,10 @@ void dump_list_process(struct task_struct *t, void *address)
         pgd = pgd_offset(t->mm, addr);
         printk("pgd == %016lx\n", (unsigned long) pgd);

-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        printk("pud == %016lx\n", (unsigned long) pud);
+
+        pmd = pmd_offset(pud, addr);
         printk("pmd == %016lx\n", (unsigned long) pmd);

         pte = pte_offset(pmd, addr);
@@ -184,13 +188,15 @@ void dump_list_current(void *address)
 unsigned int vtop(void *address)
 {
         pgd_t   *pgd;
+        pud_t   *pud;
         pmd_t   *pmd;
         pte_t   *pte;
         unsigned int addr, paddr;

         addr = (unsigned long) address;
         pgd = pgd_offset(current->mm, addr);
-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         pte = pte_offset(pmd, addr);
         paddr = (CKSEG1 | (unsigned int) pte_val(*pte)) & PAGE_MASK;
         paddr |= (addr & ~PAGE_MASK);
arch/mips/mm/c-r3k.c
@@ -221,12 +221,14 @@ static inline unsigned long get_phys_page (unsigned long addr,
                                            struct mm_struct *mm)
 {
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
         unsigned long physpage;

         pgd = pgd_offset(mm, addr);
-        pmd = pmd_offset(pgd, addr);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         pte = pte_offset(pmd, addr);

         if ((physpage = pte_val(*pte)) & _PAGE_VALID)
arch/mips/mm/c-r4k.c
@@ -372,12 +372,14 @@ static inline void local_r4k_flush_cache_page(void *args)
         int exec = vma->vm_flags & VM_EXEC;
         struct mm_struct *mm = vma->vm_mm;
         pgd_t *pgdp;
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;

         page &= PAGE_MASK;
         pgdp = pgd_offset(mm, page);
-        pmdp = pmd_offset(pgdp, page);
+        pudp = pud_offset(pgdp, page);
+        pmdp = pmd_offset(pudp, page);
         ptep = pte_offset(pmdp, page);

         /*
arch/mips/mm/c-tx39.c
@@ -183,6 +183,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
         int exec = vma->vm_flags & VM_EXEC;
         struct mm_struct *mm = vma->vm_mm;
         pgd_t *pgdp;
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
@@ -195,7 +196,8 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
         page &= PAGE_MASK;
         pgdp = pgd_offset(mm, page);
-        pmdp = pmd_offset(pgdp, page);
+        pudp = pud_offset(pgdp, page);
+        pmdp = pmd_offset(pudp, page);
         ptep = pte_offset(pmdp, page);

         /*
arch/mips/mm/fault.c
@@ -212,6 +212,7 @@ vmalloc_fault:
                  */
                 int offset = __pgd_offset(address);
                 pgd_t *pgd, *pgd_k;
+                pud_t *pud, *pud_k;
                 pmd_t *pmd, *pmd_k;
                 pte_t *pte_k;
@@ -222,8 +223,13 @@ vmalloc_fault:
                         goto no_context;
                 set_pgd(pgd, *pgd_k);

-                pmd = pmd_offset(pgd, address);
-                pmd_k = pmd_offset(pgd_k, address);
+                pud = pud_offset(pgd, address);
+                pud_k = pud_offset(pgd_k, address);
+                if (!pud_present(*pud_k))
+                        goto no_context;
+
+                pmd = pmd_offset(pud, address);
+                pmd_k = pmd_offset(pud_k, address);
                 if (!pmd_present(*pmd_k))
                         goto no_context;
                 set_pmd(pmd, *pmd_k);
arch/mips/mm/init.c
@@ -83,7 +83,7 @@ pte_t *kmap_pte;
 pgprot_t kmap_prot;

 #define kmap_get_fixmap_pte(vaddr)                                      \
-        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
+        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

 static void __init kmap_init(void)
 {
@@ -101,26 +101,32 @@ void __init fixrange_init(unsigned long start, unsigned long end,
         pgd_t *pgd_base)
 {
         pgd_t *pgd;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
-        int i, j;
+        int i, j, k;
         unsigned long vaddr;

         vaddr = start;
         i = __pgd_offset(vaddr);
-        j = __pmd_offset(vaddr);
+        j = __pud_offset(vaddr);
+        k = __pmd_offset(vaddr);
         pgd = pgd_base + i;

         for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
-                pmd = (pmd_t *)pgd;
-                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
-                        if (pmd_none(*pmd)) {
-                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-                                set_pmd(pmd, __pmd(pte));
-                                if (pte != pte_offset_kernel(pmd, 0))
-                                        BUG();
+                pud = (pud_t *)pgd;
+                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+                        pmd = (pmd_t *)pud;
+                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+                                if (pmd_none(*pmd)) {
+                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                                        set_pmd(pmd, __pmd(pte));
+                                        if (pte != pte_offset_kernel(pmd, 0))
+                                                BUG();
+                                }
+                                vaddr += PMD_SIZE;
                         }
-                        vaddr += PMD_SIZE;
+                        k = 0;
                 }
                 j = 0;
         }
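A note on the new triple loop in fixrange_init() above: on 32-bit, where the upper levels are folded by the asm-generic headers, PTRS_PER_PUD is 1, so the added middle loop runs exactly once per pgd entry and the casts cost nothing at run time. A hedged paraphrase of the folded constants (not the literal header contents):

        #define PTRS_PER_PUD    1       /* pud folded into the pgd           */
        #define PTRS_PER_PMD    1       /* pmd folded as well on 32-bit MIPS */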
arch/mips/mm/ioremap.c
@@ -79,9 +79,14 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
                 BUG();
         spin_lock(&init_mm.page_table_lock);
         do {
+                pud_t *pud;
                 pmd_t *pmd;
-                pmd = pmd_alloc(&init_mm, dir, address);
+
                 error = -ENOMEM;
+                pud = pud_alloc(&init_mm, dir, address);
+                if (!pud)
+                        break;
+                pmd = pmd_alloc(&init_mm, pud, address);
                 if (!pmd)
                         break;
                 if (remap_area_pmd(pmd, address, end - address,
@@ -141,7 +146,7 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
          */
         if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
             flags == _CACHE_UNCACHED)
-                return (void *) KSEG1ADDR(phys_addr);
+                return (void *) CKSEG1ADDR(phys_addr);

         /*
          * Don't allow anybody to remap normal RAM that we're using..
@@ -180,7 +185,7 @@ void * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
         return (void *) (offset + (char *)addr);
 }

-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)
+#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

 void __iounmap(volatile void __iomem *addr)
 {
@@ -190,10 +195,8 @@ void __iounmap(volatile void __iomem *addr)
                 return;

         p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
-        if (!p) {
+        if (!p)
                 printk(KERN_ERR "iounmap: bad address %p\n", addr);
-                return;
-        }

         kfree(p);
 }
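The KSEG1ADDR -> CKSEG1ADDR switch in __ioremap() lets the uncached-window shortcut work for 32-bit and 64-bit kernels alike: CKSEG1 is the sign-extended form of the classic kseg1 segment, so a 32-bit build sees the same address as before. A hedged sketch of the arithmetic, assuming the usual MIPS segment layout:

        /* physical 0x00000000 - 0x1fffffff is visible uncached at:    */
        /*   KSEG1  = 0xa0000000          (32-bit view)                */
        /*   CKSEG1 = 0xffffffffa0000000  (sign-extended 64-bit view)  */
        /* CKSEG1ADDR(phys) is roughly CKSEG1 | (phys & 0x1fffffff)    */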
arch/mips/mm/pgtable-32.c
@@ -35,6 +35,7 @@ void __init pagetable_init(void)
 #ifdef CONFIG_HIGHMEM
         unsigned long vaddr;
         pgd_t *pgd, *pgd_base;
+        pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
 #endif
@@ -60,7 +61,8 @@ void __init pagetable_init(void)
         fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

         pgd = swapper_pg_dir + __pgd_offset(vaddr);
-        pmd = pmd_offset(pgd, vaddr);
+        pud = pud_offset(pgd, vaddr);
+        pmd = pmd_offset(pud, vaddr);
         pte = pte_offset_kernel(pmd, vaddr);
         pkmap_page_table = pte;
 #endif
arch/mips/mm/tlb-andes.c
@@ -195,6 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 {
         unsigned long flags;
         pgd_t *pgdp;
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
         int idx, pid;
@@ -220,7 +221,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
         write_c0_entryhi(address | (pid));
         pgdp = pgd_offset(vma->vm_mm, address);
         tlb_probe();
-        pmdp = pmd_offset(pgdp, address);
+        pudp = pud_offset(pgdp, address);
+        pmdp = pmd_offset(pudp, address);
         idx = read_c0_index();
         ptep = pte_offset_map(pmdp, address);
         write_c0_entrylo0(pte_val(*ptep++) >> 6);
arch/mips/mm/tlb-r4k.c
@@ -227,6 +227,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 {
         unsigned long flags;
         pgd_t *pgdp;
+        pud_t *pudp;
         pmd_t *pmdp;
         pte_t *ptep;
         int idx, pid;
@@ -246,7 +247,8 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
         mtc0_tlbw_hazard();
         tlb_probe();
         BARRIER;
-        pmdp = pmd_offset(pgdp, address);
+        pudp = pud_offset(pgdp, address);
+        pmdp = pmd_offset(pudp, address);
         idx = read_c0_index();
         ptep = pte_offset_map(pmdp, address);
include/asm-mips/page.h
@@ -87,22 +87,48 @@ static inline void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
 typedef struct { unsigned long pte; } pte_t;
 #define pte_val(x)      ((x).pte)
 #endif
+#define __pte(x)        ((pte_t) { (x) } )

-typedef struct { unsigned long pmd; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
+/*
+ * For 3-level pagetables we defines these ourselves, for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+#ifdef CONFIG_64BIT

-#define pmd_val(x)      ((x).pmd)
-#define pgd_val(x)      ((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)      ((x).pmd)
+#define __pmd(x)        ((pmd_t) { (x) } )

-#define ptep_buddy(x)   ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
+#endif

-#define __pte(x)        ((pte_t) { (x) } )
-#define __pmd(x)        ((pmd_t) { (x) } )
-#define __pgd(x)        ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
+/*
+ * Right now we don't support 4-level pagetables, so all pud-related
+ * definitions come from <asm-generic/pgtable-nopud.h>.
+ */
+
+/*
+ * Finall the top of the hierarchy, the pgd
+ */
+typedef struct { unsigned long pgd; } pgd_t;
+#define pgd_val(x)      ((x).pgd)
+#define __pgd(x)        ((pgd_t) { (x) } )
+
+/*
+ * Manipulate page protection bits
+ */
+typedef struct { unsigned long pgprot; } pgprot_t;
+#define pgprot_val(x)   ((x).pgprot)
+#define __pgprot(x)     ((pgprot_t) { (x) } )
+
+/*
+ * On R4000-style MMUs where a TLB entry is mapping a adjacent even / odd
+ * pair of pages we only have a single global bit per pair of pages.  When
+ * writing to the TLB make sure we always have the bit set for both pages
+ * or none.  This macro is used to access the `buddy' of the pte we're just
+ * working on.
+ */
+#define ptep_buddy(x)   ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

 #endif /* !__ASSEMBLY__ */

 /* to align the pointer to the (next) page boundary */
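Since MIPS still uses at most three levels, the pud type is not defined here; it comes from <asm-generic/pgtable-nopud.h>, which folds the pud into the pgd. A hedged paraphrase of what that folding amounts to (not the literal generic header):

        /* pud folded into pgd: one pud per pgd entry, offset is just a cast */
        typedef struct { pgd_t pgd; } pud_t;

        static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
        {
                return (pud_t *)pgd;
        }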
include/asm-mips/pgalloc.h
@@ -25,11 +25,23 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
         set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
 }

+/*
+ * Initialize a new pmd table with invalid pointers.
+ */
+extern void pmd_init(unsigned long page, unsigned long pagetable);
+
+#ifdef CONFIG_64BIT
+
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+        set_pud(pud, __pud((unsigned long)pmd));
+}
+#endif
+
 /*
  * Initialize a new pgd / pmd table with invalid pointers.
  */
 extern void pgd_init(unsigned long page);
-extern void pmd_init(unsigned long page, unsigned long pagetable);

 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
@@ -86,21 +98,18 @@ static inline void pte_free(struct page *pte)
 #define __pte_free_tlb(tlb,pte)  tlb_remove_page((tlb),(pte))

 #ifdef CONFIG_32BIT
-#define pgd_populate(mm, pmd, pte)      BUG()
-
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
  */
 #define pmd_alloc_one(mm, addr)         ({ BUG(); ((pmd_t *)2); })
 #define pmd_free(x)                     do { } while (0)
 #define __pmd_free_tlb(tlb,x)           do { } while (0)
 #endif

 #ifdef CONFIG_64BIT
-#define pgd_populate(mm, pgd, pmd)      set_pgd(pgd, __pgd(pmd))

 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pmd_t *pmd;
include/asm-mips/pgtable-32.h
@@ -17,6 +17,8 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>

+#include <asm-generic/pgtable-nopmd.h>
+
 /*
  * - add_wired_entry() add a fixed TLB entry, and move wired register
  */
@@ -42,35 +44,35 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
  */

-/* PMD_SHIFT determines the size of the area a second-level page table can map */
-#ifdef CONFIG_64BIT_PHYS_ADDR
-#define PMD_SHIFT       21
-#else
-#define PMD_SHIFT       22
-#endif
-#define PMD_SIZE        (1UL << PMD_SHIFT)
-#define PMD_MASK        (~(PMD_SIZE-1))
-
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT     PMD_SHIFT
+#ifdef CONFIG_64BIT_PHYS_ADDR
+#define PGDIR_SHIFT     21
+#else
+#define PGDIR_SHIFT     22
+#endif
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))

 /*
  * Entries per page directory level: we use two-level, so
- * we don't really have any PMD directory physically.
+ * we don't really have any PUD/PMD directory physically.
  */
 #ifdef CONFIG_64BIT_PHYS_ADDR
 #define PGD_ORDER       1
-#define PMD_ORDER       0
+#define PUD_ORDER       aieeee_attempt_to_allocate_pud
+#define PMD_ORDER       1
 #define PTE_ORDER       0
 #else
 #define PGD_ORDER       0
-#define PMD_ORDER       0
+#define PUD_ORDER       aieeee_attempt_to_allocate_pud
+#define PMD_ORDER       1
 #define PTE_ORDER       0
 #endif

 #define PTRS_PER_PGD    ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
-#define PTRS_PER_PMD    1
 #define PTRS_PER_PTE    ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

 #define USER_PTRS_PER_PGD       (0x80000000UL/PGDIR_SIZE)
@@ -91,8 +93,6 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define pte_ERROR(e) \
         printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #endif
-#define pmd_ERROR(e) \
-        printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
         printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
@@ -120,16 +120,6 @@ static inline void pmd_clear(pmd_t *pmdp)
         pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }

-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)           { return 0; }
-static inline int pgd_bad(pgd_t pgd)            { return 0; }
-static inline int pgd_present(pgd_t pgd)        { return 1; }
-static inline void pgd_clear(pgd_t *pgdp)       { }
-
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x)     pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)      ((unsigned long)((x).pte_high >> 6))
@@ -166,12 +156,6 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 /* to find an entry in a page-table-directory */
 #define pgd_offset(mm,addr)     ((mm)->pgd + pgd_index(addr))

-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
-{
-        return (pmd_t *) dir;
-}
-
 /* Find an entry in the third-level page table.. */
 #define __pte_offset(address)                                           \
         (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
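Worked example of the resulting 32-bit layout, assuming 4K pages (PAGE_SHIFT == 12) and the non-CONFIG_64BIT_PHYS_ADDR values above: PGDIR_SHIFT == 22, so each pgd entry maps 4 MB, and with PGD_ORDER == 0 a single 4K page of 4-byte pgd entries gives 1024 entries, covering the full 4 GB address space with a two-level tree:

        PGDIR_SIZE   = 1UL << 22           /* 4 MB per pgd entry            */
        PTRS_PER_PGD = 4096 / 4 = 1024     /* one 4K page of 4-byte entries */
        coverage     = 1024 * 4 MB = 4 GB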
include/asm-mips/pgtable-64.h
@@ -16,13 +16,15 @@
 #include <asm/page.h>
 #include <asm/cachectl.h>

+#include <asm-generic/pgtable-nopud.h>
+
 /*
  * Each address space has 2 4K pages as its page directory, giving 1024
  * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
- * pair of 4K pages, giving 1024 (== PTRS_PER_PMD) 8 byte pointers to
- * page tables. Each page table is a single 4K page, giving 512 (==
- * PTRS_PER_PTE) 8 byte ptes. Each pgde is initialized to point to
- * invalid_pmd_table, each pmde is initialized to point to
+ * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
+ * tables. Each page table is also a single 4K page, giving 512 (==
+ * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
+ * invalid_pmd_table, each pmd entry is initialized to point to
  * invalid_pte_table, each pte is initialized to 0. When memory is low,
  * and a pmd table or a page table allocation fails, empty_bad_pmd_table
  * and empty_bad_page_table is returned back to higher layer code, so
@@ -36,17 +38,17 @@
  */

 /* PMD_SHIFT determines the size of the area a second-level page table can map */
-#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT - 3))
+#define PMD_SHIFT       (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))

 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT     (PMD_SHIFT + (PAGE_SHIFT + 1 - 3))
+#define PGDIR_SHIFT     (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))

 /*
- * For 4kB page size we use a 3 level page tree and a 8kB pmd and pgds which
+ * For 4kB page size we use a 3 level page tree and an 8kB pud, which
  * permits us mapping 40 bits of virtual address space.
  *
  * We used to implement 41 bits by having an order 1 pmd level but that seemed
@@ -65,21 +67,25 @@
  */
 #ifdef CONFIG_PAGE_SIZE_4KB
 #define PGD_ORDER               1
+#define PUD_ORDER               aieeee_attempt_to_allocate_pud
 #define PMD_ORDER               0
 #define PTE_ORDER               0
 #endif
 #ifdef CONFIG_PAGE_SIZE_8KB
 #define PGD_ORDER               0
+#define PUD_ORDER               aieeee_attempt_to_allocate_pud
 #define PMD_ORDER               0
 #define PTE_ORDER               0
 #endif
 #ifdef CONFIG_PAGE_SIZE_16KB
 #define PGD_ORDER               0
+#define PUD_ORDER               aieeee_attempt_to_allocate_pud
 #define PMD_ORDER               0
 #define PTE_ORDER               0
 #endif
 #ifdef CONFIG_PAGE_SIZE_64KB
 #define PGD_ORDER               0
+#define PUD_ORDER               aieeee_attempt_to_allocate_pud
 #define PMD_ORDER               0
 #define PTE_ORDER               0
 #endif
@@ -102,10 +108,10 @@
 #define pgd_ERROR(e) \
         printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

-extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
-extern pte_t empty_bad_page_table[PAGE_SIZE/sizeof(pte_t)];
-extern pmd_t invalid_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
-extern pmd_t empty_bad_pmd_table[2*PAGE_SIZE/sizeof(pmd_t)];
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
+extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
+extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];

 /*
  * Empty pmd entries point to the invalid_pte_table.
@@ -130,21 +136,24 @@ static inline void pmd_clear(pmd_t *pmdp)
 /*
  * Empty pgd entries point to the invalid_pmd_table.
  */
-static inline int pgd_none(pgd_t pgd)
+static inline int pud_none(pud_t pud)
 {

[capture ends here; the remainder of this hunk is not included in this page]
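Worked example for the 64-bit case, assuming CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12) and the orders defined above: PMD_SHIFT = 12 + (12 + 0 - 3) = 21, so a pmd entry maps 2 MB; PGDIR_SHIFT = 21 + (12 + 0 - 3) = 30, so a pgd entry maps 1 GB; and with PGD_ORDER == 1 the pgd spans two 4K pages, i.e. 8192 / 8 = 1024 entries, for 1024 * 1 GB = 2^40 bytes of virtual address space, matching the "40 bits" noted in the comment above:

        PMD_SHIFT    = 12 + (12 + PTE_ORDER - 3) = 21   /* 2 MB per pmd entry */
        PGDIR_SHIFT  = 21 + (12 + PMD_ORDER - 3) = 30   /* 1 GB per pgd entry */
        PTRS_PER_PGD = (2 * 4096) / 8 = 1024
        coverage     = 1024 << 30 = 2^40 bytes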