Commit 844ab6f9 authored by Jacob Shin, committed by H. Peter Anvin

x86, mm: Find_early_table_space based on ranges that are actually being mapped

Current logic finds enough space for direct mapping page tables from 0
to end. Instead, we only need to find enough space to cover mr[0].start
to mr[nr_range - 1].end -- the range that is actually being mapped by
init_memory_mapping().
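
To make the arithmetic concrete, here is a minimal user-space sketch of
the new per-range estimate (a hedged illustration, not kernel code: it
assumes x86-64 shift values, 8-byte table entries, and a single
2M-mapped range that starts above PMD_SIZE):

  #include <stdio.h>

  #define PAGE_SHIFT 12
  #define PMD_SHIFT  21
  #define PUD_SHIFT  30
  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
  #define PMD_SIZE   (1UL << PMD_SHIFT)
  #define PUD_SIZE   (1UL << PUD_SHIFT)
  #define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))

  int main(void)
  {
          /* hypothetical range: [16M, 801M), mapped with 2M pages */
          unsigned long start = 16UL << 20, end = 801UL << 20;
          unsigned long range = end - start;

          /* PUD and PMD entries needed to cover just this range */
          unsigned long puds = (range + PUD_SIZE - 1) >> PUD_SHIFT;
          unsigned long pmds = (range + PMD_SIZE - 1) >> PMD_SHIFT;

          /* with 2M mappings, only the unaligned tail needs 4K PTEs */
          unsigned long extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
          unsigned long ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;

          /* 8 bytes per entry, each table rounded up to a whole page */
          unsigned long tables = ROUNDUP(puds * 8, PAGE_SIZE) +
                                 ROUNDUP(pmds * 8, PAGE_SIZE) +
                                 ROUNDUP(ptes * 8, PAGE_SIZE);

          printf("puds=%lu pmds=%lu ptes=%lu tables=%lu kB\n",
                 puds, pmds, ptes, tables >> 10);
          return 0;
  }

For this 785M range the sketch needs 1 PUD entry, 393 PMD entries and
256 PTEs -- three 4K table pages -- instead of sizing everything from 0
to end.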

This is needed after commit 1bbbbe77, to address
the panic reported here:

  https://lkml.org/lkml/2012/10/20/160
  https://lkml.org/lkml/2012/10/21/157

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Link: http://lkml.kernel.org/r/20121024195311.GB11779@jshin-Toonie

Tested-by: Tom Rini <trini@ti.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 1f2ff682
@@ -29,36 +29,54 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+	int i;
+	unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+	unsigned long start = 0, good_end;
 	phys_addr_t base;
 
-	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+	for (i = 0; i < nr_range; i++) {
+		unsigned long range, extra;
 
-	if (use_gbpages) {
-		unsigned long extra;
+		range = mr[i].end - mr[i].start;
+		puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-		extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-		pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-	} else
-		pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-
-	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+			extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+			pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+		} else {
+			pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+		}
 
-	if (use_pse) {
-		unsigned long extra;
-
-		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+		if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+			extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-		extra += PMD_SIZE;
+			extra += PMD_SIZE;
 #endif
-		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	} else
-		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+			/* The first 2/4M doesn't use large pages. */
+			if (mr[i].start < PMD_SIZE)
+				extra += range;
 
+			ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		} else {
+			ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		}
+	}
+
+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
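
The page_size_mask bits tested above are the PG_LEVEL_* enum values
from the x86 pgtable headers. As an illustrative sketch -- not part of
this patch -- of the input find_early_table_space() now receives,
init_memory_mapping() might hand it something like this for [0, 1G) on
x86-64 with PSE:

  /* illustrative only: a 4K-mapped head below 2M, then a 2M-mapped body */
  struct map_range mr[2] = {
          { .start = 0,         .end = 2UL << 20, .page_size_mask = 0 },
          { .start = 2UL << 20, .end = 1UL << 30,
            .page_size_mask = 1 << PG_LEVEL_2M },
  };

  find_early_table_space(mr, 2);  /* sizes tables for [0, 1G) only */

The mr[i].start < PMD_SIZE check budgets full 4K PTE coverage for any
2M-capable range that starts inside the first 2/4M, since the mapping
code never uses large pages there.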
@@ -76,7 +94,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
 	printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-		end - 1, pgt_buf_start << PAGE_SHIFT,
+		mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
 		(pgt_buf_top << PAGE_SHIFT) - 1);
 }
@@ -85,12 +103,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -263,7 +275,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
+		find_early_table_space(mr, nr_range);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,