diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b2f3dbca695223fc988c1be9cc778243481eb221..f15dfb92dec052d51f6c9212a8323ea2cf517945 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -329,12 +329,14 @@ static void __init htab_init_page_sizes(void)
 	 */
 	if (mmu_psize_defs[MMU_PAGE_16M].shift)
 		mmu_huge_psize = MMU_PAGE_16M;
+	/* With 4k/4level pagetables, we can't (for now) cope with a
+	 * huge page size < PMD_SIZE */
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_huge_psize = MMU_PAGE_1M;
 
 	/* Calculate HPAGE_SHIFT and sanity check it */
-	if (mmu_psize_defs[mmu_huge_psize].shift > 16 &&
-	    mmu_psize_defs[mmu_huge_psize].shift < 28)
+	if (mmu_psize_defs[mmu_huge_psize].shift > MIN_HUGEPTE_SHIFT &&
+	    mmu_psize_defs[mmu_huge_psize].shift < SID_SHIFT)
 		HPAGE_SHIFT = mmu_psize_defs[mmu_huge_psize].shift;
 	else
 		HPAGE_SHIFT = 0; /* No huge pages dude ! */
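
The hunk above replaces the magic numbers 16 and 28 with MIN_HUGEPTE_SHIFT and SID_SHIFT, so the sanity check now reads as "the chosen huge page must be larger than the minimum the page table layout can represent, and smaller than a segment". Below is a minimal userspace sketch of that bounds test; the constant values are assumptions for illustration (a PMD_SHIFT of 21 for 4k base pages, 256MB segments), not copied from the kernel headers.

#include <stdio.h>

/* Assumed values for this sketch only: with 4k base pages PMD_SHIFT is
 * PAGE_SHIFT + PTE_INDEX_SIZE (12 + 9), and ppc64 segments are 256MB. */
#define MIN_HUGEPTE_SHIFT_SKETCH	21
#define SID_SHIFT_SKETCH		28

/* Mirrors the check in htab_init_page_sizes(): a usable huge page size
 * must lie strictly between the minimum huge-PTE level and a segment. */
static unsigned int pick_hpage_shift(unsigned int huge_shift)
{
	if (huge_shift > MIN_HUGEPTE_SHIFT_SKETCH &&
	    huge_shift < SID_SHIFT_SKETCH)
		return huge_shift;
	return 0;			/* no huge pages */
}

int main(void)
{
	printf("16M -> HPAGE_SHIFT %u\n", pick_hpage_shift(24));	/* 24 */
	printf("1M  -> HPAGE_SHIFT %u\n", pick_hpage_shift(20));	/* 0  */
	return 0;
}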
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0073a04047e48b6a7b8144ee1538142925d7bdb3..426c269e552eec77bbee18cb086f900716cffb4a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -212,6 +212,12 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 
 	BUG_ON(area >= NUM_HIGH_AREAS);
 
+	/* Hack, so that each address is controlled by exactly one
+	 * of the high or low area bitmaps, the first high area starts
+	 * at 4GB, not 0 */
+	if (start == 0)
+		start = 0x100000000UL;
+
 	/* Check no VMAs are in the region */
 	vma = find_vma(mm, start);
 	if (vma && (vma->vm_start < end))
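
For context on the hack above: the low-area bitmap tracks 256MB segments below 4GB, while the high-area bitmap covers much larger chunks whose first area would nominally begin at address 0. Bumping start to 4GB keeps every address owned by exactly one bitmap. The sketch below illustrates that ownership rule; the 1TB high-area size is an assumption for illustration, not taken from the headers.

#include <stdio.h>
#include <stdint.h>

#define SID_SHIFT		28			/* 256MB segments */
#define LOW_LIMIT		0x100000000ULL		/* 4GB boundary */
#define HTLB_AREA_SHIFT_SKETCH	40			/* assumed 1TB high areas */

/* Report which bitmap (and which bit) controls a given address under the
 * "exactly one owner" rule described in the comment above. */
static void owner(uint64_t addr)
{
	if (addr < LOW_LIMIT)
		printf("%#14llx -> low  bitmap, bit %llu\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr >> SID_SHIFT));
	else
		printf("%#14llx -> high bitmap, bit %llu\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr >> HTLB_AREA_SHIFT_SKETCH));
}

int main(void)
{
	owner(0x0ULL);			/* low area 0, never high area 0 */
	owner(0xf0000000ULL);		/* last low segment below 4GB */
	owner(0x100000000ULL);		/* first address the high bitmap controls */
	return 0;
}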
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 3e18241b6f35218b65619762f63fc1867052aa83..950ffc5848c7950e1d449615aa61fb5782dde2cc 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -80,12 +80,17 @@ _GLOBAL(slb_miss_kernel_load_virtual)
 BEGIN_FTR_SECTION
 	b	1f
 END_FTR_SECTION_IFCLR(CPU_FTR_16M_PAGE)
+	cmpldi	r10,16
+
+	lhz	r9,PACALOWHTLBAREAS(r13)
+	mr	r11,r10
+	blt	5f
+
 	lhz	r9,PACAHIGHHTLBAREAS(r13)
 	srdi	r11,r10,(HTLB_AREA_SHIFT-SID_SHIFT)
-	srd	r9,r9,r11
-	lhz	r11,PACALOWHTLBAREAS(r13)
-	srd	r11,r11,r10
-	or.	r9,r9,r11
+
+5:	srd	r9,r9,r11
+	andi.	r9,r9,1
 	beq	1f
 _GLOBAL(slb_miss_user_load_huge)
 	li	r11,0
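
The removed assembly OR'ed together the shifted low and high bitmaps, which could report a segment as huge when the wrong bitmap contributed a set bit; the replacement picks exactly one bitmap based on whether the ESID is below 16 (addresses below 4GB) and then tests a single bit. The C rendering below is only a sketch of that decision: the function name is illustrative and the 1TB HTLB_AREA_SHIFT value is assumed, as in the earlier sketch.

#include <stdint.h>
#include <stdbool.h>

#define SID_SHIFT		28
#define HTLB_AREA_SHIFT_SKETCH	40	/* assumed high-area size */

/* Equivalent of the fixed slb_low.S sequence: choose the low or high
 * bitmap, shift it down by the right index, and test bit 0 (the
 * "andi. r9,r9,1" above). */
static bool esid_is_huge(uint64_t esid, uint16_t low_areas, uint16_t high_areas)
{
	uint16_t map;
	uint64_t bit;

	if (esid < 16) {		/* cmpldi r10,16 ; blt 5f */
		map = low_areas;
		bit = esid;
	} else {
		map = high_areas;
		bit = esid >> (HTLB_AREA_SHIFT_SKETCH - SID_SHIFT);
	}
	return (map >> bit) & 1;
}

int main(void)
{
	/* Low segment 1 (256MB..512MB) marked huge in the low bitmap. */
	return esid_is_huge(1, 0x0002, 0x0000) ? 0 : 1;
}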
diff --git a/include/asm-ppc64/pgtable-4k.h b/include/asm-ppc64/pgtable-4k.h
index c883a274855878c20602cf246d45993f218ebffe..e9590c06ad9276541a191ec1f3a3057452c7bde9 100644
--- a/include/asm-ppc64/pgtable-4k.h
+++ b/include/asm-ppc64/pgtable-4k.h
@@ -23,6 +23,9 @@
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT	PMD_SHIFT
+
 /* PUD_SHIFT determines what a third-level page table entry can map */
 #define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
 #define PUD_SIZE	(1UL << PUD_SHIFT)
diff --git a/include/asm-ppc64/pgtable-64k.h b/include/asm-ppc64/pgtable-64k.h
index c5f437c86b3c7a67b264edea5b9de83dedf9001c..154f1840ece4d0c49e403177b8262640e91048a6 100644
--- a/include/asm-ppc64/pgtable-64k.h
+++ b/include/asm-ppc64/pgtable-64k.h
@@ -14,6 +14,9 @@
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
+/* With 64k base page size, hugepage PTEs go at the PTE level */
+#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT
+
 /* PMD_SHIFT determines what a second-level page table entry can map */
 #define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
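
The two MIN_HUGEPTE_SHIFT definitions encode where a huge PTE can live: with 4k base pages it sits at the PMD level, so a huge page must cover at least PMD_SIZE, while with 64k base pages it sits in an ordinary PTE page, so anything larger than a base page is acceptable. A small sketch of the resulting minimum shifts follows; the index sizes are assumptions for illustration rather than the real header values.

#include <stdio.h>

/* Assumed layouts: 4k pages with a 9-bit PTE index, 64k pages with a
 * 12-bit PTE index. */
static unsigned int min_hugepte_shift(unsigned int page_shift,
				      unsigned int pte_index_size,
				      int hugepte_at_pmd)
{
	unsigned int pmd_shift = page_shift + pte_index_size;

	/* 4k base: the huge PTE replaces a PMD entry, so require PMD_SIZE.
	 * 64k base: it lives at the PTE level, so PAGE_SIZE is enough. */
	return hugepte_at_pmd ? pmd_shift : page_shift;
}

int main(void)
{
	printf("4k  base: MIN_HUGEPTE_SHIFT = %u\n", min_hugepte_shift(12, 9, 1));
	printf("64k base: MIN_HUGEPTE_SHIFT = %u\n", min_hugepte_shift(16, 12, 0));
	return 0;
}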