diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 147abe0383d8b49e03b548764ddf44bf830c525d..f9362ee9955f2411e932dbdca13a9dab5bf2c364 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -99,7 +99,7 @@ config ARCH_MTD_XIP
 
 config VECTORS_BASE
 	hex
-	default 0xffff0000 if MMU
+	default 0xffff0000 if MMU || CPU_HIGH_VECTOR
 	default DRAM_BASE if REMAP_VECTORS_TO_RAM
 	default 0x00000000
 	help
@@ -626,6 +626,7 @@ config LEDS_CPU
 
 config ALIGNMENT_TRAP
 	bool
+	depends on CPU_CP15_MMU
 	default y if !ARCH_EBSA110
 	help
 	  ARM processors can not fetch/store information which is not
@@ -857,7 +858,7 @@ source "drivers/base/Kconfig"
 
 source "drivers/connector/Kconfig"
 
-if ALIGNMENT_TRAP
+if ALIGNMENT_TRAP || !CPU_CP15_MMU
 source "drivers/mtd/Kconfig"
 endif
 
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index e1574be2ded614e05633c715381df9086d69f776..f087376748d12253bb9f0414c6c742dfd6eb0b89 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -25,6 +25,14 @@ config FLASH_SIZE
 	hex 'FLASH Size' if SET_MEM_PARAM
 	default 0x00400000
 
+config PROCESSOR_ID
+	hex
+	default 0x00007700
+	depends on !CPU_CP15
+	help
+	  If the processor has no CP15 register, this processor ID is
+	  used instead of auto-probing, which requires that register.
+
 config REMAP_VECTORS_TO_RAM
 	bool 'Install vectors to the begining of RAM' if DRAM_BASE
 	depends on DRAM_BASE
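
With no CP15 to probe, the fixed CONFIG_PROCESSOR_ID value is handed to
__lookup_processor_type and matched against each proc_info record's
(value, mask) pair just like a hardware-reported ID: a match means
((id ^ cpu_val) & cpu_mask) == 0. A minimal user-space C sketch of that
test; the ID below is a hypothetical value a board port might configure,
and the table entries mirror proc_info records added later in this patch:

	#include <stdio.h>

	struct proc_info { unsigned int cpu_val, cpu_mask; const char *name; };

	/* illustrative entries; values mirror proc_info records in this patch */
	static const struct proc_info table[] = {
		{ 0x41007700, 0xfff8ff00, "ARM7TDMI" },
		{ 0x41807400, 0xfffffff0, "ARM740T"  },
	};

	int main(void)
	{
		/* hypothetical CONFIG_PROCESSOR_ID a board port might set */
		unsigned int id = 0x41007700;
		unsigned int i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (((id ^ table[i].cpu_val) & table[i].cpu_mask) == 0)
				printf("matched %s\n", table[i].name);
		return 0;
	}
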
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 80cee786500c87b3a4315c39851d219325f2e45b..2a0b2c8a1fe00df55a7dd87092d4ce3661399cfe 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -55,7 +55,12 @@ arch-$(CONFIG_CPU_32v3)		:=-D__LINUX_ARM_ARCH__=3 -march=armv3
 # This selects how we optimise for the processor.
 tune-$(CONFIG_CPU_ARM610)	:=-mtune=arm610
 tune-$(CONFIG_CPU_ARM710)	:=-mtune=arm710
+tune-$(CONFIG_CPU_ARM7TDMI)	:=-mtune=arm7tdmi
 tune-$(CONFIG_CPU_ARM720T)	:=-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM740T)	:=-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM9TDMI)	:=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM940T)	:=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM946E)	:=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
 tune-$(CONFIG_CPU_ARM920T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_ARM922T)	:=-mtune=arm9tdmi
 tune-$(CONFIG_CPU_ARM925T)	:=-mtune=arm9tdmi
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 2adc1527e0ebc9d7a3d4936b0eb4e767381e2e84..adddc71316852f5001314faf10678f5fc4e3ef8d 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -51,7 +51,11 @@ OBJS		+= head-at91rm9200.o
 endif
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+ifeq ($(CONFIG_CPU_CP15),y)
 OBJS		+= big-endian.o
+else
+# Without CP15, the endianness must be fixed by the hardware design.
+endif
 endif
 
 #
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 75df1f764a10eff4af12c5e5e55240d42f6da941..e5ab51b9cceb1bdfd155c9824ba6364d21cc1977 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -82,9 +82,11 @@
 		kphex	r6, 8		/* processor id */
 		kputc	#':'
 		kphex	r7, 8		/* architecture id */
+#ifdef CONFIG_CPU_CP15
 		kputc	#':'
 		mrc	p15, 0, r0, c1, c0
 		kphex	r0, 8		/* control reg */
+#endif
 		kputc	#'\n'
 		kphex	r5, 8		/* decompressed kernel start */
 		kputc	#'-'
@@ -507,7 +509,11 @@ call_kernel:	bl	cache_clean_flush
  */
 
 call_cache_fn:	adr	r12, proc_types
+#ifdef CONFIG_CPU_CP15
 		mrc	p15, 0, r6, c0, c0	@ get processor ID
+#else
+		ldr	r6, =CONFIG_PROCESSOR_ID
+#endif
 1:		ldr	r1, [r12, #0]		@ get value
 		ldr	r2, [r12, #4]		@ get mask
 		eor	r1, r1, r6		@ (real ^ match)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index ac9eb3d30518ade76e1b01965fb969d90f856950..f359a189dcf2cda84f112dc0003e616cdd6434d4 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -9,7 +9,6 @@
  * published by the Free Software Foundation.
  *
  *  Common kernel startup code (non-paged MM)
- *    for 32-bit CPUs which has a process ID register(CP15).
  *
  */
 #include <linux/linkage.h>
@@ -40,7 +39,11 @@
 ENTRY(stext)
 	msr	cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
 						@ and irqs disabled
+#ifndef CONFIG_CPU_CP15
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#else
 	mrc	p15, 0, r9, c0, c0		@ get processor id
+#endif
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
 	beq	__error_p				@ yes, error 'p'
@@ -58,6 +61,7 @@ ENTRY(stext)
  */
 	.type	__after_proc_init, %function
 __after_proc_init:
+#ifdef CONFIG_CPU_CP15
 	mrc	p15, 0, r0, c1, c0, 0		@ read control reg
 #ifdef CONFIG_ALIGNMENT_TRAP
 	orr	r0, r0, #CR_A
@@ -72,8 +76,14 @@ __after_proc_init:
 #endif
 #ifdef CONFIG_CPU_ICACHE_DISABLE
 	bic	r0, r0, #CR_I
+#endif
+#ifdef CONFIG_CPU_HIGH_VECTOR
+	orr	r0, r0, #CR_V
+#else
+	bic	r0, r0, #CR_V
 #endif
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
+#endif /* CONFIG_CPU_CP15 */
 
 	mov	pc, r13				@ clear the BSS and jump
 						@ to start_kernel
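
__after_proc_init now also forces the V bit of the CP15 control register
to agree with the configured vector location, since on nommu platforms
the vector base is a board property rather than something the MMU can
remap. A small C sketch of what the V bit (bit 13, CR_V in asm/system.h)
implies; the sample control value is made up:

	#include <stdio.h>

	#define CR_V (1 << 13)	/* CP15 c1: vectors at 0xffff0000 when set */

	int main(void)
	{
		unsigned int ctrl = CR_V;	/* hypothetical control value */

		printf("vectors at %#010lx\n",
		       (ctrl & CR_V) ? 0xffff0000UL : 0x00000000UL);
		return 0;
	}
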
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 298363d97047755c3ac969baef5a95217a69bea8..1b061583408ed7f44158a7dfcf7b996e56f7a53f 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -2,6 +2,7 @@
  *  linux/arch/arm/kernel/module.c
  *
  *  Copyright (C) 2002 Russell King.
+ *  Modified for nommu by Hyok S. Choi
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +33,7 @@ extern void _etext;
 #define MODULE_START	(((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
 #endif
 
+#ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
 	struct vm_struct *area;
@@ -46,6 +48,12 @@ void *module_alloc(unsigned long size)
 
 	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
 }
+#else /* CONFIG_MMU */
+void *module_alloc(unsigned long size)
+{
+	return size == 0 ? NULL : vmalloc(size);
+}
+#endif /* !CONFIG_MMU */
 
 void module_free(struct module *module, void *region)
 {
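
With no vmalloc/module window on !MMU, module_alloc can fall straight
through to vmalloc; the one contract deliberately kept from the MMU
version is that a zero-byte request returns NULL rather than a zero-sized
allocation. A trivial user-space model of that guard, with malloc
standing in for vmalloc:

	#include <stdlib.h>

	/* model of the nommu module_alloc guard */
	static void *module_alloc_model(unsigned long size)
	{
		return size == 0 ? NULL : malloc(size);
	}
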
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3079535afccd4be9daf1219c101ac8bc0959e7c7..bf35c178a8772d232fc5e5e1eb0e5ba91449a2fd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -221,16 +221,26 @@ void __show_regs(struct pt_regs *regs)
 		processor_modes[processor_mode(regs)],
 		thumb_mode(regs) ? " (T)" : "",
 		get_fs() == get_ds() ? "kernel" : "user");
+#ifdef CONFIG_CPU_CP15
 	{
-		unsigned int ctrl, transbase, dac;
+		unsigned int ctrl;
 		  __asm__ (
 		"	mrc p15, 0, %0, c1, c0\n"
-		"	mrc p15, 0, %1, c2, c0\n"
-		"	mrc p15, 0, %2, c3, c0\n"
-		: "=r" (ctrl), "=r" (transbase), "=r" (dac));
-		printk("Control: %04X  Table: %08X  DAC: %08X\n",
-		  	ctrl, transbase, dac);
+		: "=r" (ctrl));
+		printk("Control: %04X\n", ctrl);
 	}
+#ifdef CONFIG_CPU_CP15_MMU
+	{
+		unsigned int transbase, dac;
+		  __asm__ (
+		"	mrc p15, 0, %0, c2, c0\n"
+		"	mrc p15, 0, %1, c3, c0\n"
+		: "=r" (transbase), "=r" (dac));
+		printk("Table: %08X  DAC: %08X\n",
+		  	transbase, dac);
+	}
+#endif
+#endif
 }
 
 void show_regs(struct pt_regs * regs)
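
With the MMU-only Table/DAC dump split out, the Control line is now
printed on every CP15 core. The value is the raw CP15 c1 register; a
stand-alone C decoder for the bits most relevant to this series (bit
positions per asm/system.h; the sample value is made up):

	#include <stdio.h>

	#define CR_M (1 << 0)	/* MMU/MPU enable */
	#define CR_A (1 << 1)	/* alignment fault checking */
	#define CR_C (1 << 2)	/* D-cache enable */
	#define CR_I (1 << 12)	/* I-cache enable */
	#define CR_V (1 << 13)	/* high vectors */

	int main(void)
	{
		unsigned int ctrl = 0x107d;	/* made-up "Control:" value */

		printf("MPU/MMU=%u align=%u dcache=%u icache=%u high-vec=%u\n",
		       !!(ctrl & CR_M), !!(ctrl & CR_A), !!(ctrl & CR_C),
		       !!(ctrl & CR_I), !!(ctrl & CR_V));
		return 0;
	}
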
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index b59c74100a84f5f16345da689a7d719740091100..c0bfb8212b7742abda15da17ffd47b09c3fe24e2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -15,6 +15,7 @@ config CPU_ARM610
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V3 if MMU
 	select CPU_TLB_V3 if MMU
 	help
@@ -24,6 +25,20 @@ config CPU_ARM610
 	  Say Y if you want support for the ARM610 processor.
 	  Otherwise, say N.
 
+# ARM7TDMI
+config CPU_ARM7TDMI
+	bool "Support ARM7TDMI processor"
+	depends on !MMU
+	select CPU_32v4T
+	select CPU_ABRT_LV4T
+	select CPU_CACHE_V4
+	help
+	  A 32-bit RISC microprocessor based on the ARM7 processor core
+	  which has no memory management unit or cache.
+
+	  Say Y if you want support for the ARM7TDMI processor.
+	  Otherwise, say N.
+
 # ARM710
 config CPU_ARM710
 	bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
@@ -31,6 +46,7 @@ config CPU_ARM710
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V3 if MMU
 	select CPU_TLB_V3 if MMU
 	help
@@ -50,6 +66,7 @@ config CPU_ARM720T
 	select CPU_ABRT_LV4T
 	select CPU_CACHE_V4
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WT if MMU
 	select CPU_TLB_V4WT if MMU
 	help
@@ -59,6 +76,36 @@ config CPU_ARM720T
 	  Say Y if you want support for the ARM720T processor.
 	  Otherwise, say N.
 
+# ARM740T
+config CPU_ARM740T
+	bool "Support ARM740T processor" if ARCH_INTEGRATOR
+	depends on !MMU
+	select CPU_32v4T
+	select CPU_ABRT_LV4T
+	select CPU_CACHE_V3	# although the core is v4t
+	select CPU_CP15_MPU
+	help
+	  A 32-bit RISC processor built around an ARM7TDMI core, with
+	  a write buffer, an MPU (Memory Protection Unit), and an 8KB
+	  cache (4KB on some variants).
+
+	  Say Y if you want support for the ARM740T processor.
+	  Otherwise, say N.
+
+# ARM9TDMI
+config CPU_ARM9TDMI
+	bool "Support ARM9TDMI processor"
+	depends on !MMU
+	select CPU_32v4T
+	select CPU_ABRT_NOMMU
+	select CPU_CACHE_V4
+	help
+	  A 32-bit RISC microprocessor based on the ARM9 processor core
+	  which has no memory management unit or cache.
+
+	  Say Y if you want support for the ARM9TDMI processor.
+	  Otherwise, say N.
+
 # ARM920T
 config CPU_ARM920T
 	bool "Support ARM920T processor"
@@ -68,6 +115,7 @@ config CPU_ARM920T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -89,6 +137,7 @@ config CPU_ARM922T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -108,6 +157,7 @@ config CPU_ARM925T
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
  	help
@@ -126,6 +176,7 @@ config CPU_ARM926T
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -136,6 +187,39 @@ config CPU_ARM926T
 	  Say Y if you want support for the ARM926T processor.
 	  Otherwise, say N.
 
+# ARM940T
+config CPU_ARM940T
+	bool "Support ARM940T processor" if ARCH_INTEGRATOR
+	depends on !MMU
+	select CPU_32v4T
+	select CPU_ABRT_NOMMU
+	select CPU_CACHE_VIVT
+	select CPU_CP15_MPU
+	help
+	  ARM940T is a member of the ARM9TDMI family of general-
+	  purpose microprocessors with an MPU and separate 4KB
+	  instruction and 4KB data caches, each with a 4-word line
+	  length.
+
+	  Say Y if you want support for the ARM940T processor.
+	  Otherwise, say N.
+
+# ARM946E-S
+config CPU_ARM946E
+	bool "Support ARM946E-S processor" if ARCH_INTEGRATOR
+	depends on !MMU
+	select CPU_32v5
+	select CPU_ABRT_NOMMU
+	select CPU_CACHE_VIVT
+	select CPU_CP15_MPU
+	help
+	  ARM946E-S is a member of the ARM9E-S family of high-
+	  performance, 32-bit system-on-chip processor solutions.
+	  The TCM and the ARMv5TE 32-bit instruction set are supported.
+
+	  Say Y if you want support for the ARM946E-S processor.
+	  Otherwise, say N.
+
 # ARM1020 - needs validating
 config CPU_ARM1020
 	bool "Support ARM1020T (rev 0) processor"
@@ -144,6 +228,7 @@ config CPU_ARM1020
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -161,6 +246,7 @@ config CPU_ARM1020E
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WBI if MMU
 	depends on n
@@ -172,6 +258,7 @@ config CPU_ARM1022
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU # can probably do better
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -189,6 +276,7 @@ config CPU_ARM1026
 	select CPU_32v5
 	select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU # can probably do better
 	select CPU_TLB_V4WBI if MMU
 	help
@@ -207,6 +295,7 @@ config CPU_SA110
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
 	select CPU_TLB_V4WB if MMU
 	help
@@ -227,6 +316,7 @@ config CPU_SA1100
 	select CPU_ABRT_EV4
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_TLB_V4WB if MMU
 
 # XScale
@@ -237,6 +327,7 @@ config CPU_XSCALE
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_TLB_V4WBI if MMU
 
 # XScale Core Version 3
@@ -247,6 +338,7 @@ config CPU_XSC3
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_CACHE_VIVT
+	select CPU_CP15_MMU
 	select CPU_TLB_V4WBI if MMU
 	select IO_36
 
@@ -258,6 +350,7 @@ config CPU_V6
 	select CPU_ABRT_EV6
 	select CPU_CACHE_V6
 	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
 	select CPU_COPY_V6 if MMU
 	select CPU_TLB_V6 if MMU
 
@@ -299,6 +392,9 @@ config CPU_32v6
 	bool
 
 # The abort model
+config CPU_ABRT_NOMMU
+	bool
+
 config CPU_ABRT_EV4
 	bool
 
@@ -380,6 +476,23 @@ config CPU_TLB_V6
 
 endif
 
+config CPU_CP15
+	bool
+	help
+	  Processor has the CP15 register.
+
+config CPU_CP15_MMU
+	bool
+	select CPU_CP15
+	help
+	  Processor has the CP15 register, which has MMU related registers.
+
+config CPU_CP15_MPU
+	bool
+	select CPU_CP15
+	help
+	  Processor has the CP15 register, which has MPU related registers.
+
 #
 # CPU supports 36-bit I/O
 #
@@ -390,7 +503,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -411,23 +524,48 @@ config CPU_BIG_ENDIAN
 	  port must properly enable any big-endian related features
 	  of your chipset/board/processor.
 
+config CPU_HIGH_VECTOR
+	depends on !MMU && CPU_CP15 && !CPU_ARM740T
+	bool "Select the High exception vector"
+	default n
+	help
+	  Say Y here to select the high exception vector (0xFFFF0000~).
+	  The exception vector base can vary depending on the platform
+	  design in nommu mode. If your platform needs to use the
+	  high exception vector, say Y.
+	  Otherwise, or if you are unsure, say N; the low exception
+	  vector (0x00000000~) will be used.
+
 config CPU_ICACHE_DISABLE
-	bool "Disable I-Cache"
-	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
+	bool "Disable I-Cache (I-bit)"
+	depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
 	help
 	  Say Y here to disable the processor instruction cache. Unless
 	  you have a reason not to or are unsure, say N.
 
 config CPU_DCACHE_DISABLE
-	bool "Disable D-Cache"
-	depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6
+	bool "Disable D-Cache (C-bit)"
+	depends on CPU_CP15
 	help
 	  Say Y here to disable the processor data cache. Unless
 	  you have a reason not to or are unsure, say N.
 
+config CPU_DCACHE_SIZE
+	hex
+	depends on CPU_ARM740T || CPU_ARM946E
+	default 0x00001000 if CPU_ARM740T
+	default 0x00002000 # default size for ARM946E-S
+	help
+	  Some cores are synthesizable with various cache sizes. For
+	  the ARM946E-S, the size can vary from 0KB to 1MB.
+	  To support the cache operations, it is efficient to know the
+	  size at compile time.
+	  If your SoC is configured with a different size, define the
+	  value here with the proper conditions.
+
 config CPU_DCACHE_WRITETHROUGH
 	bool "Force write through D-cache"
-	depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
+	depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
 	default y if CPU_ARM925T
 	help
 	  Say Y here to use the data cache in writethrough mode. Unless you
@@ -435,7 +573,7 @@ config CPU_DCACHE_WRITETHROUGH
 
 config CPU_CACHE_ROUND_ROBIN
 	bool "Round robin I and D cache replacement algorithm"
-	depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
+	depends on (CPU_ARM926T || CPU_ARM946E || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
 	help
 	  Say Y here to use the predictable round-robin cache replacement
 	  policy.  Unless you specifically require this or are unsure, say N.
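
CPU_DCACHE_SIZE lets the set/way cache loops in the new proc-arm940 and
proc-arm946 code be bounded at build time instead of probed at run time.
A rough C model of such a clean-by-index walk; the 32-byte line length
is an assumption here, and the real loops are assembly issuing one MCR
per index (nested over segments on these cores):

	#define CACHE_DLINESIZE	32U		/* assumed line length */
	#define CACHE_DSIZE	0x2000U		/* e.g. CONFIG_CPU_DCACHE_SIZE */

	/* visit every cache line index once; the callback models the MCR */
	static void clean_dcache_model(void (*clean_index)(unsigned long))
	{
		unsigned long idx;

		for (idx = 0; idx < CACHE_DSIZE; idx += CACHE_DLINESIZE)
			clean_index(idx);
	}
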
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 1a1563f859af624f06c6125a479b97d60163607a..d2f5672ecf62e2a06967884f9edf1c7b53975ab6 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y				:= consistent.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   mm-armv.o
+				   pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_MODULES)		+= proc-syms.o
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
 
+obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
 obj-$(CONFIG_CPU_ABRT_EV4T)	+= abort-ev4t.o
 obj-$(CONFIG_CPU_ABRT_LV4T)	+= abort-lv4t.o
@@ -46,11 +47,16 @@ obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
 
 obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
+obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
 obj-$(CONFIG_CPU_ARM720T)	+= proc-arm720.o
+obj-$(CONFIG_CPU_ARM740T)	+= proc-arm740.o
+obj-$(CONFIG_CPU_ARM9TDMI)	+= proc-arm9tdmi.o
 obj-$(CONFIG_CPU_ARM920T)	+= proc-arm920.o
 obj-$(CONFIG_CPU_ARM922T)	+= proc-arm922.o
 obj-$(CONFIG_CPU_ARM925T)	+= proc-arm925.o
 obj-$(CONFIG_CPU_ARM926T)	+= proc-arm926.o
+obj-$(CONFIG_CPU_ARM940T)	+= proc-arm940.o
+obj-$(CONFIG_CPU_ARM946E)	+= proc-arm946.o
 obj-$(CONFIG_CPU_ARM1020)	+= proc-arm1020.o
 obj-$(CONFIG_CPU_ARM1020E)	+= proc-arm1020e.o
 obj-$(CONFIG_CPU_ARM1022)	+= proc-arm1022.o
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index db743e510214513740a31b28ee1aca1dc1c2bb43..9fb7b0e25ea1094cec42e47b76c69e45ddf13f44 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -19,11 +19,16 @@
  */
 ENTRY(v4t_late_abort)
 	tst	r3, #PSR_T_BIT			@ check for thumb mode
+#ifdef CONFIG_CPU_CP15_MMU
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
+#else
+	mov	r0, #0				@ clear r0, r1 (no FSR/FAR)
+	mov	r1, #0
+#endif
 	bne	.data_thumb_abort
 	ldr	r8, [r2]			@ read arm instruction
-	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
new file mode 100644
index 0000000000000000000000000000000000000000..a7cc7f9ee45df02c28caa48d64eb447f1f8eefd3
--- /dev/null
+++ b/arch/arm/mm/abort-nommu.S
@@ -0,0 +1,19 @@
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+/*
+ * Function: nommu_early_abort
+ *
+ * Params  : r2 = address of aborted instruction
+ *         : r3 = saved SPSR
+ *
+ * Returns : r0 = 0 (abort address)
+ *	   : r1 = 0 (FSR)
+ *
+ * Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
+ *       Just fill zero into the registers.
+ */
+	.align	5
+ENTRY(nommu_early_abort)
+	mov	r0, #0				@ clear r0, r1 (no FSR/FAR)
+	mov	r1, #0
+	mov	pc, lr
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e0d21bbbe7d788e55854dfd3b2c5eb62cc96050a..aa109f074dd9d84d64e0873133c899ef22ebb449 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -735,7 +735,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	/*
 	 * We got a fault - fix it up, or die.
 	 */
-	do_bad_area(current, current->mm, addr, fsr, regs);
+	do_bad_area(addr, fsr, regs);
 	return 0;
 
  swp:
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index b8ad5d58ebe2ff11f687704c8f31a86d83014dd3..b2908063ed6aa5be10ebdfd7ee00b37ef5e8021f 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -29,9 +29,13 @@ ENTRY(v4_flush_user_cache_all)
  *	Clean and invalidate the entire cache.
  */
 ENTRY(v4_flush_kern_cache_all)
+#ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 	mov	pc, lr
+#else
+	/* FALLTHROUGH */
+#endif
 
 /*
  *	flush_user_cache_range(start, end, flags)
@@ -44,9 +48,13 @@ ENTRY(v4_flush_kern_cache_all)
  *	- flags	- vma_area_struct flags describing address space
  */
 ENTRY(v4_flush_user_cache_range)
+#ifdef CONFIG_CPU_CP15
 	mov	ip, #0
 	mcreq	p15, 0, ip, c7, c7, 0		@ flush ID cache
 	mov	pc, lr
+#else
+	/* FALLTHROUGH */
+#endif
 
 /*
  *	coherent_kern_range(start, end)
@@ -108,8 +116,10 @@ ENTRY(v4_dma_inv_range)
  *	- end	 - virtual end address
  */
 ENTRY(v4_dma_flush_range)
+#ifdef CONFIG_CPU_CP15
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
+#endif
 	/* FALLTHROUGH */
 
 /*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c5e0622c77650480f4dc148254d77f3545606d5d..f0943d160ffeccc8d02ece3f82d023bc7c68970a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -131,10 +131,11 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	force_sig_info(sig, &si, tsk);
 }
 
-void
-do_bad_area(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr,
-	    unsigned int fsr, struct pt_regs *regs)
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->active_mm;
+
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
@@ -319,7 +320,6 @@ static int
 do_translation_fault(unsigned long addr, unsigned int fsr,
 		     struct pt_regs *regs)
 {
-	struct task_struct *tsk;
 	unsigned int index;
 	pgd_t *pgd, *pgd_k;
 	pmd_t *pmd, *pmd_k;
@@ -351,9 +351,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
 	return 0;
 
 bad_area:
-	tsk = current;
-
-	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
+	do_bad_area(addr, fsr, regs);
 	return 0;
 }
 
@@ -364,8 +362,7 @@ bad_area:
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-	struct task_struct *tsk = current;
-	do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
+	do_bad_area(addr, fsr, regs);
 	return 0;
 }
 
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 73b59e83227f87d4183c5a97db0b9aee41aaaed2..49e9e3804de41ead9959b676a57279cfdd800baf 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,6 +1,3 @@
-void do_bad_area(struct task_struct *tsk, struct mm_struct *mm,
-		 unsigned long addr, unsigned int fsr, struct pt_regs *regs);
-
-void show_pte(struct mm_struct *mm, unsigned long addr);
+void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 
 unsigned long search_exception_table(unsigned long addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 64262bda8e54d97d4b3002b24b4024865fd12de6..22217fe2650b61276b82a280ea9eecc18b48b613 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -27,10 +27,7 @@
 
 #include "mm.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
+extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
 extern unsigned long phys_initrd_start;
 extern unsigned long phys_initrd_size;
 
@@ -40,17 +37,6 @@ extern unsigned long phys_initrd_size;
  */
 static struct meminfo meminfo __initdata = { 0, };
 
-/*
- * empty_zero_page is a special page that is used for
- * zero-initialized data and COW.
- */
-struct page *empty_zero_page;
-
-/*
- * The pmd table for the upper-most set of pages.
- */
-pmd_t *top_pmd;
-
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
@@ -173,57 +159,18 @@ static int __init check_initrd(struct meminfo *mi)
 	return initrd_node;
 }
 
-/*
- * Reserve the various regions of node 0
- */
-static __init void reserve_node_zero(pg_data_t *pgdat)
+static inline void map_memory_bank(struct membank *bank)
 {
-	unsigned long res_size = 0;
-
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
-#else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
-#endif
-
-	/*
-	 * Reserve the page tables.  These are already in use,
-	 * and can only be in node 0.
-	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			     PTRS_PER_PGD * sizeof(pgd_t));
+#ifdef CONFIG_MMU
+	struct map_desc map;
 
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	map.pfn = __phys_to_pfn(bank->start);
+	map.virtual = __phys_to_virt(bank->start);
+	map.length = bank->size;
+	map.type = MT_MEMORY;
 
-	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
-
-#ifdef CONFIG_SA1111
-	/*
-	 * Because of the SA1111 DMA bug, we want to preserve our
-	 * precious DMA-able memory...
-	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	create_mapping(&map);
 #endif
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
 }
 
 static unsigned long __init
@@ -242,23 +189,18 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	 * Calculate the pfn range, and map the memory banks for this node.
 	 */
 	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
-		struct map_desc map;
 
-		start = mi->bank[i].start >> PAGE_SHIFT;
-		end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;
+		start = bank->start >> PAGE_SHIFT;
+		end = (bank->start + bank->size) >> PAGE_SHIFT;
 
 		if (start_pfn > start)
 			start_pfn = start;
 		if (end_pfn < end)
 			end_pfn = end;
 
-		map.pfn = __phys_to_pfn(mi->bank[i].start);
-		map.virtual = __phys_to_virt(mi->bank[i].start);
-		map.length = mi->bank[i].size;
-		map.type = MT_MEMORY;
-
-		create_mapping(&map);
+		map_memory_bank(bank);
 	}
 
 	/*
@@ -340,9 +282,9 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	return end_pfn;
 }
 
-static void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(struct meminfo *mi)
 {
-	unsigned long addr, memend_pfn = 0;
+	unsigned long memend_pfn = 0;
 	int node, initrd_node, i;
 
 	/*
@@ -354,26 +296,6 @@ static void __init bootmem_init(struct meminfo *mi)
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
-	/*
-	 * Clear out all the mappings below the kernel image.
-	 */
-	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-#ifdef CONFIG_XIP_KERNEL
-	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
-#endif
-	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-	/*
-	 * Clear out all the kernel space mappings, except for the first
-	 * memory bank, up to the end of the vmalloc region.
-	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
-	     addr < VMALLOC_END; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
 	/*
 	 * Locate which node contains the ramdisk image, if any.
 	 */
@@ -407,114 +329,6 @@ static void __init bootmem_init(struct meminfo *mi)
 	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
 }
 
-/*
- * Set up device the mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_END, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
- */
-static void __init devicemaps_init(struct machine_desc *mdesc)
-{
-	struct map_desc map;
-	unsigned long addr;
-	void *vectors;
-
-	/*
-	 * Allocate the vector page early.
-	 */
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
-
-	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
-		pmd_clear(pmd_off_k(addr));
-
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULE_START;
-	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
-	/*
-	 * Map the cache flushing regions.
-	 */
-#ifdef FLUSH_BASE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
-	map.virtual = FLUSH_BASE;
-	map.length = SZ_1M;
-	map.type = MT_CACHECLEAN;
-	create_mapping(&map);
-#endif
-#ifdef FLUSH_BASE_MINICACHE
-	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
-	map.virtual = FLUSH_BASE_MINICACHE;
-	map.length = SZ_1M;
-	map.type = MT_MINICLEAN;
-	create_mapping(&map);
-#endif
-
-	/*
-	 * Create a mapping for the machine vectors at the high-vectors
-	 * location (0xffff0000).  If we aren't using high-vectors, also
-	 * create a mapping at the low-vectors virtual address.
-	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
-	map.virtual = 0xffff0000;
-	map.length = PAGE_SIZE;
-	map.type = MT_HIGH_VECTORS;
-	create_mapping(&map);
-
-	if (!vectors_high()) {
-		map.virtual = 0;
-		map.type = MT_LOW_VECTORS;
-		create_mapping(&map);
-	}
-
-	/*
-	 * Ask the machine support to map in the statically mapped devices.
-	 */
-	if (mdesc->map_io)
-		mdesc->map_io();
-
-	/*
-	 * Finally flush the caches and tlb to ensure that we're in a
-	 * consistent state wrt the writebuffer.  This also ensures that
-	 * any write-allocated cache lines in the vector page are written
-	 * back.  After this point, we can start to touch devices again.
-	 */
-	local_flush_tlb_all();
-	flush_cache_all();
-}
-
-/*
- * paging_init() sets up the page tables, initialises the zone memory
- * maps, and sets up the zero page, bad page and bad page tables.
- */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
-{
-	void *zero_page;
-
-	build_mem_type_table();
-	bootmem_init(mi);
-	devicemaps_init(mdesc);
-
-	top_pmd = pmd_off_k(0xffff0000);
-
-	/*
-	 * allocate the zero page.  Note that we count on this going ok.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	memzero(zero_page, PAGE_SIZE);
-	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
-}
-
 static inline void free_area(unsigned long addr, unsigned long end, char *s)
 {
 	unsigned int size = (end - addr) >> 10;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8d73ffbce8df292a6ff49bcd9f72b79dc1cd1581..bb2bc9ab6bd38fd06243542effd2017cc4326b8a 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -14,6 +14,9 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 }
 
 struct map_desc;
+struct meminfo;
+struct pglist_data;
 
-void __init build_mem_type_table(void);
 void __init create_mapping(struct map_desc *md);
+void __init bootmem_init(struct meminfo *mi);
+void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mmu.c
similarity index 72%
rename from arch/arm/mm/mm-armv.c
rename to arch/arm/mm/mmu.c
index ee9647823fadf2d2ed0a364ea4bb47a0fb248448..e566cbe4b222cd93186d6d06dd7b279fbbae4563 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mmu.c
@@ -1,30 +1,46 @@
 /*
- *  linux/arch/arm/mm/mm-armv.c
+ *  linux/arch/arm/mm/mmu.c
  *
- *  Copyright (C) 1998-2005 Russell King
+ *  Copyright (C) 1995-2005 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- *  Page table sludge for ARM v3 and v4 processor architectures.
  */
 #include <linux/module.h>
-#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
-#include <linux/highmem.h>
+#include <linux/mman.h>
 #include <linux/nodemask.h>
 
-#include <asm/pgalloc.h>
-#include <asm/page.h>
+#include <asm/mach-types.h>
 #include <asm/setup.h>
-#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+#include <asm/tlb.h>
 
+#include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
 #include "mm.h"
 
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+extern void _stext, __data_start, _end;
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+
+/*
+ * The pmd table for the upper-most set of pages.
+ */
+pmd_t *top_pmd;
+
 #define CPOLICY_UNCACHED	0
 #define CPOLICY_BUFFERED	1
 #define CPOLICY_WRITETHROUGH	2
@@ -99,6 +115,7 @@ static void __init early_cachepolicy(char **p)
 	flush_cache_all();
 	set_cr(cr_alignment);
 }
+__early_param("cachepolicy=", early_cachepolicy);
 
 static void __init early_nocache(char **__unused)
 {
@@ -106,6 +123,7 @@ static void __init early_nocache(char **__unused)
 	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(&p);
 }
+__early_param("nocache", early_nocache);
 
 static void __init early_nowrite(char **__unused)
 {
@@ -113,6 +131,7 @@ static void __init early_nowrite(char **__unused)
 	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
 	early_cachepolicy(&p);
 }
+__early_param("nowb", early_nowrite);
 
 static void __init early_ecc(char **p)
 {
@@ -124,10 +143,6 @@ static void __init early_ecc(char **p)
 		*p += 3;
 	}
 }
-
-__early_param("nocache", early_nocache);
-__early_param("nowb", early_nowrite);
-__early_param("cachepolicy=", early_cachepolicy);
 __early_param("ecc=", early_ecc);
 
 static int __init noalign_setup(char *__unused)
@@ -137,149 +152,8 @@ static int __init noalign_setup(char *__unused)
 	set_cr(cr_alignment);
 	return 1;
 }
-
 __setup("noalign", noalign_setup);
 
-#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
-
-/*
- * need to get a 16k page for level 1
- */
-pgd_t *get_pgd_slow(struct mm_struct *mm)
-{
-	pgd_t *new_pgd, *init_pgd;
-	pmd_t *new_pmd, *init_pmd;
-	pte_t *new_pte, *init_pte;
-
-	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
-	if (!new_pgd)
-		goto no_pgd;
-
-	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
-
-	/*
-	 * Copy over the kernel and IO PGD entries
-	 */
-	init_pgd = pgd_offset_k(0);
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
-
-	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
-
-	if (!vectors_high()) {
-		/*
-		 * On ARM, first page must always be allocated since it
-		 * contains the machine vectors.
-		 */
-		new_pmd = pmd_alloc(mm, new_pgd, 0);
-		if (!new_pmd)
-			goto no_pmd;
-
-		new_pte = pte_alloc_map(mm, new_pmd, 0);
-		if (!new_pte)
-			goto no_pte;
-
-		init_pmd = pmd_offset(init_pgd, 0);
-		init_pte = pte_offset_map_nested(init_pmd, 0);
-		set_pte(new_pte, *init_pte);
-		pte_unmap_nested(init_pte);
-		pte_unmap(new_pte);
-	}
-
-	return new_pgd;
-
-no_pte:
-	pmd_free(new_pmd);
-no_pmd:
-	free_pages((unsigned long)new_pgd, 2);
-no_pgd:
-	return NULL;
-}
-
-void free_pgd_slow(pgd_t *pgd)
-{
-	pmd_t *pmd;
-	struct page *pte;
-
-	if (!pgd)
-		return;
-
-	/* pgd is always present and good */
-	pmd = pmd_off(pgd, 0);
-	if (pmd_none(*pmd))
-		goto free;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		goto free;
-	}
-
-	pte = pmd_page(*pmd);
-	pmd_clear(pmd);
-	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
-	pte_lock_deinit(pte);
-	pte_free(pte);
-	pmd_free(pmd);
-free:
-	free_pages((unsigned long) pgd, 2);
-}
-
-/*
- * Create a SECTION PGD between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  This operates on half-
- * pgdir entry increments.
- */
-static inline void
-alloc_init_section(unsigned long virt, unsigned long phys, int prot)
-{
-	pmd_t *pmdp = pmd_off_k(virt);
-
-	if (virt & (1 << 20))
-		pmdp++;
-
-	*pmdp = __pmd(phys | prot);
-	flush_pmd_entry(pmdp);
-}
-
-/*
- * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
- */
-static inline void
-alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
-{
-	int i;
-
-	for (i = 0; i < 16; i += 1) {
-		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
-
-		virt += (PGDIR_SIZE / 2);
-	}
-}
-
-/*
- * Add a PAGE mapping between VIRT and PHYS in domain
- * DOMAIN with protection PROT.  Note that due to the
- * way we map the PTEs, we must allocate two PTE_SIZE'd
- * blocks - one for the Linux pte table, and one for
- * the hardware pte table.
- */
-static inline void
-alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
-{
-	pmd_t *pmdp = pmd_off_k(virt);
-	pte_t *ptep;
-
-	if (pmd_none(*pmdp)) {
-		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
-					       sizeof(pte_t));
-
-		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
-	}
-	ptep = pte_offset_kernel(pmdp, virt);
-
-	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
-}
-
 struct mem_types {
 	unsigned int	prot_pte;
 	unsigned int	prot_l1;
@@ -344,7 +218,7 @@ static struct mem_types mem_types[] __initdata = {
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-void __init build_mem_type_table(void)
+static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
@@ -481,6 +355,62 @@ void __init build_mem_type_table(void)
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
+/*
+ * Create a SECTION PGD between VIRT and PHYS in domain
+ * DOMAIN with protection PROT.  This operates on half-
+ * pgdir entry increments.
+ */
+static inline void
+alloc_init_section(unsigned long virt, unsigned long phys, int prot)
+{
+	pmd_t *pmdp = pmd_off_k(virt);
+
+	if (virt & (1 << 20))
+		pmdp++;
+
+	*pmdp = __pmd(phys | prot);
+	flush_pmd_entry(pmdp);
+}
+
+/*
+ * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
+ */
+static inline void
+alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
+{
+	int i;
+
+	for (i = 0; i < 16; i += 1) {
+		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
+
+		virt += (PGDIR_SIZE / 2);
+	}
+}
+
+/*
+ * Add a PAGE mapping between VIRT and PHYS in domain
+ * DOMAIN with protection PROT.  Note that due to the
+ * way we map the PTEs, we must allocate two PTE_SIZE'd
+ * blocks - one for the Linux pte table, and one for
+ * the hardware pte table.
+ */
+static inline void
+alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
+{
+	pmd_t *pmdp = pmd_off_k(virt);
+	pte_t *ptep;
+
+	if (pmd_none(*pmdp)) {
+		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
+					       sizeof(pte_t));
+
+		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
+	}
+	ptep = pte_offset_kernel(pmdp, virt);
+
+	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
+}
+
 /*
  * Create the page directory entries and any necessary
  * page tables for the mapping specified by `md'.  We
@@ -610,6 +540,205 @@ void __init create_mapping(struct map_desc *md)
 	}
 }
 
+/*
+ * Create the architecture specific mappings
+ */
+void __init iotable_init(struct map_desc *io_desc, int nr)
+{
+	int i;
+
+	for (i = 0; i < nr; i++)
+		create_mapping(io_desc + i);
+}
+
+static inline void prepare_page_table(struct meminfo *mi)
+{
+	unsigned long addr;
+
+	/*
+	 * Clear out all the mappings below the kernel image.
+	 */
+	for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+#ifdef CONFIG_XIP_KERNEL
+	/* The XIP kernel is mapped in the module area -- skip over it */
+	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+#endif
+	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+	/*
+	 * Clear out all the kernel space mappings, except for the first
+	 * memory bank, up to the end of the vmalloc region.
+	 */
+	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	     addr < VMALLOC_END; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+}
+
+/*
+ * Reserve the various regions of node 0
+ */
+void __init reserve_node_zero(pg_data_t *pgdat)
+{
+	unsigned long res_size = 0;
+
+	/*
+	 * Register the kernel text and data with bootmem.
+	 * Note that this can only be in node 0.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+#else
+	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#endif
+
+	/*
+	 * Reserve the page tables.  These are already in use,
+	 * and can only be in node 0.
+	 */
+	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
+			     PTRS_PER_PGD * sizeof(pgd_t));
+
+	/*
+	 * Hmm... This should go elsewhere, but we really really need to
+	 * stop things allocating the low memory; ideally we need a better
+	 * implementation of GFP_DMA which does not assume that DMA-able
+	 * memory starts at zero.
+	 */
+	if (machine_is_integrator() || machine_is_cintegrator())
+		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+
+	/*
+	 * These should likewise go elsewhere.  They pre-reserve the
+	 * screen memory region at the start of main system memory.
+	 */
+	if (machine_is_edb7211())
+		res_size = 0x00020000;
+	if (machine_is_p720t())
+		res_size = 0x00014000;
+
+#ifdef CONFIG_SA1111
+	/*
+	 * Because of the SA1111 DMA bug, we want to preserve our
+	 * precious DMA-able memory...
+	 */
+	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+#endif
+	if (res_size)
+		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
+}
+
+/*
+ * Set up device the mappings.  Since we clear out the page tables for all
+ * mappings above VMALLOC_END, we will remove any debug device mappings.
+ * This means you have to be careful how you debug this function, or any
+ * called function.  This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
+ */
+static void __init devicemaps_init(struct machine_desc *mdesc)
+{
+	struct map_desc map;
+	unsigned long addr;
+	void *vectors;
+
+	/*
+	 * Allocate the vector page early.
+	 */
+	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	BUG_ON(!vectors);
+
+	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+		pmd_clear(pmd_off_k(addr));
+
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULE_START;
+	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Map the cache flushing regions.
+	 */
+#ifdef FLUSH_BASE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
+	map.virtual = FLUSH_BASE;
+	map.length = SZ_1M;
+	map.type = MT_CACHECLEAN;
+	create_mapping(&map);
+#endif
+#ifdef FLUSH_BASE_MINICACHE
+	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
+	map.virtual = FLUSH_BASE_MINICACHE;
+	map.length = SZ_1M;
+	map.type = MT_MINICLEAN;
+	create_mapping(&map);
+#endif
+
+	/*
+	 * Create a mapping for the machine vectors at the high-vectors
+	 * location (0xffff0000).  If we aren't using high-vectors, also
+	 * create a mapping at the low-vectors virtual address.
+	 */
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.virtual = 0xffff0000;
+	map.length = PAGE_SIZE;
+	map.type = MT_HIGH_VECTORS;
+	create_mapping(&map);
+
+	if (!vectors_high()) {
+		map.virtual = 0;
+		map.type = MT_LOW_VECTORS;
+		create_mapping(&map);
+	}
+
+	/*
+	 * Ask the machine support to map in the statically mapped devices.
+	 */
+	if (mdesc->map_io)
+		mdesc->map_io();
+
+	/*
+	 * Finally flush the caches and tlb to ensure that we're in a
+	 * consistent state wrt the writebuffer.  This also ensures that
+	 * any write-allocated cache lines in the vector page are written
+	 * back.  After this point, we can start to touch devices again.
+	 */
+	local_flush_tlb_all();
+	flush_cache_all();
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+	void *zero_page;
+
+	build_mem_type_table();
+	prepare_page_table(mi);
+	bootmem_init(mi);
+	devicemaps_init(mdesc);
+
+	top_pmd = pmd_off_k(0xffff0000);
+
+	/*
+	 * allocate the zero page.  Note that we count on this going ok.
+	 */
+	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	memzero(zero_page, PAGE_SIZE);
+	empty_zero_page = virt_to_page(zero_page);
+	flush_dcache_page(empty_zero_page);
+}
+
 /*
  * In order to soft-boot, we need to insert a 1:1 mapping in place of
  * the user-mode pages.  This will then ensure that we have predictable
@@ -640,14 +769,3 @@ void setup_mm_for_reboot(char mode)
 		flush_pmd_entry(pmd);
 	}
 }
-
-/*
- * Create the architecture specific mappings
- */
-void __init iotable_init(struct map_desc *io_desc, int nr)
-{
-	int i;
-
-	for (i = 0; i < nr; i++)
-		create_mapping(io_desc + i);
-}
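
The section-mapping helpers moved into mmu.c unchanged. The quirk worth
remembering is that an ARM first-level entry covers 1MB, but the kernel
manages the entries in 2MB pairs, which is why alloc_init_section bumps
the pointer when bit 20 of the virtual address is set. A toy C model of
that arithmetic (the types and the flat table are illustrative only):

	typedef unsigned long pmd_t;

	/* toy model of alloc_init_section over a flat 1MB-section table */
	static void set_section(pmd_t *pmd_table, unsigned long virt,
				unsigned long phys, unsigned long prot)
	{
		pmd_t *pmdp = &pmd_table[(virt >> 21) * 2]; /* 2MB pair base */

		if (virt & (1 << 20))
			pmdp++;			/* odd megabyte: second half */

		*pmdp = (phys & ~0xfffffUL) | prot;	/* 1MB section entry */
	}
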
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1464ed817b5dc87b1d456a898bf66c8bc9b4007f..d0e66424a59720886d81908643ba1431afdd609e 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -11,6 +11,49 @@
 #include <asm/io.h>
 #include <asm/page.h>
 
+#include "mm.h"
+
+extern void _stext, __data_start, _end;
+
+/*
+ * Reserve the various regions of node 0
+ */
+void __init reserve_node_zero(pg_data_t *pgdat)
+{
+	/*
+	 * Register the kernel text and data with bootmem.
+	 * Note that this can only be in node 0.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
+#else
+	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
+#endif
+
+	/*
+	 * Register the exception vector page.
+	 * On some architectures the vectors sit at the start of DRAM;
+	 * reserve that page so the allocator never hands out page "0".
+	 */
+	reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+{
+	bootmem_init(mi);
+}
+
+/*
+ * We don't need to do anything here for nommu machines.
+ */
+void setup_mm_for_reboot(char mode)
+{
+}
+
 void flush_dcache_page(struct page *page)
 {
 	__cpuc_flush_dcache_page(page_address(page));
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
new file mode 100644
index 0000000000000000000000000000000000000000..20c1b0df75f2c386c5966076e7784644f6e4845f
--- /dev/null
+++ b/arch/arm/mm/pgd.c
@@ -0,0 +1,101 @@
+/*
+ *  linux/arch/arm/mm/pgd.c
+ *
+ *  Copyright (C) 1998-2005 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/highmem.h>
+
+#include <asm/pgalloc.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+
+#include "mm.h"
+
+#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
+
+/*
+ * need to get a 16k page for level 1
+ */
+pgd_t *get_pgd_slow(struct mm_struct *mm)
+{
+	pgd_t *new_pgd, *init_pgd;
+	pmd_t *new_pmd, *init_pmd;
+	pte_t *new_pte, *init_pte;
+
+	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
+	if (!new_pgd)
+		goto no_pgd;
+
+	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+
+	/*
+	 * Copy over the kernel and IO PGD entries
+	 */
+	init_pgd = pgd_offset_k(0);
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
+
+	if (!vectors_high()) {
+		/*
+		 * On ARM, first page must always be allocated since it
+		 * contains the machine vectors.
+		 */
+		new_pmd = pmd_alloc(mm, new_pgd, 0);
+		if (!new_pmd)
+			goto no_pmd;
+
+		new_pte = pte_alloc_map(mm, new_pmd, 0);
+		if (!new_pte)
+			goto no_pte;
+
+		init_pmd = pmd_offset(init_pgd, 0);
+		init_pte = pte_offset_map_nested(init_pmd, 0);
+		set_pte(new_pte, *init_pte);
+		pte_unmap_nested(init_pte);
+		pte_unmap(new_pte);
+	}
+
+	return new_pgd;
+
+no_pte:
+	pmd_free(new_pmd);
+no_pmd:
+	free_pages((unsigned long)new_pgd, 2);
+no_pgd:
+	return NULL;
+}
+
+void free_pgd_slow(pgd_t *pgd)
+{
+	pmd_t *pmd;
+	struct page *pte;
+
+	if (!pgd)
+		return;
+
+	/* pgd is always present and good */
+	pmd = pmd_off(pgd, 0);
+	if (pmd_none(*pmd))
+		goto free;
+	if (pmd_bad(*pmd)) {
+		pmd_ERROR(*pmd);
+		pmd_clear(pmd);
+		goto free;
+	}
+
+	pte = pmd_page(*pmd);
+	pmd_clear(pmd);
+	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
+	pte_lock_deinit(pte);
+	pte_free(pte);
+	pmd_free(pmd);
+free:
+	free_pages((unsigned long) pgd, 2);
+}
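
get_pgd_slow now lives in its own file, but its core job is unchanged:
clear the user portion of a fresh 16KB first-level table and copy the
kernel/IO entries from the init table. A simplified C model; the split
value is illustrative (FIRST_KERNEL_PGD_NR depends on the configured
user/kernel split), while PTRS_PER_PGD and the 8-byte entry match ARM's
2048-entry, 2MB-per-entry layout:

	#include <string.h>

	#define PTRS_PER_PGD		2048	/* 2048 x 8 bytes = 16KB */
	#define FIRST_KERNEL_PGD_NR	1536	/* illustrative 3GB/1GB split */

	typedef unsigned long long pgd_t;	/* one entry per 2MB pair */

	static void pgd_init_model(pgd_t *new_pgd, const pgd_t *init_pgd)
	{
		/* user half starts empty, kernel half mirrors init's table */
		memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
		memcpy(new_pgd + FIRST_KERNEL_PGD_NR,
		       init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
	}
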
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
new file mode 100644
index 0000000000000000000000000000000000000000..40713818a87b2701071ef866e3575c5099c1dd7c
--- /dev/null
+++ b/arch/arm/mm/proc-arm740.S
@@ -0,0 +1,174 @@
+/*
+ *  linux/arch/arm/mm/proc-arm740.S: utility functions for ARM740T
+ *
+ *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+
+	.text
+/*
+ * cpu_arm740_proc_init()
+ * cpu_arm740_do_idle()
+ * cpu_arm740_dcache_clean_area()
+ * cpu_arm740_switch_mm()
+ *
+ * These are not required.
+ */
+ENTRY(cpu_arm740_proc_init)
+ENTRY(cpu_arm740_do_idle)
+ENTRY(cpu_arm740_dcache_clean_area)
+ENTRY(cpu_arm740_switch_mm)
+	mov	pc, lr
+
+/*
+ * cpu_arm740_proc_fin()
+ */
+ENTRY(cpu_arm740_proc_fin)
+	stmfd	sp!, {lr}
+	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+	msr	cpsr_c, ip
+	mrc	p15, 0, r0, c1, c0, 0
+	bic	r0, r0, #0x3f000000		@ bank/f/lock/s
+	bic	r0, r0, #0x0000000c		@ w-buffer/cache
+	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
+	mcr	p15, 0, r0, c7, c0, 0		@ invalidate cache
+	ldmfd	sp!, {pc}
+
+/*
+ * cpu_arm740_reset(loc)
+ * Params  : r0 = address to jump to
+ * Notes   : This sets up everything for a reset
+ */
+ENTRY(cpu_arm740_reset)
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c0, 0		@ invalidate cache
+	mrc	p15, 0, ip, c1, c0, 0		@ get ctrl register
+	bic	ip, ip, #0x0000000c		@ ............wc..
+	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
+	mov	pc, r0
+
+	__INIT
+
+	.type	__arm740_setup, #function
+__arm740_setup:
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c0, 0		@ invalidate caches
+
+	mcr	p15, 0, r0, c6, c3		@ disable area 3~7
+	mcr	p15, 0, r0, c6, c4
+	mcr	p15, 0, r0, c6, c5
+	mcr	p15, 0, r0, c6, c6
+	mcr	p15, 0, r0, c6, c7
+
+	mov	r0, #0x0000003F			@ base = 0, size = 4GB
+	mcr	p15, 0, r0, c6,	c0		@ set area 0, default
+
+	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
+	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1			@ shift out one size bit
+	bne	1b				@ loop while size bits remain
+	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c1		@ set area 1, RAM
+
+	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
+	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1			@ shift out one size bit
+	bne	1b				@ loop while size bits remain
+	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c2		@ set area 2, ROM/FLASH
+
+	mov	r0, #0x06
+	mcr	p15, 0, r0, c2, c0		@ Region 1&2 cacheable
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mov	r0, #0x00			@ disable whole write buffer
+#else
+	mov	r0, #0x02			@ Region 1 write bufferred
+#endif
+	mcr	p15, 0, r0, c3, c0
+
+	mov	r0, #0x10000
+	sub	r0, r0, #1			@ r0 = 0xffff
+	mcr	p15, 0, r0, c5, c0		@ all read/write access
+
+	mrc	p15, 0, r0, c1, c0		@ get control register
+	bic	r0, r0, #0x3F000000		@ set to standard caching mode
+						@ need some benchmark
+	orr	r0, r0, #0x0000000d		@ MPU/Cache/WB
+
+	mov	pc, lr
+
+	.size	__arm740_setup, . - __arm740_setup
+
+	__INITDATA
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ *	     come through these
+ */
+	.type	arm740_processor_functions, #object
+ENTRY(arm740_processor_functions)
+	.word	v4t_late_abort
+	.word	cpu_arm740_proc_init
+	.word	cpu_arm740_proc_fin
+	.word	cpu_arm740_reset
+	.word   cpu_arm740_do_idle
+	.word	cpu_arm740_dcache_clean_area
+	.word	cpu_arm740_switch_mm
+	.word	0			@ cpu_*_set_pte
+	.size	arm740_processor_functions, . - arm740_processor_functions
+
+	.section ".rodata"
+
+	.type	cpu_arch_name, #object
+cpu_arch_name:
+	.asciz	"armv4"
+	.size	cpu_arch_name, . - cpu_arch_name
+
+	.type	cpu_elf_name, #object
+cpu_elf_name:
+	.asciz	"v4"
+	.size	cpu_elf_name, . - cpu_elf_name
+
+	.type	cpu_arm740_name, #object
+cpu_arm740_name:
+	.asciz	"ARM740T"
+	.size	cpu_arm740_name, . - cpu_arm740_name
+
+	.align
+
+	.section ".proc.info.init", #alloc, #execinstr
+	.type	__arm740_proc_info,#object
+__arm740_proc_info:
+	.long	0x41807400
+	.long	0xfffffff0
+	.long	0
+	.long	0
+	b	__arm740_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+	.long	cpu_arm740_name
+	.long	arm740_processor_functions
+	.long	0
+	.long	0
+	.long	v3_cache_fns			@ cache model
+	.size	__arm740_proc_info, . - __arm740_proc_info
+
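
The two setup loops above encode an MPU protection region register:
base in bits [31:12], size in bits [5:1] (0b01011 selects 4KB and each
increment doubles it), enable in bit 0. A C rendering of the same math,
assuming a power-of-two size of at least 4KB as the assembly's comments
require:

	/* model of the __arm740_setup area-register computation */
	static unsigned int mpu_region(unsigned int base, unsigned int size)
	{
		unsigned int pages = size >> 12;	/* size in 4KB units */
		unsigned int enc = 10;			/* 11 is the minimum */

		while (pages) {				/* the movs/bne loop */
			enc++;
			pages >>= 1;
		}
		return (base & 0xfffff000U) | (enc << 1) | 1U; /* base|size|en */
	}
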
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
new file mode 100644
index 0000000000000000000000000000000000000000..22d7e3100ea6f38f797b20e58664b285d13ae499
--- /dev/null
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -0,0 +1,249 @@
+/*
+ *  linux/arch/arm/mm/proc-arm7tdmi.S: utility functions for ARM7TDMI
+ *
+ *  Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+
+	.text
+/*
+ * cpu_arm7tdmi_proc_init()
+ * cpu_arm7tdmi_do_idle()
+ * cpu_arm7tdmi_dcache_clean_area()
+ * cpu_arm7tdmi_switch_mm()
+ *
+ * These are not required.
+ */
+ENTRY(cpu_arm7tdmi_proc_init)
+ENTRY(cpu_arm7tdmi_do_idle)
+ENTRY(cpu_arm7tdmi_dcache_clean_area)
+ENTRY(cpu_arm7tdmi_switch_mm)
+		mov	pc, lr
+
+/*
+ * cpu_arm7tdmi_proc_fin()
+ */
+ENTRY(cpu_arm7tdmi_proc_fin)
+		mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+		msr	cpsr_c, r0
+		mov	pc, lr
+
+/*
+ * Function: cpu_arm7tdmi_reset(loc)
+ * Params  : loc(r0)	address to jump to
+ * Purpose : Sets up everything for a reset and jump to the location for soft reset.
+ */
+ENTRY(cpu_arm7tdmi_reset)
+		mov	pc, r0
+
+		__INIT
+
+		.type	__arm7tdmi_setup, #function
+__arm7tdmi_setup:
+		mov	pc, lr
+		.size	__arm7tdmi_setup, . - __arm7tdmi_setup
+
+		__INITDATA
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ *	     come through these
+ */
+		.type	arm7tdmi_processor_functions, #object
+ENTRY(arm7tdmi_processor_functions)
+		.word	v4t_late_abort
+		.word	cpu_arm7tdmi_proc_init
+		.word	cpu_arm7tdmi_proc_fin
+		.word	cpu_arm7tdmi_reset
+		.word	cpu_arm7tdmi_do_idle
+		.word	cpu_arm7tdmi_dcache_clean_area
+		.word	cpu_arm7tdmi_switch_mm
+		.word	0		@ cpu_*_set_pte
+		.size	arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
+
+		.section ".rodata"
+
+		.type	cpu_arch_name, #object
+cpu_arch_name:
+		.asciz	"armv4t"
+		.size	cpu_arch_name, . - cpu_arch_name
+
+		.type	cpu_elf_name, #object
+cpu_elf_name:
+		.asciz	"v4"
+		.size	cpu_elf_name, . - cpu_elf_name
+
+		.type	cpu_arm7tdmi_name, #object
+cpu_arm7tdmi_name:
+		.asciz	"ARM7TDMI"
+		.size	cpu_arm7tdmi_name, . - cpu_arm7tdmi_name
+
+		.type	cpu_triscenda7_name, #object
+cpu_triscenda7_name:
+		.asciz	"Triscend-A7x"
+		.size	cpu_triscenda7_name, . - cpu_triscenda7_name
+
+		.type	cpu_at91_name, #object
+cpu_at91_name:
+		.asciz	"Atmel-AT91M40xxx"
+		.size	cpu_at91_name, . - cpu_at91_name
+
+		.type	cpu_s3c3410_name, #object
+cpu_s3c3410_name:
+		.asciz	"Samsung-S3C3410"
+		.size	cpu_s3c3410_name, . - cpu_s3c3410_name
+
+		.type	cpu_s3c44b0x_name, #object
+cpu_s3c44b0x_name:
+		.asciz	"Samsung-S3C44B0x"
+		.size	cpu_s3c44b0x_name, . - cpu_s3c44b0x_name
+
+		.type	cpu_s3c4510b_name, #object
+cpu_s3c4510b_name:
+		.asciz	"Samsung-S3C4510B"
+		.size	cpu_s3c4510b_name, . - cpu_s3c4510b_name
+
+		.type	cpu_s3c4530_name, #object
+cpu_s3c4530_name:
+		.asciz	"Samsung-S3C4530"
+		.size	cpu_s3c4530_name, . - cpu_s3c4530_name
+
+		.type	cpu_netarm_name, #object
+cpu_netarm_name:
+		.asciz	"NETARM"
+		.size	cpu_netarm_name, . - cpu_netarm_name
+
+		.align
+
+		.section ".proc.info.init", #alloc, #execinstr
+
+		.type	__arm7tdmi_proc_info, #object
+__arm7tdmi_proc_info:
+		.long	0x41007700
+		.long	0xfff8ff00
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_26BIT
+		.long	cpu_arm7tdmi_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__arm7tdmi_proc_info, . - __arm7tdmi_proc_info
+
+		.type	__triscenda7_proc_info, #object
+__triscenda7_proc_info:
+		.long	0x0001d2ff
+		.long	0x0001ffff
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_triscenda7_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__triscenda7_proc_info, . - __triscenda7_proc_info
+
+		.type	__at91_proc_info, #object
+__at91_proc_info:
+		.long	0x14000040
+		.long	0xfff000e0
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_at91_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__at91_proc_info, . - __at91_proc_info
+
+		.type	__s3c4510b_proc_info, #object
+__s3c4510b_proc_info:
+		.long	0x36365000
+		.long	0xfffff000
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_s3c4510b_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__s3c4510b_proc_info, . - __s3c4510b_proc_info
+
+		.type	__s3c4530_proc_info, #object
+__s3c4530_proc_info:
+		.long	0x4c000000
+		.long	0xfff000e0
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_s3c4530_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__s3c4530_proc_info, . - __s3c4530_proc_info
+
+		.type	__s3c3410_proc_info, #object
+__s3c3410_proc_info:
+		.long	0x34100000
+		.long	0xffff0000
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_s3c3410_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__s3c3410_proc_info, . - __s3c3410_proc_info
+
+		.type	__s3c44b0x_proc_info, #object
+__s3c44b0x_proc_info:
+		.long	0x44b00000
+		.long	0xffff0000
+		.long	0
+		.long	0
+		b	__arm7tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_s3c44b0x_name
+		.long	arm7tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__s3c44b0x_proc_info, . - __s3c44b0x_proc_info
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
new file mode 100644
index 0000000000000000000000000000000000000000..2397f4b6e15149f31b9b317904b5f03dc3b41fe6
--- /dev/null
+++ b/arch/arm/mm/proc-arm940.S
@@ -0,0 +1,369 @@
+/*
+ *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
+ *
+ *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+
+/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
+#define CACHE_DLINESIZE	16
+#define CACHE_DSEGMENTS	4
+#define CACHE_DENTRIES	64
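+/* sanity check: 4 segments x 64 entries x 16 bytes/line = 4KB */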
+
+	.text
+/*
+ * cpu_arm940_proc_init()
+ * cpu_arm940_switch_mm()
+ *
+ * These are not required.
+ */
+ENTRY(cpu_arm940_proc_init)
+ENTRY(cpu_arm940_switch_mm)
+	mov	pc, lr
+
+/*
+ * cpu_arm940_proc_fin()
+ */
+ENTRY(cpu_arm940_proc_fin)
+	stmfd	sp!, {lr}
+	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+	msr	cpsr_c, ip
+	bl	arm940_flush_kern_cache_all
+	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
+	bic	r0, r0, #0x00001000		@ i-cache
+	bic	r0, r0, #0x00000004		@ d-cache
+	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
+	ldmfd	sp!, {pc}
+
+/*
+ * cpu_arm940_reset(loc)
+ * Params  : r0 = address to jump to
+ * Notes   : This sets up everything for a reset
+ */
+ENTRY(cpu_arm940_reset)
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
+	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
+	bic	ip, ip, #0x00000005		@ .............c.p
+	bic	ip, ip, #0x00001000		@ i-cache
+	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
+	mov	pc, r0
+
+/*
+ * cpu_arm940_do_idle()
+ */
+	.align	5
+ENTRY(cpu_arm940_do_idle)
+	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
+	mov	pc, lr
+
+/*
+ *	flush_user_cache_all()
+ */
+ENTRY(arm940_flush_user_cache_all)
+	/* FALLTHROUGH */
+
+/*
+ *	flush_kern_cache_all()
+ *
+ *	Clean and invalidate the entire cache.
+ */
+ENTRY(arm940_flush_kern_cache_all)
+	mov	r2, #VM_EXEC
+	/* FALLTHROUGH */
+
+/*
+ *	flush_user_cache_range(start, end, flags)
+ *
+ *	There is no efficient way to flush a range of cache entries
+ *	in the specified address range, so the whole cache is flushed.
+ *
+ *	- start	- start address (inclusive)
+ *	- end	- end address (exclusive)
+ *	- flags	- vm_flags describing address space
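+ *
+ *	The c7, c14, 2 index format used below places the segment
+ *	number at bit 4 and the line index at bit 26 (as implied by
+ *	the shifts in the loop).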
+ */
+ENTRY(arm940_flush_user_cache_range)
+	mov	ip, #0
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
+#else
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+#endif
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	coherent_kern_range(start, end)
+ *
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm940_coherent_kern_range)
+	/* FALLTHROUGH */
+
+/*
+ *	coherent_user_range(start, end)
+ *
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm940_coherent_user_range)
+	/* FALLTHROUGH */
+
+/*
+ *	flush_kern_dcache_page(void *page)
+ *
+ *	Ensure no D cache aliasing occurs, either with itself or
+ *	the I cache
+ *
+ *	- addr	- page aligned address
+ */
+ENTRY(arm940_flush_kern_dcache_page)
+	mov	ip, #0
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_inv_range(start, end)
+ *
+ *	There is no efficient way to invalidate a specified virtual
+ *	address range, so the whole cache is invalidated.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm940_dma_inv_range)
+	mov	ip, #0
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_clean_range(start, end)
+ *
+ *	There is no efficient way to clean a specified virtual
+ *	address range, so the whole cache is cleaned.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm940_dma_clean_range)
+ENTRY(cpu_arm940_dcache_clean_area)
+	mov	ip, #0
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+#endif
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_flush_range(start, end)
+ *
+ *	There is no efficient way to clean and invalidate a specified
+ *	virtual address range, so the whole cache is processed.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm940_dma_flush_range)
+	mov	ip, #0
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
+2:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
+#else
+	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
+#endif
+	subs	r3, r3, #1 << 26
+	bcs	2b				@ entries 63 to 0
+	subs	r1, r1, #1 << 4
+	bcs	1b				@ segments 3 to 0
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+ENTRY(arm940_cache_fns)
+	.long	arm940_flush_kern_cache_all
+	.long	arm940_flush_user_cache_all
+	.long	arm940_flush_user_cache_range
+	.long	arm940_coherent_kern_range
+	.long	arm940_coherent_user_range
+	.long	arm940_flush_kern_dcache_page
+	.long	arm940_dma_inv_range
+	.long	arm940_dma_clean_range
+	.long	arm940_dma_flush_range
+
+	__INIT
+
+	.type	__arm940_setup, #function
+__arm940_setup:
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+
+	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3-7
+	mcr	p15, 0, r0, c6, c4, 0
+	mcr	p15, 0, r0, c6, c5, 0
+	mcr	p15, 0, r0, c6, c6, 0
+	mcr	p15, 0, r0, c6, c7, 0
+
+	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3-7
+	mcr	p15, 0, r0, c6, c4, 1
+	mcr	p15, 0, r0, c6, c5, 1
+	mcr	p15, 0, r0, c6, c6, 1
+	mcr	p15, 0, r0, c6, c7, 1
+
+	mov	r0, #0x0000003F			@ base = 0, size = 4GB
+	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
+	mcr	p15, 0, r0, c6,	c0, 1
+
+	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
+	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1
+	bne	1b				@ loop while shifted size is non-zero
+	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c1, 0		@ set area 1, RAM
+	mcr	p15, 0, r0, c6,	c1, 1
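+	@ Worked example (illustrative): for 64MB of DRAM, r1 starts as
+	@ 0x4000 (2^14), the loop above shifts it to zero in 15 steps
+	@ leaving r2 = 25, and the encoded region size is 2^(25+1) = 64MB.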
+
+	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
+	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1
+	bne	1b				@ loop while shifted size is non-zero
+	orr	r0, r0, r2, lsl #1		@ the area register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c2, 0		@ set area 2, ROM/FLASH
+	mcr	p15, 0, r0, c6,	c2, 1
+
+	mov	r0, #0x06
+	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
+	mcr	p15, 0, r0, c2, c0, 1
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mov	r0, #0x00			@ disable whole write buffer
+#else
+	mov	r0, #0x02			@ Region 1 write buffered
+#endif
+	mcr	p15, 0, r0, c3, c0, 0
+
+	mov	r0, #0x10000
+	sub	r0, r0, #1			@ r0 = 0xffff
+	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
+	mcr	p15, 0, r0, c5, c0, 1
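+	@ 0xffff = eight 2-bit AP fields, all b11 (full access), loaded
+	@ into both the data (c5,c0,0) and instruction (c5,c0,1) registers.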
+
+	mrc	p15, 0, r0, c1, c0		@ get control register
+	orr	r0, r0, #0x00001000		@ I-cache
+	orr	r0, r0, #0x00000005		@ MPU/D-cache
+
+	mov	pc, lr
+
+	.size	__arm940_setup, . - __arm940_setup
+
+	__INITDATA
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ *	     come through these
+ */
+	.type	arm940_processor_functions, #object
+ENTRY(arm940_processor_functions)
+	.word	nommu_early_abort
+	.word	cpu_arm940_proc_init
+	.word	cpu_arm940_proc_fin
+	.word	cpu_arm940_reset
+	.word	cpu_arm940_do_idle
+	.word	cpu_arm940_dcache_clean_area
+	.word	cpu_arm940_switch_mm
+	.word	0		@ cpu_*_set_pte
+	.size	arm940_processor_functions, . - arm940_processor_functions
+
+	.section ".rodata"
+
+	.type	cpu_arch_name, #object
+cpu_arch_name:
+	.asciz	"armv4t"
+	.size	cpu_arch_name, . - cpu_arch_name
+
+	.type	cpu_elf_name, #object
+cpu_elf_name:
+	.asciz	"v4"
+	.size	cpu_elf_name, . - cpu_elf_name
+
+	.type	cpu_arm940_name, #object
+cpu_arm940_name:
+	.asciz	"ARM940T"
+	.size	cpu_arm940_name, . - cpu_arm940_name
+
+	.align
+
+	.section ".proc.info.init", #alloc, #execinstr
+
+	.type	__arm940_proc_info,#object
+__arm940_proc_info:
+	.long	0x41009400
+	.long	0xff00fff0
+	.long	0
+	.long	0
+	b	__arm940_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
+	.long	cpu_arm940_name
+	.long	arm940_processor_functions
+	.long	0
+	.long	0
+	.long	arm940_cache_fns
+	.size	__arm940_proc_info, . - __arm940_proc_info
+
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
new file mode 100644
index 0000000000000000000000000000000000000000..e186175644213ab6b505343ea32c2f84ceb062f9
--- /dev/null
+++ b/arch/arm/mm/proc-arm946.S
@@ -0,0 +1,424 @@
+/*
+ *  linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S
+ *
+ *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
+ *
+ *  (Many of the cache routines are derived from proc-arm926.S)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+
+/*
+ * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache,
+ * comprising 256 lines of 32 bytes (8 words).
+ */
+#define CACHE_DSIZE	(CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */
+#define CACHE_DLINESIZE	32			/* fixed */
+#define CACHE_DSEGMENTS	4			/* fixed */
+#define CACHE_DENTRIES	(CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE)
+#define CACHE_DLIMIT	(CACHE_DSIZE * 4)	/* benchmark needed */
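+/*
+ * Example (illustrative): with the typical CONFIG_CPU_DCACHE_SIZE of
+ * 8KB, CACHE_DENTRIES = 8192 / 4 / 32 = 64 and CACHE_DLIMIT = 32KB.
+ */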
+
+	.text
+/*
+ * cpu_arm946_proc_init()
+ * cpu_arm946_switch_mm()
+ *
+ * These are not required.
+ */
+ENTRY(cpu_arm946_proc_init)
+ENTRY(cpu_arm946_switch_mm)
+	mov	pc, lr
+
+/*
+ * cpu_arm946_proc_fin()
+ */
+ENTRY(cpu_arm946_proc_fin)
+	stmfd	sp!, {lr}
+	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+	msr	cpsr_c, ip
+	bl	arm946_flush_kern_cache_all
+	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
+	bic	r0, r0, #0x00001000		@ i-cache
+	bic	r0, r0, #0x00000004		@ d-cache
+	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
+	ldmfd	sp!, {pc}
+
+/*
+ * cpu_arm946_reset(loc)
+ * Params  : r0 = address to jump to
+ * Notes   : This sets up everything for a reset
+ */
+ENTRY(cpu_arm946_reset)
+	mov	ip, #0
+	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
+	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
+	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
+	bic	ip, ip, #0x00000005		@ .............c.p
+	bic	ip, ip, #0x00001000		@ i-cache
+	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
+	mov	pc, r0
+
+/*
+ * cpu_arm946_do_idle()
+ */
+	.align	5
+ENTRY(cpu_arm946_do_idle)
+	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
+	mov	pc, lr
+
+/*
+ *	flush_user_cache_all()
+ */
+ENTRY(arm946_flush_user_cache_all)
+	/* FALLTHROUGH */
+
+/*
+ *	flush_kern_cache_all()
+ *
+ *	Clean and invalidate the entire cache.
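+ *
+ *	The c7, c14, 2 index format used below places the segment
+ *	number at bit 29 and the line index at bit 4 (as implied by
+ *	the shifts in the loop).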
+ */
+ENTRY(arm946_flush_kern_cache_all)
+	mov	r2, #VM_EXEC
+	mov	ip, #0
+__flush_whole_cache:
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
+#else
+	mov	r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments
+1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries
+2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
+	subs	r3, r3, #1 << 4
+	bcs	2b				@ entries n to 0
+	subs	r1, r1, #1 << 29
+	bcs	1b				@ segments 3 to 0
+#endif
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, ip, c7, c5, 0		@ flush I cache
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	flush_user_cache_range(start, end, flags)
+ *
+ *	Clean and invalidate a range of cache entries in the
+ *	specified address range.
+ *
+ *	- start	- start address (inclusive)
+ *	- end	- end address (exclusive)
+ *	- flags	- vm_flags describing address space
+ * (same as arm926)
+ */
+ENTRY(arm946_flush_user_cache_range)
+	mov	ip, #0
+	sub	r3, r1, r0			@ calculate total size
+	cmp	r3, #CACHE_DLIMIT
+	bhs	__flush_whole_cache
+
+1:	tst	r2, #VM_EXEC
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+#else
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
+	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+#endif
+	cmp	r0, r1
+	blo	1b
+	tst	r2, #VM_EXEC
+	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	coherent_kern_range(start, end)
+ *
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ */
+ENTRY(arm946_coherent_kern_range)
+	/* FALLTHROUGH */
+
+/*
+ *	coherent_user_range(start, end)
+ *
+ *	Ensure coherency between the Icache and the Dcache in the
+ *	region described by start, end.  If you have non-snooping
+ *	Harvard caches, you need to implement this function.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ * (same as arm926)
+ */
+ENTRY(arm946_coherent_user_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	flush_kern_dcache_page(void *page)
+ *
+ *	Ensure no D cache aliasing occurs, either with itself or
+ *	the I cache
+ *
+ *	- addr	- page aligned address
+ * (same as arm926)
+ */
+ENTRY(arm946_flush_kern_dcache_page)
+	add	r1, r0, #PAGE_SZ
+1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_inv_range(start, end)
+ *
+ *	Invalidate (discard) the specified virtual address range.
+ *	May not write back any entries.  If 'start' or 'end'
+ *	are not cache line aligned, those lines must be written
+ *	back.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ * (same as arm926)
+ */
+ENTRY(arm946_dma_inv_range)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	tst	r0, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
+	tst	r1, #CACHE_DLINESIZE - 1
+	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
+#endif
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_clean_range(start, end)
+ *
+ *	Clean the specified virtual address range.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ *
+ * (same as arm926)
+ */
+ENTRY(arm946_dma_clean_range)
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+/*
+ *	dma_flush_range(start, end)
+ *
+ *	Clean and invalidate the specified virtual address range.
+ *
+ *	- start	- virtual start address
+ *	- end	- virtual end address
+ *
+ * (same as arm926)
+ */
+ENTRY(arm946_dma_flush_range)
+	bic	r0, r0, #CACHE_DLINESIZE - 1
+1:
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
+#else
+	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+#endif
+	add	r0, r0, #CACHE_DLINESIZE
+	cmp	r0, r1
+	blo	1b
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+ENTRY(arm946_cache_fns)
+	.long	arm946_flush_kern_cache_all
+	.long	arm946_flush_user_cache_all
+	.long	arm946_flush_user_cache_range
+	.long	arm946_coherent_kern_range
+	.long	arm946_coherent_user_range
+	.long	arm946_flush_kern_dcache_page
+	.long	arm946_dma_inv_range
+	.long	arm946_dma_clean_range
+	.long	arm946_dma_flush_range
+
+ENTRY(cpu_arm946_dcache_clean_area)
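+	@ r0 = start address, r1 = size in bytes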
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	add	r0, r0, #CACHE_DLINESIZE
+	subs	r1, r1, #CACHE_DLINESIZE
+	bhi	1b
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+	mov	pc, lr
+
+	__INIT
+
+	.type	__arm946_setup, #function
+__arm946_setup:
+	mov	r0, #0
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
+	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
+	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
+
+	mcr	p15, 0, r0, c6, c3, 0		@ disable memory regions 3-7
+	mcr	p15, 0, r0, c6, c4, 0
+	mcr	p15, 0, r0, c6, c5, 0
+	mcr	p15, 0, r0, c6, c6, 0
+	mcr	p15, 0, r0, c6, c7, 0
+
+	mov	r0, #0x0000003F			@ base = 0, size = 4GB
+	mcr	p15, 0, r0, c6,	c0, 0		@ set region 0, default
+
+	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
+	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1
+	bne	1b				@ loop while shifted size is non-zero
+	orr	r0, r0, r2, lsl #1		@ the region register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c1, 0		@ set region 1, RAM
+
+	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
+	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
+	mov	r2, #10				@ 11 is the minimum (4KB)
+1:	add	r2, r2, #1			@ area size *= 2
+	movs	r1, r1, lsr #1
+	bne	1b				@ loop while shifted size is non-zero
+	orr	r0, r0, r2, lsl #1		@ the region register value
+	orr	r0, r0, #1			@ set enable bit
+	mcr	p15, 0, r0, c6,	c2, 0		@ set region 2, ROM/FLASH
+
+	mov	r0, #0x06
+	mcr	p15, 0, r0, c2, c0, 0		@ region 1,2 d-cacheable
+	mcr	p15, 0, r0, c2, c0, 1		@ region 1,2 i-cacheable
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+	mov	r0, #0x00			@ disable whole write buffer
+#else
+	mov	r0, #0x02			@ region 1 write buffered
+#endif
+	mcr	p15, 0, r0, c3, c0, 0
+
+/*
+ *  Access Permission Settings for future permission control by PU.
+ *
+ *				priv.	user
+ * 	region 0 (whole)	rw	--	: b0001
+ * 	region 1 (RAM)		rw	rw	: b0011
+ * 	region 2 (FLASH)	rw	r-	: b0010
+ *	region 3~7 (none)	--	--	: b0000
+ */
+	mov	r0, #0x00000031
+	orr	r0, r0, #0x00000200
+	mcr	p15, 0, r0, c5, c0, 2		@ set data access permission
+	mcr	p15, 0, r0, c5, c0, 3		@ set inst. access permission
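+	@ i.e. r0 = 0x231, one 4-bit field per region in the extended
+	@ access permission registers: region0 = b0001, region1 = b0011,
+	@ region2 = b0010, matching the table above.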
+
+	mrc	p15, 0, r0, c1, c0		@ get control register
+	orr	r0, r0, #0x00001000		@ I-cache
+	orr	r0, r0, #0x00000005		@ MPU/D-cache
+#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
+	orr	r0, r0, #0x00004000		@ .1.. .... .... ....
+#endif
+	mov	pc, lr
+
+	.size	__arm946_setup, . - __arm946_setup
+
+	__INITDATA
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ *	     come through these
+ */
+	.type	arm946_processor_functions, #object
+ENTRY(arm946_processor_functions)
+	.word	nommu_early_abort
+	.word	cpu_arm946_proc_init
+	.word	cpu_arm946_proc_fin
+	.word	cpu_arm946_reset
+	.word	cpu_arm946_do_idle
+	.word	cpu_arm946_dcache_clean_area
+	.word	cpu_arm946_switch_mm
+	.word	0		@ cpu_*_set_pte
+	.size	arm946_processor_functions, . - arm946_processor_functions
+
+	.section ".rodata"
+
+	.type	cpu_arch_name, #object
+cpu_arch_name:
+	.asciz	"armv5te"
+	.size	cpu_arch_name, . - cpu_arch_name
+
+	.type	cpu_elf_name, #object
+cpu_elf_name:
+	.asciz	"v5t"
+	.size	cpu_elf_name, . - cpu_elf_name
+
+	.type	cpu_arm946_name, #object
+cpu_arm946_name:
+	.asciz	"ARM946E-S"
+	.size	cpu_arm946_name, . - cpu_arm946_name
+
+	.align
+
+	.section ".proc.info.init", #alloc, #execinstr
+	.type	__arm946_proc_info,#object
+__arm946_proc_info:
+	.long	0x41009460
+	.long	0xff00fff0
+	.long	0
+	.long	0
+	b	__arm946_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
+	.long	cpu_arm946_name
+	.long	arm946_processor_functions
+	.long	0
+	.long	0
+	.long	arm946_cache_fns
+	.size	__arm946_proc_info, . - __arm946_proc_info
+
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
new file mode 100644
index 0000000000000000000000000000000000000000..918ebf65d4f696e32bba0a5516950faa0839c211
--- /dev/null
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -0,0 +1,134 @@
+/*
+ *  linux/arch/arm/mm/proc-arm9tdmi.S: utility functions for ARM9TDMI
+ *
+ *  Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/procinfo.h>
+#include <asm/ptrace.h>
+
+	.text
+/*
+ * cpu_arm9tdmi_proc_init()
+ * cpu_arm9tdmi_do_idle()
+ * cpu_arm9tdmi_dcache_clean_area()
+ * cpu_arm9tdmi_switch_mm()
+ *
+ * These are not required.
+ */
+ENTRY(cpu_arm9tdmi_proc_init)
+ENTRY(cpu_arm9tdmi_do_idle)
+ENTRY(cpu_arm9tdmi_dcache_clean_area)
+ENTRY(cpu_arm9tdmi_switch_mm)
+		mov	pc, lr
+
+/*
+ * cpu_arm9tdmi_proc_fin()
+ */
+ENTRY(cpu_arm9tdmi_proc_fin)
+		mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
+		msr	cpsr_c, r0
+		mov	pc, lr
+
+/*
+ * Function: cpu_arm9tdmi_reset(loc)
+ * Params  : loc(r0)	address to jump to
+ * Purpose : Sets up everything for a reset and jumps to the given location for a soft reset.
+ */
+ENTRY(cpu_arm9tdmi_reset)
+		mov	pc, r0
+
+		__INIT
+
+		.type	__arm9tdmi_setup, #function
+__arm9tdmi_setup:
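+		@ ARM9TDMI has no CP15 control register, cache or MPU,
+		@ so there is nothing to configure here.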
+		mov	pc, lr
+		.size	__arm9tdmi_setup, . - __arm9tdmi_setup
+
+		__INITDATA
+
+/*
+ * Purpose : Function pointers used to access above functions - all calls
+ *	     come through these
+ */
+		.type	arm9tdmi_processor_functions, #object
+ENTRY(arm9tdmi_processor_functions)
+		.word	nommu_early_abort
+		.word	cpu_arm9tdmi_proc_init
+		.word	cpu_arm9tdmi_proc_fin
+		.word	cpu_arm9tdmi_reset
+		.word	cpu_arm9tdmi_do_idle
+		.word	cpu_arm9tdmi_dcache_clean_area
+		.word	cpu_arm9tdmi_switch_mm
+		.word	0		@ cpu_*_set_pte
+		.size	arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
+
+		.section ".rodata"
+
+		.type	cpu_arch_name, #object
+cpu_arch_name:
+		.asciz	"armv4t"
+		.size	cpu_arch_name, . - cpu_arch_name
+
+		.type	cpu_elf_name, #object
+cpu_elf_name:
+		.asciz	"v4"
+		.size	cpu_elf_name, . - cpu_elf_name
+
+		.type	cpu_arm9tdmi_name, #object
+cpu_arm9tdmi_name:
+		.asciz	"ARM9TDMI"
+		.size	cpu_arm9tdmi_name, . - cpu_arm9tdmi_name
+
+		.type	cpu_p2001_name, #object
+cpu_p2001_name:
+		.asciz	"P2001"
+		.size	cpu_p2001_name, . - cpu_p2001_name
+
+		.align
+
+		.section ".proc.info.init", #alloc, #execinstr
+
+		.type	__arm9tdmi_proc_info, #object
+__arm9tdmi_proc_info:
+		.long	0x41009900
+		.long	0xfff8ff00
+		.long	0
+		.long	0
+		b	__arm9tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_arm9tdmi_name
+		.long	arm9tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__arm9tdmi_proc_info, . - __arm9tdmi_proc_info
+
+		.type	__p2001_proc_info, #object
+__p2001_proc_info:
+		.long	0x41029000
+		.long	0xffffffff
+		.long	0
+		.long	0
+		b	__arm9tdmi_setup
+		.long	cpu_arch_name
+		.long	cpu_elf_name
+		.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
+		.long	cpu_p2001_name
+		.long	arm9tdmi_processor_functions
+		.long	0
+		.long	0
+		.long	v4_cache_fns
+		.size	__p2001_proc_info, . - __p2001_proc_info
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index e4a2569c636c688da3da32b7ff73fde1105a3fcd..f0845646aacb34a05332f7b7348cbbec436ed9f1 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -25,7 +25,7 @@
 #undef _CACHE
 #undef MULTI_CACHE
 
-#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
+#if defined(CONFIG_CPU_CACHE_V3)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
@@ -33,7 +33,7 @@
 # endif
 #endif
 
-#if defined(CONFIG_CPU_ARM720T)
+#if defined(CONFIG_CPU_CACHE_V4)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
@@ -54,7 +54,23 @@
 # endif
 #endif
 
-#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
+#if defined(CONFIG_CPU_ARM940T)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm940
+# endif
+#endif
+
+#if defined(CONFIG_CPU_ARM946E)
+# ifdef _CACHE
+#  define MULTI_CACHE 1
+# else
+#  define _CACHE arm946
+# endif
+#endif
+
+#if defined(CONFIG_CPU_CACHE_V4WB)
 # ifdef _CACHE
 #  define MULTI_CACHE 1
 # else
diff --git a/include/asm-arm/proc-fns.h b/include/asm-arm/proc-fns.h
index 1bde92cdaebd01739c3db199a438f47a7ffd0493..ea7e54c319be535ed1777640f0e1740b36c748a5 100644
--- a/include/asm-arm/proc-fns.h
+++ b/include/asm-arm/proc-fns.h
@@ -33,6 +33,14 @@
 #   define CPU_NAME cpu_arm6
 #  endif
 # endif
+# ifdef CONFIG_CPU_ARM7TDMI
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm7tdmi
+#  endif
+# endif
 # ifdef CONFIG_CPU_ARM710
 #  ifdef CPU_NAME
 #   undef  MULTI_CPU
@@ -49,6 +57,22 @@
 #   define CPU_NAME cpu_arm720
 #  endif
 # endif
+# ifdef CONFIG_CPU_ARM740T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm740
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM9TDMI
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm9tdmi
+#  endif
+# endif
 # ifdef CONFIG_CPU_ARM920T
 #  ifdef CPU_NAME
 #   undef  MULTI_CPU
@@ -81,6 +105,22 @@
 #   define CPU_NAME cpu_arm926
 #  endif
 # endif
+# ifdef CONFIG_CPU_ARM940T
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm940
+#  endif
+# endif
+# ifdef CONFIG_CPU_ARM946E
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_arm946
+#  endif
+# endif
 # ifdef CONFIG_CPU_SA110
 #  ifdef CPU_NAME
 #   undef  MULTI_CPU
diff --git a/include/asm-arm/setup.h b/include/asm-arm/setup.h
index ea3ed24652333e8dbcc3020074a626050895ed84..aa4b5782f0c9723cf1d604ca7756a09bea8e6829 100644
--- a/include/asm-arm/setup.h
+++ b/include/asm-arm/setup.h
@@ -194,13 +194,15 @@ static struct tagtable __tagtable_##fn __tag = { tag, fn }
 # define NR_BANKS 8
 #endif
 
+struct membank {
+	unsigned long start;
+	unsigned long size;
+	int           node;
+};
+
 struct meminfo {
 	int nr_banks;
-	struct {
-		unsigned long start;
-		unsigned long size;
-		int           node;
-	} bank[NR_BANKS];
+	struct membank bank[NR_BANKS];
 };
 
 /*
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index c19c5b009f71f0409e47bafbe53c4933f19d402f..f05fbe31576cbeabb2b6d717698d7cebfdc63b4e 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -46,6 +46,7 @@
 #define CPUID_TCM	2
 #define CPUID_TLBTYPE	3
 
+#ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)							\
 	({								\
 		unsigned int __val;					\
@@ -55,6 +56,9 @@
 		    : "cc");						\
 		__val;							\
 	})
+#else
+#define read_cpuid(reg) (processor_id)
+#endif
 
 /*
  * This is used to ensure the compiler did actually allocate the register we