diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index e8a53552b13df147753efed1084b8a96744d667f..d0cadb33b54c1088f4eb609a28409fecf7ed7c74 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -224,7 +224,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		return;
 	if (!enable)
 		mutex_debug_check_no_locks_freed(page_address(page),
-						 page_address(page+numpages));
+						 numpages * PAGE_SIZE);
 
 	/* the return value is ignored - the calls cannot fail,
 	 * large pages are disabled at boot time.
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3f1fafc0245e7eebf0ee45334d3c7c3ff77f4a89..e53d2c6fd5f4b0ef2321d12d7a6d75bf7c9dae4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1027,7 +1027,7 @@ kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!PageHighMem(page) && !enable)
 		mutex_debug_check_no_locks_freed(page_address(page),
-						 page_address(page + numpages));
+						 numpages * PAGE_SIZE);
 }
 #endif
 
diff --git a/include/linux/mutex-debug.h b/include/linux/mutex-debug.h
index 8138d9eb58ec1c7e64b801fdbb47edb97e21df3d..8b5769f00467c6ea172112d6a02b80f5f8c370ca 100644
--- a/include/linux/mutex-debug.h
+++ b/include/linux/mutex-debug.h
@@ -18,6 +18,6 @@ extern void FASTCALL(mutex_destroy(struct mutex *lock));
 extern void mutex_debug_show_all_locks(void);
 extern void mutex_debug_show_held_locks(struct task_struct *filter);
 extern void mutex_debug_check_no_locks_held(struct task_struct *task);
-extern void mutex_debug_check_no_locks_freed(const void *from, const void *to);
+extern void mutex_debug_check_no_locks_freed(const void *from, unsigned long len);
 
 #endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index f1c84b1252f5d208f14c18ba7f9fe0b8e890bb81..f1ac507fa20da323800bf32393a0c77c5ad64dde 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -79,7 +79,7 @@ struct mutex_waiter {
 # define mutex_debug_show_all_locks()			do { } while (0)
 # define mutex_debug_show_held_locks(p)			do { } while (0)
 # define mutex_debug_check_no_locks_held(task)		do { } while (0)
-# define mutex_debug_check_no_locks_freed(from, to)	do { } while (0)
+# define mutex_debug_check_no_locks_freed(from, len)	do { } while (0)
 #endif
 
 #define __MUTEX_INITIALIZER(lockname) \
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 6f829058ae4a7ee17ae8f3859669e9836d59906d..f4913c3769505a10a0bfd169b6e2ad5c0f25f48c 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -333,9 +333,10 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
  * is destroyed or reinitialized - this code checks whether there is
- * any held lock in the memory range of <from> to <to>:
+ * any held lock in the memory range of <from> to <from>+<len>:
  */
-void mutex_debug_check_no_locks_freed(const void *from, const void *to)
+void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
 {
 	struct list_head *curr, *next;
+	const void *to = from + len;
 	unsigned long flags;
 	struct mutex *lock;
 	void *lock_addr;
@@ -437,7 +438,7 @@ void debug_mutex_init(struct mutex *lock, const char *name)
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
-	mutex_debug_check_no_locks_freed((void *)lock, (void *)(lock + 1));
+	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->owner = NULL;
 	INIT_LIST_HEAD(&lock->held_list);
 	lock->name = name;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5e6891f7bb6f366025e56dd701f9794d29ce67d..8e363536e2da327f602a4a82d3dc36ed237919bc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -417,7 +417,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	arch_free_page(page, order);
 	if (!PageHighMem(page))
 		mutex_debug_check_no_locks_freed(page_address(page),
-			page_address(page+(1<<order)));
+						 PAGE_SIZE << order);
 
 #ifndef CONFIG_MMU
 	for (i = 1 ; i < (1 << order) ; ++i)
diff --git a/mm/slab.c b/mm/slab.c
index 33aab345cd4a0fddbd0c20b5763cfe9aae194162..9374293a301297edef94491494db8e58f591ba82 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3071,7 +3071,7 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = page_get_cache(virt_to_page(objp));
-	mutex_debug_check_no_locks_freed(objp, objp+obj_reallen(c));
+	mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
 	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
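
For reference, the conversion the patch performs is purely mechanical: callers that
used to pass an end pointer (page_address(page+numpages), lock + 1, objp+obj_reallen(c))
now pass the byte length of the region instead, and the callee reconstructs the end
pointer internally as from + len. The sketch below is illustrative only, not kernel
code: range_contains() and the small main() harness are made-up stand-ins for the real
check, which walks the global held-locks list under a spinlock and reports any lock
whose address falls inside the freed range.

#include <stdio.h>

/*
 * Minimal userspace sketch of the new (start, length) convention used by
 * mutex_debug_check_no_locks_freed(): the end of the freed region is
 * derived from the length, and an address is tested against [from, from+len).
 */
static int range_contains(const void *from, unsigned long len,
			  const void *lock_addr)
{
	const char *start = from;
	const char *end = start + len;	/* reconstructs the old "to" argument */
	const char *addr = lock_addr;

	return addr >= start && addr < end;
}

int main(void)
{
	char freed_region[64];
	char *inside = freed_region + 16;
	char *outside = freed_region + sizeof(freed_region);	/* one past the end */

	/* callers now pass byte lengths, e.g. sizeof(*lock) or PAGE_SIZE << order */
	printf("inside:  %d\n", range_contains(freed_region, sizeof(freed_region), inside));
	printf("outside: %d\n", range_contains(freed_region, sizeof(freed_region), outside));
	return 0;
}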