Commit d0a21265 authored by David Rientjes, committed by Linus Torvalds

mm: unify module_alloc code for vmalloc

Four architectures (arm, mips, sparc, x86) use __vmalloc_area() in their
module_alloc() implementations.  Much of the code is duplicated and can be
generalized into a globally accessible function, __vmalloc_node_range().

__vmalloc_node() now calls into __vmalloc_node_range() with a range of
[VMALLOC_START, VMALLOC_END) for functionally equivalent behavior.

Each architecture may then use __vmalloc_node_range() directly to remove
the duplication of code.
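
To illustrate the unified pattern (a sketch mirroring the arm hunk below; the
address range and page protection vary per architecture), an architecture's
module_alloc() reduces to a single call into the new helper:

	void *module_alloc(unsigned long size)
	{
		return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
					GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
					__builtin_return_address(0));
	}
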
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ec3f64fc
@@ -38,17 +38,9 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				__builtin_return_address(0));
 }
 #else /* CONFIG_MMU */
 void *module_alloc(unsigned long size)
@@ -46,17 +46,9 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
 #ifdef MODULE_START
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 #else
 	if (size == 0)
 		return NULL;
@@ -23,17 +23,11 @@
 static void *module_map(unsigned long size)
 {
-	struct vm_struct *area;
-
-	size = PAGE_ALIGN(size);
-	if (!size || size > MODULES_LEN)
-		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL, PAGE_KERNEL, -1,
+				__builtin_return_address(0));
 }
 
 static char *dot2underscore(char *name)
@@ -37,20 +37,11 @@
 void *module_alloc(unsigned long size)
 {
-	struct vm_struct *area;
-
-	if (!size)
-		return NULL;
-	size = PAGE_ALIGN(size);
-	if (size > MODULES_LEN)
+	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
-
-	area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-	if (!area)
-		return NULL;
-
-	return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
-				PAGE_KERNEL_EXEC);
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
+				-1, __builtin_return_address(0));
 }
 
 /* Free memory returned from module_alloc */
@@ -59,8 +59,9 @@ extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
-			pgprot_t prot);
+extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller);
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -1530,25 +1530,12 @@ fail:
 	return NULL;
 }
 
-void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
-{
-	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
-					 __builtin_return_address(0));
-
-	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
-	 */
-	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);
-
-	return addr;
-}
-
 /**
- * __vmalloc_node - allocate virtually contiguous memory
+ * __vmalloc_node_range - allocate virtually contiguous memory
  * @size:	allocation size
  * @align:	desired alignment
+ * @start:	vm area range start
+ * @end:	vm area range end
  * @gfp_mask:	flags for the page level allocator
  * @prot:	protection mask for the allocated pages
  * @node:	node to use for allocation or -1
@@ -1558,9 +1545,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  * allocator with @gfp_mask flags.  Map them into contiguous
  * kernel virtual space, using a pagetable protection of @prot.
  */
-static void *__vmalloc_node(unsigned long size, unsigned long align,
-			    gfp_t gfp_mask, pgprot_t prot,
-			    int node, void *caller)
+void *__vmalloc_node_range(unsigned long size, unsigned long align,
+			unsigned long start, unsigned long end, gfp_t gfp_mask,
+			pgprot_t prot, int node, void *caller)
 {
 	struct vm_struct *area;
 	void *addr;
@@ -1570,8 +1557,8 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
 		return NULL;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
-				  VMALLOC_END, node, gfp_mask, caller);
+	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
+				  gfp_mask, caller);
 	if (!area)
 		return NULL;
@@ -1588,6 +1575,27 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 	return addr;
 }
 
+/**
+ * __vmalloc_node - allocate virtually contiguous memory
+ * @size:	allocation size
+ * @align:	desired alignment
+ * @gfp_mask:	flags for the page level allocator
+ * @prot:	protection mask for the allocated pages
+ * @node:	node to use for allocation or -1
+ * @caller:	caller's return address
+ *
+ * Allocate enough pages to cover @size from the page level
+ * allocator with @gfp_mask flags.  Map them into contiguous
+ * kernel virtual space, using a pagetable protection of @prot.
+ */
+static void *__vmalloc_node(unsigned long size, unsigned long align,
+			    gfp_t gfp_mask, pgprot_t prot,
+			    int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+				    gfp_mask, prot, node, caller);
+}
+
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
 	return __vmalloc_node(size, 1, gfp_mask, prot, -1,