Commit fa0aea92 authored by Charlie Jacobsen's avatar Charlie Jacobsen Committed by Vikram Narayanan

liblcd-v2: Heap allocator internals in place, first draft.

Uses the "generalized" allocator to manage 16 MB of guest physical
address space for a heap. Only the initial 4 MB will be backed by real
memory; further host memory is pulled in on demand at 4 MB granularity.

Some tuning of parameters will be necessary soon.
parent 9829af71
......@@ -41,4 +41,27 @@ int __liblcd_mem_itree_insert(gpa_t start, unsigned long size,
*/
void __liblcd_mem_itree_delete(struct lcd_resource_node *n);
/*
 * HEAP CONFIGURATION --------------------------------------------------
 *
 * The heap spans 2^12 pages, i.e. 16 MB of guest physical address space.
 *
 * The minimum allocation from the heap is one page, 4 KB (order 0).
 *
 * The maximum allocation from the heap is 4 MB (order 10 = 1024 pages).
 */
#define LCD_HEAP_NR_PAGES_ORDER 12
#define LCD_HEAP_SIZE (1UL << (LCD_HEAP_NR_PAGES_ORDER + PAGE_SHIFT))
#define LCD_HEAP_MIN_ORDER 0
#define LCD_HEAP_MAX_ORDER 10
/* MEMORY SUBSYSTEM INTERNALS ------------------------------ */
/**
 * __liblcd_mem_init -- Call during boot after mem itree initialized
 *
 * Sets up the heap: page allocator, struct page array, and kmalloc.
 *
 * Returns 0 on success, negative error code on failure.
 */
int __liblcd_mem_init(void);
#endif /* LCD_DOMAINS_LIBLCD_H */
......@@ -8,13 +8,6 @@
#ifndef LCD_DOMAINS_TYPES_H
#define LCD_DOMAINS_TYPES_H
/**
* Return value to signal that a host/non-isolated resource (like a page)
* is not part of the capability system (i.e., is not in any thread's
* cspace).
*/
#define LCD_ERR_NOT_VOLUNTEERED (-512)
/*
* Extra reserved cnodes:
*
......
......@@ -69,28 +69,28 @@ struct lcd_page_allocator;
/*
 * Callbacks through which the page allocator obtains and releases
 * backing memory for metadata and regular chunks.
 *
 * NOTE(review): this span is a diff artifact -- for each callback the
 * old unnamed parameter list appears alongside the new named one; only
 * the named prototypes are the current version.
 */
struct lcd_page_allocator_cbs {
	int (*alloc_map_metadata_memory_chunk)(
		struct lcd_page_allocator_cbs*,
		unsigned long,
		unsigned int,
		struct lcd_resource_node**);
		struct lcd_page_allocator_cbs *this,
		unsigned long mapping_offset,
		unsigned int alloc_order,
		struct lcd_resource_node **n_out);
	void (*free_unmap_metadata_memory_chunk)(
		struct lcd_page_allocator_cbs*,
		struct lcd_resource_node*);
		struct lcd_page_allocator_cbs *this,
		struct lcd_resource_node *n_to_delete);
	int (*alloc_map_regular_mem_chunk)(
		struct lcd_page_allocator *,
		struct lcd_page_block *,
		unsigned long,
		unsigned int,
		struct lcd_resource_node **);
		struct lcd_page_allocator *this_page_allocator,
		struct lcd_page_block *destination_page_block,
		unsigned long mapping_offset,
		unsigned int alloc_order,
		struct lcd_resource_node **n_out);
	void (*free_unmap_regular_mem_chunk)(
		struct lcd_page_allocator *,
		struct lcd_page_block *,
		struct lcd_resource_node *,
		unsigned long,
		unsigned int);
		struct lcd_page_allocator *this_page_allocator,
		struct lcd_page_block *containing_page_block,
		struct lcd_resource_node *n_to_delete,
		unsigned long mapping_offset,
		unsigned int memory_order);
};
/**
......@@ -217,16 +217,16 @@ struct lcd_page_allocator {
* -- alloc_map_regular_mem_chunk
* This will be called when:
* 1 - It is non-NULL; and
* 2 - 2^max_order pages are free; and
* 3 - We try to allocate pages, and this leads to
* allocating those 2^max_order pages, or some
* 2 - 2^max_order page blocks are free; and
* 3 - We try to allocate, and this leads to
* allocating those 2^max_order page blocks, or some
* subset thereof
* This is the demand paging in action. We want to allocate
* pages from a 2^max_order-page region of memory, but it's
* not backed by real memory. This function should allocate
* the real memory, and map it in so the pages become backed.
*
* This function will provided with the calling page allocator,
* This function will be provided with the calling page allocator,
* the page block at the beginning of the 2^max_order chunk,
* the offset into the memory area where the memory should be
* mapped, and the size (for now, will always be
......
......@@ -7,10 +7,15 @@
#include <lcd_config/pre_hook.h>
#include <linux/mm.h>
#include <liblcd/mem.h>
#include <liblcd/allocator.h>
#include <lcd_config/post_hook.h>
/* Page allocator managing the heap's guest physical region. */
struct lcd_page_allocator *heap_allocator;
/* struct page metadata array, one entry per heap page. */
struct page *heap_page_array;
/* LOW-LEVEL SYSCALLS -------------------------------------------------- */
int _lcd_alloc_pages_exact_node(int nid, unsigned int flags, unsigned int order,
......@@ -160,13 +165,221 @@ void _lcd_munmap(cptr_t mo, gpa_t base)
lcd_syscall_munmap(mo);
}
/* PAGE ALLOCATOR -------------------------------------------------- */
/* PAGE ALLOCATOR INTERNALS ---------------------------------------- */
/*
 * Allocate host pages and map them into the heap's guest physical
 * region at @mapping_offset; returns the resource node covering the
 * new mapping through @n_out. 0 on success, negative error otherwise.
 */
static int
heap_alloc_map_metadata_memory_chunk(struct lcd_page_allocator_cbs *cbs,
				unsigned long mapping_offset,
				unsigned int alloc_order,
				struct lcd_resource_node **n_out)
{
	int err;
	cptr_t pages_cptr;
	gpa_t heap_gpa = gpa_add(LCD_HEAP_GP_ADDR, mapping_offset);
	/* Ask the microkernel for fresh host pages */
	err = _lcd_alloc_pages(0, alloc_order, &pages_cptr);
	if (err) {
		LIBLCD_ERR("low level alloc failed");
		goto out;
	}
	/* Map them in guest physical at the right heap offset */
	err = _lcd_mmap(pages_cptr, alloc_order, heap_gpa);
	if (err) {
		LIBLCD_ERR("low level mmap failed");
		goto out_free;
	}
	/* Hand back the resource node that now covers the mapping */
	err = lcd_phys_to_resource_node(heap_gpa, n_out);
	if (err) {
		LIBLCD_ERR("failed to get new resource node");
		goto out_unmap;
	}
	return 0;

out_unmap:
	_lcd_munmap(pages_cptr, heap_gpa);
out_free:
	lcd_cap_delete(pages_cptr);
out:
	return err;
}
/*
 * Tear down a heap chunk: unmap it from guest physical, drop its
 * node from the resource interval tree, and release the capability
 * (freeing the host pages).
 */
static void
heap_free_unmap_metadata_memory_chunk(struct lcd_page_allocator_cbs *cbs,
				struct lcd_resource_node *n_to_delete)
{
	cptr_t pages_cptr = n_to_delete->cptr;
	gpa_t region_base = __gpa(lcd_resource_node_start(n_to_delete));

	/* Undo the guest physical mapping */
	_lcd_munmap(pages_cptr, region_base);

	/* Forget the node in the resource tree */
	__liblcd_mem_itree_delete(n_to_delete);

	/* Delete the capability, returning the pages to the host */
	lcd_cap_delete(pages_cptr);
}
/*
 * Regular chunks are currently allocated and mapped exactly like
 * metadata chunks, so this just delegates.
 */
static int
heap_alloc_map_regular_mem_chunk(struct lcd_page_allocator *pa,
				struct lcd_page_block *dest_blocks,
				unsigned long mapping_offset,
				unsigned int alloc_order,
				struct lcd_resource_node **n_out)
{
	return heap_alloc_map_metadata_memory_chunk(&pa->cbs,
						mapping_offset,
						alloc_order,
						n_out);
}
/*
 * Freeing a regular chunk is currently identical to freeing a
 * metadata chunk, so this just delegates.
 */
static void
heap_free_unmap_regular_mem_chunk(struct lcd_page_allocator *pa,
				struct lcd_page_block *page_blocks,
				struct lcd_resource_node *n_to_delete,
				unsigned long mapping_offset,
				unsigned int order)
{
	heap_free_unmap_metadata_memory_chunk(&pa->cbs, n_to_delete);
}
/*
 * Backing-memory callbacks for the heap's page allocator. Regular
 * chunks currently reuse the metadata implementations via the thin
 * wrappers above.
 */
struct lcd_page_allocator_cbs heap_page_allocator_cbs = {
	.alloc_map_metadata_memory_chunk = heap_alloc_map_metadata_memory_chunk,
	.free_unmap_metadata_memory_chunk = heap_free_unmap_metadata_memory_chunk,
	.alloc_map_regular_mem_chunk = heap_alloc_map_regular_mem_chunk,
	.free_unmap_regular_mem_chunk = heap_free_unmap_regular_mem_chunk,
};
/* Guest virtual address of the first page of page block @pb. */
static inline gva_t heap_page_block_to_addr(struct lcd_page_block *pb)
{
	unsigned long offset = lcd_page_block_to_offset(heap_allocator, pb);
	return gva_add(LCD_HEAP_GV_ADDR, offset);
}
/* Page block containing heap guest virtual address @addr. */
static inline struct lcd_page_block *heap_addr_to_page_block(gva_t addr)
{
	unsigned long offset = gva_val(addr) - gva_val(LCD_HEAP_GV_ADDR);
	return lcd_offset_to_page_block(heap_allocator, offset);
}
/* struct page entry for the heap page containing @addr. */
static inline struct page *heap_addr_to_struct_page(gva_t addr)
{
	unsigned long byte_offset;

	byte_offset = gva_val(addr) - gva_val(LCD_HEAP_GV_ADDR);
	return heap_page_array + (byte_offset >> PAGE_SHIFT);
}
/* Guest virtual address of the heap page described by @p. */
static inline gva_t heap_struct_page_to_addr(struct page *p)
{
	unsigned long idx = p - heap_page_array;
	return gva_add(LCD_HEAP_GV_ADDR, idx * PAGE_SIZE);
}
/*
 * setup_struct_page_array -- Allocate and zero the heap's struct page
 * metadata array, carving it out of the heap itself.
 *
 * Returns 0 on success, -ENOMEM if the page-block allocation fails.
 */
static int setup_struct_page_array(void)
{
	int ret; /* fix: was used (ret = -ENOMEM; return ret;) but never declared */
	struct lcd_page_block *pb;
	unsigned int order;
	unsigned long bytes;
	/*
	 * Bytes of struct page metadata needed to cover the whole heap,
	 * rounded up to a power of two so it maps to a whole-order alloc.
	 */
	bytes = roundup_pow_of_two((1UL << LCD_HEAP_NR_PAGES_ORDER) *
				sizeof(struct page));
	order = ilog2(bytes >> PAGE_SHIFT);
	/*
	 * Allocate the backing page blocks from the heap allocator
	 */
	pb = lcd_page_allocator_alloc(heap_allocator, order);
	if (!pb) {
		LIBLCD_ERR("error setting up struct page array for heap");
		ret = -ENOMEM;
		goto fail1;
	}
	/*
	 * Zero out the array (unnecessary right now, but just in case)
	 *
	 * NOTE(review): this calls lcd_page_block_to_addr, while the
	 * heap-specific heap_page_block_to_addr helper is defined above;
	 * confirm the generic function exists and yields a heap gva.
	 */
	heap_page_array = (void *)gva_val(lcd_page_block_to_addr(pb));
	memset(heap_page_array,
		0,
		(1 << (order + PAGE_SHIFT)));

	return 0;

fail1:
	return ret;
}
/* Defined by the slab allocator; not exposed through a header here. */
void cpucache_init(void);

/*
 * Bring up the slab/kmalloc machinery on top of the page allocator:
 * early cache init, late init, then per-cpu cache setup.
 */
static void __init_refok kmalloc_init(void)
{
	kmem_cache_init();
	kmem_cache_init_late();
	cpucache_init();
}
/*
 * init_heap -- Bring up the heap region.
 *
 * Creates the page allocator over the heap's guest physical region,
 * carves out the struct page array, initializes kmalloc, and finally
 * tells the mem itree it can start using kmalloc for its nodes.
 *
 * Returns 0 on success, negative error code otherwise; on failure the
 * page allocator is torn down.
 */
static int init_heap(void)
{
	int ret;
	/*
	 * Create new page allocator in heap region
	 *
	 * NOTE(review): LCD_HEAP_MAX_ORDER is passed twice -- presumably
	 * one is the max alloc order and the other a metadata/chunk
	 * order; confirm against lcd_page_allocator_create's signature.
	 */
	ret = lcd_page_allocator_create(LCD_HEAP_SIZE,
				LCD_HEAP_MIN_ORDER,
				LCD_HEAP_MAX_ORDER,
				LCD_HEAP_MAX_ORDER,
				&heap_page_allocator_cbs,
				1, /* embed metadata */
				&heap_allocator);
	if (ret) {
		LIBLCD_ERR("error initializing heap allocator");
		goto fail1;
	}
	/*
	 * Set up struct page array
	 */
	ret = setup_struct_page_array();
	if (ret) {
		LIBLCD_ERR("error setting up struct page array for heap");
		goto fail2;
	}
	/*
	 * Initialize kmalloc
	 */
	kmalloc_init();
	/*
	 * Inform mem itree the page and slab allocators are up (and so
	 * it can start using kmalloc for allocating nodes)
	 */
	__liblcd_mem_itree_booted();

	return 0;

fail2:
	lcd_page_allocator_destroy(heap_allocator);
	heap_allocator = NULL;
fail1:
	return ret;
}
/* VOLUNTEERING -------------------------------------------------- */
......@@ -224,6 +437,16 @@ gva_t lcd_gpa2gva(gpa_t gpa)
/*
 * Boot-time entry point for the memory subsystem; currently just
 * brings up the heap. Returns 0 on success, negative error otherwise.
 */
int __liblcd_mem_init(void)
{
	int err;

	err = init_heap();
	if (err) {
		LIBLCD_ERR("heap init failed");
		return err;
	}
	return 0;
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment