Commit 0ae93fbc authored by Charlie Jacobsen's avatar Charlie Jacobsen Committed by Vikram Narayanan
Browse files

generalized-allocator: Add page block to offset conversion functions.

parent 267f0dfa
......@@ -2,60 +2,60 @@
* allocator.h
*
* Code for creating instances of general buddy
allocators for mem mgt of different memory area
types. (Allocator metadata may be embedded inside the memory
region itself, but never inside the alloc'd chunks.)
*
* An lcd_page_allocator is similar in some ways to
* a Linux zone - it's for managing one contiguous
* chunk of similar data.
*
* This code provides the option for demand paging like
* functionality. You might wonder - why not fault in memory
* on EPT faults? Because this would bring the microkernel
* into the picture, and we don't want the microkernel doing
* internal LCD mem mgt. (It's possible for an LCD to handle
* certain EPT faults as a virtualization exception,
* but that's for another day ... and that may not be efficient for
* a general allocator? would also need background thingamajig for
* shrinking faulted in mem that's not being used)
* shrinking faulted in mem that's not being used).
*/
#include <linux/page.h>
#include <linux/list.h>
#include <lcd-domains/liblcd.h>
/* CAN'T ALLOC IN MEM REGION? - WHAT IF REGION IS UNCACHEABLE? */
/* Make it optional / possible to insert into mem region during init.
* -- initializer calls out to get memory for metadata
* -- callee allocs mem and maps it; maybe return value signals
* to insert into itself - takes mem, zeros it out, init resource
* tree, insert mem into tree; what about when allocator is freed?
* - during destroy,
* -- problem - alloc tree nodes during heap init . */
/*
 * Buddy free-list state for one allocator instance.
 * Orders run from min_order up to max_order, inclusive.
 */
struct lcd_free_lists {
unsigned int min_order, notify_order, max_order;
struct lcd_mem_chunk *free_lists;
unsigned int nr_free;
/* Called when we try to alloc a mem chunk mc on a 2^(notify_order)
 * boundary and mc's resource node n is NULL (i.e. the chunk is not
 * yet backed by real memory). Gives the user a chance to alloc and
 * map backing memory. May be NULL if no notification is wanted.
 */
int (*alloc_notify)(struct lcd_free_lists *fl,
struct lcd_mem_chunk *mc);
/* Called when freeing mc if mc's resource node n is non-NULL and
 * mc's order is >= n's order. Gives the user a chance to e.g. free
 * the memory that was backing this chunk. May be NULL.
 */
void (*free_notify)(struct lcd_free_lists *fl,
struct lcd_mem_chunk *mc);
};
/* STRUCT DEFS -------------------------------------------------- */
/*
 * Per-page-block metadata, one per 2^min_order chunk of the managed
 * memory area (analogous to struct page for a Linux zone).
 *
 * Fix: the diff rendering left `buddy_list` declared twice, which is a
 * redeclaration error; a struct member may appear only once.
 */
struct lcd_page_block {
	/* Linkage in the allocator's buddy free lists; empty when in use. */
	struct list_head buddy_list;
	/* Buddy order of this block (meaningful on the head block). */
	unsigned int order;
	/* Resource node backing this block, or NULL if not backed yet. */
	struct lcd_resource_node *n;
};
struct allocator_callbacks {
/* alloc notify */
/* free notify */
/* init alloc */
/* exit dealloc */
struct lcd_page_allocator_cbs {
int (*alloc_map_metadata_memory_chunk)(
struct lcd_page_allocator_cbs*,
unsigned int,
unsigned long,
struct lcd_resource_node**);
void (*free_unmap_metadata_memory_chunk)(
struct lcd_page_allocator_cbs*,
struct lcd_resource_node*);
int (*alloc_map_regular_mem_chunk)(
struct lcd_page_allocator *,
struct lcd_page_block *,
unsigned long offset,
unsigned int order,
struct lcd_resource_node **);
void (*free_unmap_regular_mem_chunk)(
struct lcd_page_allocator *,
struct lcd_page_block *,
struct lcd_resource_node *,
unsigned long,
unsigned int);
};
struct lcd_page_allocator {
......@@ -64,7 +64,9 @@ struct lcd_page_allocator {
unsigned int min_order;
unsigned int metadata_malloc_order;
unsigned int max_order;
int embed_metadata;
unsigned long first_non_metadata_pb_idx;
struct list_head *free_lists;
unsigned long nr_page_blocks_free;
......@@ -74,98 +76,29 @@ struct lcd_page_allocator {
const struct lcd_page_allocator_cbs *cbs;
};
/* INTERFACE -------------------------------------------------- */
int mk_page_allocator(
unsigned long nr_pages_order,
unsigned int min_order,
unsigned int backing_order,
unsigned int max_order,
const struct allocator_callbacks *cbs);
int lcd_page_allocator_create(unsigned long nr_pages_order,
unsigned int min_order,
unsigned int metadata_malloc_order,
unsigned int max_order,
const struct lcd_page_allocator_cbs *cbs,
int embed_metadata,
struct lcd_page_allocator **pa_out);
/* Alloc 2^order mem chunks */
struct lcd_mem_chunk* lcd_page_allocator_alloc(struct lcd_page_allocator *a,
unsigned int order);
void lcd_page_allocator_destroy(struct lcd_page_allocator *pa);
void lcd_page_allocator_free(struct lcd_page_allocator *a,
struct lcd_mem_chunk *base,
struct lcd_page_block*
lcd_page_allocator_alloc(struct lcd_page_allocator *pa,
unsigned int order);
void lcd_page_allocator_destroy(struct lcd_page_allocator *a);
unsigned long to_offset(struct lcd_mem_chunk *c);
struct lcd_mem_chunk* to_chunk(unsigned long offset);
/*
* chicken and egg - tree root and node(s) before allocator is set up; provide
* special slab user can alloc tree root and nodes from.
*
* what about tear down? user may malloc later nodes, so will use kfree; and
* as user is tearing down tree, what if they delete the cap to pages that
* contain nodes in the tree? ans: need to use flags on nodes to indicate
* how they were allocated; if node is flagged as e.g. "embedded", don't use
* kfree, only way to really "free" it is to destroy the page allocator
* and return pages to microkernel.
*/
/* Usage examples */
void lcd_page_allocator_free(struct lcd_page_allocator *pa,
struct lcd_page_block *pb,
unsigned int order);
/* heap setup: nr pages order = 24 (16 mb's); min order = 1,
* backing order = 10, max order = 11
*
* init alloc: allocator says give me 2^10 pages of mem;
* 2^10 = 1 MB which is enough to hold meta data with
* some left over
*
* allocator then writes metadata to pages, then
* calls alloc on itself for required pages to hold
* metadata
*
* after allocator init'd, heap alloc's enough mem for struct page array
* for all 16 MBs, zero's it out.
*
* alloc notify: allocator calls when tries to alloc page on
* 2^(notify order) boundary; heap will alloc
* 2^(notify order) pages from mk, put in resource
* tree, and store in mem chunk
*
* free notify: free them
*
*
* lcd_page_alloc --> lcd_page_allocator_alloc, then heap does
* to_offset on chunk, then idx's into struct page array
* note that size of chunks must be 1
*
* lcd_page_free --> get page idx, then calc offset into heap, then
* use to_chunk to get lcd_mem_chunk; then lcd_page_allocator_free,
* passing along order
*
* virt_to_head_page --> virt = phys; phys - heap base = offset;
* offset >> PAGE_SHIFT; then idx into page array
*
* page_address --> (page - page base) * 4096 = offset;
* phys = heap base + offset; virt = phys
*/
unsigned long lcd_page_block_to_offset(struct lcd_page_allocator *pa,
struct lcd_page_block *pb);
/*
* ioremap: nr pages order = 34 (16 gb's); min order = 10 (1 MB),
* backing order = 12 (don't alloc notify), max order = 16 (64 MBs)
*
* (I don't know now that ioremap needs to be this big ...)
*
* alloc_notify = null
*
* ioremap_phys --> lcd_page_allocator_alloc( size to fit map )
* then insert resource into tree, attach node to lcd_mem_chunk
* that is returned
*
* free_notify = ioremapper removes node from tree
*
* iounmap_phys: phys addr - ioremap base = offset; to_chunk(offset) = c;
* lcd_page_allocator_free(c); fires free_notify, we get tree
* lookup fast and free (of course, we could have done the removal
* ourselves after looking up c)
*/
struct lcd_page_block*
lcd_offset_to_page_block(struct lcd_page_allocator *pa,
unsigned long offset);
......@@ -15,17 +15,22 @@
/* MISC ------------------------------------------------------------ */
unsigned int pa_nr_free_lists(struct lcd_page_allocator *pa)
static unsigned int pa_nr_free_lists(struct lcd_page_allocator *pa)
{
return pa->max_order - pa->min_order + 1;
}
unsigned long pa_nr_page_blocks(struct lcd_page_allocator *pa)
static unsigned long pa_nr_page_blocks(struct lcd_page_allocator *pa)
{
/* 2^nr_pages_order / 2^min_order */
return (1UL << (pa->nr_pages_order - pa->min_order));
}
static int pb_is_free(struct lcd_page_block *pb)
{
return !list_empty(&buddy->buddy_list);
}
/* ALLOC ------------------------------------------------------------ */
static int pb_backing(struct lcd_page_allocator *pa,
......@@ -176,7 +181,7 @@ static inline int pb_is_buddy(struct lcd_page_block *pb,
struct lcd_page_block *buddy,
int order)
{
if (list_empty(&buddy->buddy_list)) {
if (!pb_is_free(buddy)) {
/* Not in a free list; buddy is in use. */
return 0;
}
......@@ -420,6 +425,12 @@ static int alloc_metadata_page_blocks(struct lcd_page_allocator *pa,
return -EIO;
}
pb->n = n;
/*
* Remember offset to first non-metadata page block
*/
pa->first_non_metadata_pb_idx =
(pb + (1UL << tail_order) - pa->pb_array) /
sizeof(struct lcd_page_block);
/*
* All page blocks that contain metadata have been allocated,
* and resource nodes have been installed.
......@@ -509,7 +520,7 @@ static void free_metadata(void *metadata_addr,
LIBLCD_ERR("error looking up metadata resource node for gpa = %lx", gpa_val(addr));
continue;
}
cbs->free_unmap_metadata_memory_chunk(n);
cbs->free_unmap_metadata_memory_chunk(cbs, n);
}
}
......@@ -539,7 +550,8 @@ static void* malloc_metadata(unsigned int alloc_order,
/*
* Allocate and map a memory chunk
*/
ret = cbs->alloc_map_metadata_memory_chunk(alloc_order,
ret = cbs->alloc_map_metadata_memory_chunk(cbs,
alloc_order,
i * (1UL << alloc_order),
&n);
if (ret) {
......@@ -558,7 +570,7 @@ static void* malloc_metadata(unsigned int alloc_order,
fail1:
for (j = 0; j < i; j++)
cbs->free_unmap_metadata_memory_chunk(n);
cbs->free_unmap_metadata_memory_chunk(cbs, n);
return NULL;
}
......@@ -721,6 +733,32 @@ fail1:
}
/*
 * Tear down a page allocator. Frees every still-allocated page block
 * so that the user's free callbacks fire for any memory backing them.
 * The metadata itself is NOT freed here; that is the caller's job.
 */
void lcd_page_allocator_destroy(struct lcd_page_allocator *pa)
{
struct lcd_page_block *cursor;
unsigned long i;
cursor = pa->pb_array;
/*
 * Iterate over all page blocks, and if it's not free,
 * free it. This may move neighbors ahead in the
 * giant array into the free lists.
 *
 * We do this so that it triggers the free callbacks.
 *
 * Skip over metadata (if we try to delete resources that contain
 * metadata while we're tearing everything down, we'll crash).
 *
 * NOTE(review): the loop starts at pa->embed_metadata_offset, but the
 * field added to struct lcd_page_allocator in this commit is named
 * first_non_metadata_pb_idx — confirm these are the same field (and
 * that it is an index, not a byte offset). Also confirm that passing
 * order 0 to lcd_page_allocator_free is correct for blocks allocated
 * at higher orders — TODO verify against lcd_page_allocator_free.
 */
for (i = pa->embed_metadata_offset; i < pa_nr_page_blocks(pa); i++) {
if (!pb_is_free(&cursor[i]))
lcd_page_allocator_free(pa, &cursor[i], 0);
}
/*
 * The caller is responsible for freeing the metadata
 */
return;
}
void lcd_page_allocator_free(struct lcd_page_allocator *pa,
struct lcd_page_block *pb,
unsigned int order)
......@@ -753,3 +791,21 @@ fail2:
fail1:
return NULL;
}
/*
 * Convert a page block pointer to the byte offset of the memory it
 * represents within the allocator's managed area. Inverse of
 * lcd_offset_to_page_block.
 *
 * Fix: pointer subtraction on struct pointers already yields the
 * element index; dividing by sizeof(struct lcd_page_block) again gave
 * a wrong (too small) index and broke the inverse relationship with
 * lcd_offset_to_page_block, which indexes pb_array directly by
 * (offset >> min_order).
 */
unsigned long lcd_page_block_to_offset(struct lcd_page_allocator *pa,
				struct lcd_page_block *pb)
{
	unsigned long idx;
	idx = pb - pa->pb_array;
	/* Each page block covers 2^min_order bytes of the area. */
	return idx << pa->min_order;
}
/*
 * Look up the page block that covers the given byte offset within the
 * allocator's managed area. Inverse of lcd_page_block_to_offset.
 */
struct lcd_page_block*
lcd_offset_to_page_block(struct lcd_page_allocator *pa,
			unsigned long offset)
{
	/* Each page block covers 2^min_order bytes, so the array index
	 * is the offset with the low min_order bits shifted away. */
	unsigned long idx = offset >> pa->min_order;
	return pa->pb_array + idx;
}
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment