Commit 92e45ef3 authored by Charlie Jacobsen's avatar Charlie Jacobsen Committed by Vikram Narayanan
Browse files

Adds support for passing short strings back and forth.

Simple illustrative example in test-mods/string_example. Ping-pongs
a string 'hello' back and forth a few times.

The string/data needs to be contained inside a single page
for now. This should hopefully handle most of our immediate
use cases.

Idea: The sender grants the receiver access to the page that
contains the data. The receiver maps it in their address space. This
is clearly too slow for the main data path, and it's a bit hacky.
But it works for strings that are passed in control plane interactions
(rather than trying to cram the darn thing inside the message buffer).

Wiki to be updated soon with more details.
parent d8a32d9c
......@@ -33,8 +33,19 @@ void klcd_exit(int retval);
int klcd_page_alloc(cptr_t *slot_out, gpa_t gpa);
int klcd_pages_alloc(cptr_t *slots_out, hpa_t *hp_base_out,
hva_t *hv_base_out, unsigned order);
int klcd_phys_addr_to_page_cptr(unsigned long data,
unsigned long len,
cptr_t *page_cptr_out,
unsigned long *offset_out);
int klcd_virt_addr_to_page_cptr(char *data, unsigned long len,
cptr_t *page_cptr_out,
unsigned long *offset_out);
int klcd_addr_to_page_cptr_cleanup(cptr_t page_cptr);
int klcd_gfp(cptr_t *slot_out, gpa_t *gpa_out, gva_t *gva_out);
int klcd_free_page(cptr_t page);
int klcd_map_pages_phys(cptr_t *pages, gpa_t *base_out, unsigned order);
int klcd_map_pages_both(cptr_t *pages, gva_t *base_out, unsigned order);
void klcd_unmap_pages_both(cptr_t *pages, gva_t base, unsigned order);
int klcd_create_sync_endpoint(cptr_t *slot_out);
int klcd_send(cptr_t endpoint);
int klcd_recv(cptr_t endpoint);
......@@ -223,6 +234,134 @@ static inline int lcd_free_page(cptr_t page)
return klcd_free_page(page);
}
/* ADDR -> CPTR TRANSLATION ------------------------------ */
/**
* Get the cptr for the page that contains data, and the offset
* into the page. If data + len goes beyond the page, returns
* EINVAL. _virt_ takes a guest virtual address, _phys_ takes a
* guest physical address.
*
* For non-isolated code, these functions will determine the
* page that contains the data and "volunteer" the page to the
* microkernel for cspace management (i.e., will insert the page
* into the caller's cspace).
*
* This is why it is IMPORTANT that you call lcd_addr_to_page_cptr_cleanup
* when you are finished sharing the page (perhaps you have revoked
* access to it). This will "unvolunteer" the page, removing it from
* the caller's cspace. (It's not the end of the world if you don't
* call lcd_addr_to_page_cptr_cleanup, you'll just have pages floating around
* in your cspace that you may not have expected to see there.)
*
* (Further NOTE: if you call this function multiple times with the
* same pointer, or multiple pointers inside the same page, the page
* will be inserted multiple times inside the caller's cspace. Yes, we
* could keep track of whether a page has been inserted already, but it
* leads to a bit of memory and compute overhead that I'm avoiding for now.)
*/
static inline int lcd_phys_addr_to_page_cptr(unsigned long data,
					unsigned long len,
					cptr_t *page_cptr_out,
					unsigned long *offset_out)
{
	/* Non-isolated code: hand straight off to the klcd layer. */
	int rc;

	rc = klcd_phys_addr_to_page_cptr(data, len, page_cptr_out,
					offset_out);
	return rc;
}
static inline int lcd_virt_addr_to_page_cptr(char *data, unsigned long len,
					cptr_t *page_cptr_out,
					unsigned long *offset_out)
{
	/* Non-isolated code: hand straight off to the klcd layer. */
	int rc;

	rc = klcd_virt_addr_to_page_cptr(data, len, page_cptr_out,
					offset_out);
	return rc;
}
/**
* This function should be called when you are no longer using the
* page returned from the lookup functions above (phys/virt addr to page cptr).
*
* For the motivation, see the doc for the address lookup functions above.
*/
static inline int lcd_addr_to_page_cptr_cleanup(cptr_t page_cptr)
{
	/* "Unvolunteer" the page: remove it from the caller's cspace. */
	int rc;

	rc = klcd_addr_to_page_cptr_cleanup(page_cptr);
	return rc;
}
/* PAGE MAPPING ---------------------------------------- */
/**
* "Map" 2^order pages in caller's physical address space. For
* non-isolated code, the pages will already be available in the
* physical address space (non-isolated code has access to host
* physical). This function just locates where the pages are, and
* returns the address in base_out. (Note that gpa == hpa for non-isolated
* code.)
*
* This function assumes the pages are contiguous in host physical.
*/
static inline int lcd_map_pages_phys(cptr_t *pages, gpa_t *base_out,
				unsigned order)
{
	/*
	 * Non-isolated code already has all of host physical mapped;
	 * the klcd call just looks up where the pages live.
	 */
	int rc;

	rc = klcd_map_pages_phys(pages, base_out, order);
	return rc;
}
/**
* "Unmap" 2^order pages that were previously mapped via lcd_map_pages_phys.
* Note that this is a no-op for non-isolated code, and is only here to
* make non-isolated and isolated code symmetric (for isolated code,
* this function is meaningful, as it will update the isolated code's
* guest physical address space).
*/
static inline void lcd_unmap_pages_phys(cptr_t *pages, gpa_t base,
				unsigned order)
{
	/*
	 * Intentionally a no-op for non-isolated code; it exists only
	 * so isolated and non-isolated callers share one interface.
	 */
}
/**
* Map 2^order pages in the guest physical *and* virtual address spaces
* of the caller. Returns the virtual address of the starting point of the
* pages.
*
* Note: For 64-bit x86 (current implementation), this is basically
* just a lookup. No mapping is done. (The pages are already mapped. See
* Documentation/x86/x86_64/mm.txt.)
*/
static inline int lcd_map_pages_both(cptr_t *pages, gva_t *base_out,
				unsigned order)
{
	/*
	 * For the current 64-bit x86 implementation this is a lookup,
	 * not a real mapping (the pages are already mapped).
	 */
	int rc;

	rc = klcd_map_pages_both(pages, base_out, order);
	return rc;
}
/**
* Unmap 2^order pages in both address spaces.
*
* For 64-bit x86 (current implementation), this is a no-op. (The pages
* are always mapped. See note for lcd_map_pages_both.)
*/
static inline void lcd_unmap_pages_both(cptr_t *pages, gva_t base,
				unsigned order)
{
	/*
	 * Intentionally a no-op on 64-bit x86: the pages remain mapped
	 * in the kernel's address space (see lcd_map_pages_both).
	 */
}
/**
* The following just call the above functions
*/
static inline int lcd_map_page_phys(cptr_t page_cptr, gpa_t *gpa_out)
{
	/* Single-page (order 0) convenience wrapper. */
	cptr_t pg = page_cptr;

	return lcd_map_pages_phys(&pg, gpa_out, 0);
}
static inline int lcd_map_page_both(cptr_t page_cptr, gva_t *gva_out)
{
	/* Single-page (order 0) convenience wrapper. */
	cptr_t pg = page_cptr;

	return lcd_map_pages_both(&pg, gva_out, 0);
}
static inline void lcd_unmap_page_phys(cptr_t page_cptr, gpa_t gpa)
{
	/* Single-page (order 0) convenience wrapper. */
	cptr_t pg = page_cptr;

	lcd_unmap_pages_phys(&pg, gpa, 0);
}
static inline void lcd_unmap_page_both(cptr_t page_cptr, gva_t gva)
{
	/* Single-page (order 0) convenience wrapper. */
	cptr_t pg = page_cptr;

	lcd_unmap_pages_both(&pg, gva, 0);
}
/* IPC -------------------------------------------------- */
......
......@@ -145,28 +145,68 @@ int lcd_mem_init(void);
int lcd_page_alloc(cptr_t *slot_out);
/**
* Map page at guest physical address gpa.
*
* THIS IS A WRAPPER AROUND THE LOWER LEVEL HYPERCALL. Use the more
* friendly functions below.
*/
int lcd_page_map(cptr_t page, gpa_t gpa);
/**
* Unmap page at guest physical address gpa.
*
* THIS IS A WRAPPER AROUND THE LOWER LEVEL HYPERCALL. Use the more
* friendly functions below.
*/
int lcd_page_unmap(cptr_t page, gpa_t gpa);
/**
* Maps pages in guest virtual *and* physical address spaces.
* FRIENDLY
*
* Map 2^order pages in the "heap". (This function will find a free
* place to put them. Think: mmap, but for guest physical.) Returns
* the physical address of the starting point where the pages were
* mapped.
*/
int lcd_map_pages(cptr_t *pages, gva_t *base_out, unsigned order);
int lcd_map_pages_phys(cptr_t *pages, gpa_t *base_out, unsigned order);
/**
* Maps page in gv and gp address spaces.
* FRIENDLY
*
* Unmap 2^order pages that were previously mapped in the "heap" via
* lcd_map_pages_phys.
*/
void lcd_unmap_pages_phys(cptr_t *pages, gpa_t base, unsigned order);
/**
* FRIENDLY
*
* Map 2^order pages in the guest physical *and* virtual address spaces
* of the caller. The pages will be mapped in the "heap". Returns the
* virtual address of the starting point of the pages.
*/
int lcd_map_page(cptr_t page_cptr, gva_t *gva_out);
int lcd_map_pages_both(cptr_t *pages, gva_t *base_out, unsigned order);
/**
* Unmaps pages in gv and gp address spaces.
* FRIENDLY
*
* Unmap 2^order pages in both address spaces (pages should have been
* previously mapped via lcd_map_pages_both).
*/
void lcd_unmap_pages(cptr_t *pages, gva_t base, unsigned order);
void lcd_unmap_pages_both(cptr_t *pages, gva_t base, unsigned order);
/**
* Unmaps page in gv and gp address spaces.
* The following just call the above functions
*/
void lcd_unmap_page(cptr_t page_cptr, gva_t page);
static inline int lcd_map_page_phys(cptr_t page_cptr, gpa_t *gpa_out)
{
	/* Map one page (order 0) in the "heap" guest physical region. */
	cptr_t single = page_cptr;

	return lcd_map_pages_phys(&single, gpa_out, 0);
}
static inline int lcd_map_page_both(cptr_t page_cptr, gva_t *gva_out)
{
	/* Map one page (order 0) in both guest address spaces. */
	cptr_t single = page_cptr;

	return lcd_map_pages_both(&single, gva_out, 0);
}
static inline void lcd_unmap_page_phys(cptr_t page_cptr, gpa_t gpa)
{
	/* Unmap one page (order 0) from guest physical. */
	cptr_t single = page_cptr;

	lcd_unmap_pages_phys(&single, gpa, 0);
}
static inline void lcd_unmap_page_both(cptr_t page_cptr, gva_t gva)
{
	/* Unmap one page (order 0) from both guest address spaces. */
	cptr_t single = page_cptr;

	lcd_unmap_pages_both(&single, gva, 0);
}
/**
* Allocates 2^order pages. These will be properly mapped in gv and gp
* address spaces (so you're done). Returns gv address of first page
......@@ -217,6 +257,37 @@ struct page * lcd_alloc_pages_exact_node(int nid, gfp_t gfp_mask,
*/
void lcd_free_memcg_kmem_pages(unsigned long addr, unsigned int order);
/**
* Get the cptr for the page that contains data, and the offset
* into the page. If data + len goes beyond the page, returns
* EINVAL. _virt_ takes a guest virtual address, _phys_ takes a
* guest physical address.
*
* For isolated code, this function just does a look up in
* the giant map/array that translates pages -> cptr's.
*
* You MUST call lcd_addr_to_page_cptr_cleanup below when you no
* longer are sharing the page with someone else.
*/
int lcd_phys_addr_to_page_cptr(unsigned long data, unsigned long len,
cptr_t *page_cptr_out, unsigned long *offset_out);
int lcd_virt_addr_to_page_cptr(char *data, unsigned long len,
cptr_t *page_cptr_out, unsigned long *offset_out);
/**
* This function should be called when you are no longer using the
* page returned from the lookup functions above (phys/virt addr to page cptr).
*
* For isolated code, right now, this is a no-op.
*
* For the motivation, see the non-isolated code counterpart in the
* kliblcd.h header.
*/
static inline int lcd_addr_to_page_cptr_cleanup(cptr_t page_cptr)
{
	/*
	 * No-op for isolated code: the page -> cptr lookup is just a
	 * table read, so there is nothing to undo. Always succeeds.
	 */
	(void)page_cptr;
	return 0;
}
/* IPC -------------------------------------------------- */
/**
......
......@@ -160,10 +160,160 @@ int klcd_free_page(cptr_t page)
return 0;
}
/* ADDR -> CPTR ---------------------------------------- */
/*
 * Returns non-zero if the byte range [data, data + len) lies entirely
 * within a single page.
 *
 * Note the off-by-one: the last byte of the range is data + len - 1.
 * Comparing against data + len (one past the end) would wrongly reject
 * a region that ends exactly on a page boundary, e.g. a full page
 * starting at a page base. A zero-length range trivially fits.
 */
static int fits_in_page(unsigned long data, unsigned long len)
{
	if (len == 0)
		return 1;
	return (data & PAGE_MASK) == ((data + len - 1) & PAGE_MASK);
}
/*
 * Translate a host physical address into (page cptr, offset-in-page).
 *
 * The containing page is "volunteered" into the caller's cspace via
 * klcd_add_page; on success, *page_cptr_out names the page capability
 * and *offset_out is the byte offset of data within the page. Returns
 * -EINVAL if the range spans a page boundary, or klcd_add_page's error.
 */
int klcd_phys_addr_to_page_cptr(unsigned long data, unsigned long len,
				cptr_t *page_cptr_out,
				unsigned long *offset_out)
{
	struct page *p;
	int ret;

	/* Make sure data doesn't trail off the end of its page. */
	if (!fits_in_page(data, len)) {
		LIBLCD_ERR("data (pa=%lx,len=%lx) spans more than one page",
			data, len);
		return -EINVAL;
	}

	/* Look up the struct page for the containing page frame. */
	p = pfn_to_page(data >> PAGE_SHIFT);
	if (!p) {
		LIBLCD_ERR("error getting page for pa=%lx", data);
		return -EINVAL;
	}

	/* "Volunteer" the page into the caller's cspace. */
	ret = klcd_add_page(p, page_cptr_out);
	if (ret)
		return ret;

	/* Byte offset of data within its page. */
	*offset_out = data & ~PAGE_MASK;
	return 0;
}
/*
 * Translate a host virtual address into (page cptr, offset-in-page),
 * by converting to a physical address and doing the lookup there.
 *
 * Calls the klcd-level routine directly rather than bouncing through
 * the lcd_* static inline wrapper, for consistency with the rest of
 * the klcd layer in this file.
 */
int klcd_virt_addr_to_page_cptr(char *data, unsigned long len,
				cptr_t *page_cptr_out,
				unsigned long *offset_out)
{
	return klcd_phys_addr_to_page_cptr(__pa(data),
					len,
					page_cptr_out,
					offset_out);
}
/*
 * "Unvolunteer" a page previously inserted into the caller's cspace by
 * klcd_{phys,virt}_addr_to_page_cptr: removes the page capability.
 * Always returns 0.
 */
int klcd_addr_to_page_cptr_cleanup(cptr_t page_cptr)
{
	klcd_rm_page(page_cptr);
	return 0;
}
/* PAGE MAPPING ---------------------------------------- */
/*
 * "Map" 2^order pages in the caller's physical address space. For
 * non-isolated code the pages are already accessible; this just looks
 * up the host physical address of the first page (the pages are
 * assumed physically contiguous) and returns it in *base_out.
 *
 * Returns 0 on success; __lcd_cnode_get's error if the capability
 * lookup fails; -EINVAL if the capability is not a page.
 */
int klcd_map_pages_phys(cptr_t *pages, gpa_t *base_out, unsigned order)
{
	int ret;
	struct cnode *cnode;
	unsigned long hpa;
	/*
	 * Look up first page in the caller's cspace.
	 */
	ret = __lcd_cnode_get(&current->lcd->cspace, pages[0], &cnode);
	if (ret) {
		LCD_ERR("couldn't find page in caller's cspace");
		goto fail1;
	}
	/*
	 * Confirm it's a page, and get the physical address. Regular
	 * and kernel pages are handled identically.
	 */
	switch (cnode->type) {
	case LCD_CAP_TYPE_PAGE:
	case LCD_CAP_TYPE_KPAGE:
		hpa = __pa(page_address(cnode->object));
		break;
	default:
		LCD_ERR("not a page");
		/*
		 * BUG FIX: ret was still 0 (from the successful cnode
		 * lookup) here, so "not a page" used to return success
		 * with *base_out uninitialized.
		 */
		ret = -EINVAL;
		goto fail2;
	}
	*base_out = __gpa(hpa); /* gpa == hpa for non-isolated code */
	ret = 0;
	/* Fall through: release the cnode on success and on failure. */
fail2:
	__lcd_cnode_put(cnode);
fail1:
	return ret;
}
/*
 * "Map" 2^order physically-contiguous pages in both address spaces and
 * return the kernel virtual address of the first page in *base_out.
 * Returns 0 on success, or klcd_map_pages_phys's error.
 */
int klcd_map_pages_both(cptr_t *pages, gva_t *base_out, unsigned order)
{
	int ret;
	gpa_t gpa_base;
	/*
	 * "Map" it in physical, and get the base phys address.
	 *
	 * (This basically boils down to just looking up the physical
	 * address of the pages. The pages are assumed to be contiguous.)
	 */
	ret = klcd_map_pages_phys(pages, &gpa_base, order);
	if (ret) {
		LCD_ERR("error 'mapping' in phys");
		goto fail1;
	}
#ifndef CONFIG_HIGHMEM
	/*
	 * For e.g. x86_64, all of phys memory is already mapped in
	 * the kernel's address space. (See Documentation/x86/x86_64/mm.txt.)
	 * So, we can just return the page's address.
	 *
	 * Remember that gva == hva for non-isolated code, hence the
	 * phys -> virt -> hva -> gva conversion chain below.
	 */
	*base_out = __gva(hva_val(va2hva(__va(gpa_val(gpa_base)))));
#else /* CONFIG_HIGHMEM */
#error "We don't support this kind of arch right now."
#endif
	return 0;

fail1:
	return ret;
}
/* EXPORTS -------------------------------------------------- */
EXPORT_SYMBOL(klcd_add_page);
EXPORT_SYMBOL(klcd_rm_page);
EXPORT_SYMBOL(klcd_page_alloc);
EXPORT_SYMBOL(klcd_gfp);
EXPORT_SYMBOL(klcd_phys_addr_to_page_cptr);
EXPORT_SYMBOL(klcd_virt_addr_to_page_cptr);
EXPORT_SYMBOL(klcd_addr_to_page_cptr_cleanup);
EXPORT_SYMBOL(klcd_map_pages_both);
EXPORT_SYMBOL(klcd_map_pages_phys);
......@@ -674,7 +674,7 @@ static int check_get_idx(gva_t base, unsigned long *out, unsigned order)
/* INTERFACE -------------------------------------------------- */
int lcd_map_pages(cptr_t *pages, gva_t *base_out, unsigned order)
int lcd_map_pages_phys(cptr_t *pages, gpa_t *base_out, unsigned order)
{
unsigned long idx;
unsigned long base;
......@@ -685,7 +685,7 @@ int lcd_map_pages(cptr_t *pages, gva_t *base_out, unsigned order)
idx = find_first_zero_bits(free_mem_bmap, LCD_FREE_MEM_BMAP_SIZE,
order);
if (idx >= LCD_FREE_MEM_BMAP_SIZE) {
lcd_printk("lcd_map_pages: not enough mem for %d pages",
lcd_printk("lcd_map_pages_phys: not enough mem for %d pages",
(1 << order));
ret = -ENOMEM;
goto fail1;
......@@ -701,68 +701,44 @@ int lcd_map_pages(cptr_t *pages, gva_t *base_out, unsigned order)
*/
ret = do_mk_map_pages(pages, __gpa(base), order);
if (ret) {
lcd_printk("map_page: failed to map in gp");
lcd_printk("lcd_map_pages_phys: failed to map in gp");
goto fail2;
}
/*
* Map page in gv there, gv addr = gp addr
*/
ret = gv_map_pages(__gva(base), __gpa(base), order);
if (ret) {
lcd_printk("map_page: failed to map in guest virtual");
goto fail3;
}
/*
* Update page -> cptr correspondences
*/
update_page2cptr(idx, pages, order);
*base_out = __gva(base);
*base_out = __gpa(base);
return 0;
fail3:
do_mk_unmap_pages(pages, gpa_add(LCD_FREE_MEM_BASE, idx << PAGE_SHIFT),
order);
fail2:
clear_bits(idx, free_mem_bmap, order);
fail1:
return ret;
}
int lcd_map_page(cptr_t page_cptr, gva_t *gva_out)
{
return lcd_map_pages(&page_cptr, gva_out, 0);
}
static void lcd_do_unmap_pages(cptr_t *pages, gva_t base, unsigned order)
void lcd_unmap_pages_phys(cptr_t *pages, gpa_t base, unsigned order)
{
int ret;
unsigned long idx;
/*
* Get index to base page in free area
* Get index to base page in free area.
*
* XXX: Remember gpa == gva in heap area.
*/
ret = check_get_idx(base, &idx, order);
ret = check_get_idx(__gva(gpa_val(base)), &idx, order);
if (ret) {
lcd_printk("lcd_do_unmap_pages: bad base/order");
lcd_printk("lcd_unmap_pages_phys: bad base/order");
goto out;
}
/*
* Tell microkernel to unmap in guest physical
*/
ret = do_mk_unmap_pages(pages, __gpa(gva_val(base)), order);
if (ret) {
lcd_printk("lcd_do_unmap_pages: failed to unmap in gp");
goto out;
}
/*
* Unmap in guest virtual
*/
ret = gv_unmap_pages(base, order);
ret = do_mk_unmap_pages(pages, base, order);
if (ret) {
lcd_printk("lcd_do_unmap_pages: failed to unmap in gv");
lcd_printk("lcd_unmap_pages_phys: failed to unmap in gp");
goto out;
}
/*
......@@ -776,14 +752,58 @@ out:
return;
}
void lcd_unmap_pages(cptr_t *pages, gva_t base, unsigned order)
int lcd_map_pages_both(cptr_t *pages, gva_t *base_out, unsigned order)
{
lcd_do_unmap_pages(pages, base, order);
gpa_t gpa_base;
int ret;
/*
* Map in guest physical first
*/
ret = lcd_map_pages_phys(pages, &gpa_base, order);
if (ret) {
lcd_printk("lcd_map_pages_both: error mapping in gpa");
goto fail1;
}
/*
* Map in guest virtual next, gv addr = gp addr
*/
ret = gv_map_pages(__gva(gpa_val(gpa_base)), gpa_base, order);
if (ret) {
lcd_printk("lcd_map_pages_both: failed to map in guest virtual");
goto fail2;
}
*base_out = __gva(gpa_val(gpa_base));
return 0;
fail2:
lcd_unmap_pages_phys(pages, gpa_base, order);
fail1:
return ret;
}
void lcd_unmap_page(cptr_t page_cptr, gva_t page)
void lcd_unmap_pages_both(cptr_t *pages, gva_t base, unsigned order)
{
lcd_unmap_pages(&page_cptr, page, 0);
int ret;
/*
* Unmap in guest physical first, gpa == gva
*/
lcd_unmap_pages_phys(pages, __gpa(gva_val(base)), order);
/*
* Unmap in gva.
*
* XXX: If we fail here, may be in inconsistent state: mappings
* still in guest virtual, but no mappings in guest physical.
*/
ret = gv_unmap_pages(base, order);
if (ret) {
lcd_printk("lcd_unmap_pages_both: failed to unmap in gv");
goto out;
}
out:
return;
}
int lcd_alloc_pages(unsigned order, gva_t *base_out)
......@@ -812,7 +832,7 @@ int lcd_alloc_pages(unsigned order, gva_t *base_out)
/*
* Map them
*/
ret = lcd_map_pages(pages, base_out, order);
ret = lcd_map_pages_both(pages, base_out, order);
if (ret) {
lcd_printk("lcd_alloc_pages: failed to map");
goto fail1;
......@@ -847,7 +867,7 @@ void lcd_free_pages(gva_t base, unsigned order)
/*
* Do unmap
*/
lcd_do_unmap_pages(&free_mem_page2cptr[idx], base, order);
lcd_unmap_pages_both(&free_mem_page2cptr[idx], base, order);
}
void lcd_free_page(gva_t page)
......@@ -920,6 +940,97 @@ void lcd_free_memcg_kmem_pages(unsigned long addr, unsigned int order)
lcd_free_pages(__gva(addr), order);
}
/* ADDR -> CPTR TRANSLATION ---------------------------------------- */
static int fits_in_page(unsigned long data, unsigned long len)