Commit c9cb61a2 authored by Charlie Jacobsen, committed by Vikram Narayanan

EPT deallocation code in place (untested).

vmx_free_ept
-- frees all memory associated with extended
   page tables (paging structures and mapped
   physical mem)

vmx_free_ept_dir_level
-- frees all memory associated with an epte
   at a level in the hierarchy
-- uses shallow recursion to make the code
   more readable

Simple updates to some of the EPT macros.
parent 8f65d678
......@@ -42,6 +42,7 @@ enum lcd_arch_reg {
#define LCD_ARCH_EPT_WALK_LENGTH 4
#define LCD_ARCH_EPTP_WALK_SHIFT 3
#define LCD_ARCH_PTRS_PER_EPTE (1 << 9)
struct lcd_arch_ept {
spinlock_t lock;
......
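
Aside (not part of the patch): the sizing arithmetic behind these macros is that each EPT paging structure is one 4 KB page holding (1 << 9) = 512 eight-byte entries, and a walk length of 4 covers 512^4 * 4 KB = 256 TB of guest-physical address space. A standalone sanity check over the same constants:

#include <stdint.h>

/* One EPT table = 512 entries * 8 bytes = exactly one 4 KB page. */
_Static_assert((1 << 9) * sizeof(uint64_t) == 4096,
               "an EPT paging structure must fill one 4 KB page");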
......@@ -878,6 +878,7 @@ void lcd_arch_exit(void)
*/
#define VMX_EPTE_ADDR_MASK PAGE_MASK
#define VMX_EPTE_ADDR(epte) (((u64)(epte)) & PAGE_MASK)
#define VMX_EPTE_VADDR(epte) (__va(VMX_EPTE_ADDR(epte)))
#define VMX_EPT_ALL_MASK (VMX_EPT_READABLE_MASK | \
                          VMX_EPT_WRITABLE_MASK | \
                          VMX_EPT_EXECUTABLE_MASK)
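
To illustrate how these masks compose (a hedged sketch; the helper below is hypothetical, not in the patch): a present, non-leaf epte carries the host-physical address of the next-level table in its upper bits, so stepping down one level is a mask plus __va():

/*
 * Hypothetical helper, illustration only: VMX_EPTE_ADDR strips the
 * flag bits, and __va() translates the remaining host-physical
 * address through the kernel's direct mapping.
 */
static inline lcd_arch_epte_t *vmx_epte_next_table(lcd_arch_epte_t epte)
{
        return (lcd_arch_epte_t *)VMX_EPTE_VADDR(epte);
}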
......@@ -896,7 +897,14 @@ enum vmx_epte_mts {
* we are using a page walk length of 4, epte's at all levels have
* the `size' bit (bit 7) set to 0. Page table entries (entries at the final
* level) have the IPAT (ignore page attribute table) and EPT MT (memory
* type) bits set. See Intel SDM V3 Figure 28-1 and 28.2.2.
* type) bits set. Paging levels are zero-indexed:
*
* 0 = PML4 entry
* 1 = PDPTE entry
* 2 = Page Directory entry
* 3 = Page Table entry
*
* See Intel SDM V3 Figure 28-1 and 28.2.2.
*/
static void vmx_epte_set(lcd_arch_epte_t *epte, u64 hpa, int level)
{
......@@ -976,6 +984,61 @@ u64 lcd_arch_ept_hpa(lcd_arch_epte_t *epte)
return VMX_EPTE_ADDR(*epte);
}
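
The body of vmx_epte_set is elided in this diff; the following is a minimal sketch consistent with the comment above, not the committed code (VMX_EPTE_IPAT and VMX_EPTE_MT_WB are assumed names for the IPAT and write-back memory-type bits):

/*
 * Hedged reconstruction, illustration only: every level gets R/W/X
 * plus the masked host-physical address, leaving the size bit (bit 7)
 * clear; final-level (page table) entries additionally set IPAT and
 * a write-back EPT memory type.
 */
static void vmx_epte_set_sketch(lcd_arch_epte_t *epte, u64 hpa, int level)
{
        u64 e = (hpa & VMX_EPTE_ADDR_MASK) | VMX_EPT_ALL_MASK;
        if (level == 3)
                e |= VMX_EPTE_IPAT | VMX_EPTE_MT_WB; /* assumed names */
        *epte = e;
}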
/**
* Recursively frees all present entries in dir at level, and
* the page containing the dir.
*
* 0 = pml4
* 1 = pdpt
* 2 = page dir
* 3 = page table
*/
static void vmx_free_ept_dir_level(lcd_arch_epte_t *dir, int level)
{
        int idx;
        if (level == 3) {
                /*
                 * Base case of recursion
                 *
                 * Free present pages in page table
                 */
                for (idx = 0; idx < LCD_ARCH_PTRS_PER_EPTE; idx++) {
                        if (VMX_EPTE_PRESENT(dir[idx]))
                                free_page((unsigned long)
                                        VMX_EPTE_VADDR(dir[idx]));
                }
        } else {
                /*
                 * pml4, pdpt, or page directory
                 *
                 * Recur on the table referenced by each present entry
                 */
                for (idx = 0; idx < LCD_ARCH_PTRS_PER_EPTE; idx++) {
                        if (VMX_EPTE_PRESENT(dir[idx]))
                                vmx_free_ept_dir_level(
                                        VMX_EPTE_VADDR(dir[idx]),
                                        level + 1);
                }
        }
        /*
         * Free page containing dir
         */
        free_page((unsigned long)dir);
}
/**
* Frees all memory associated with ept (ept paging
* structures and mapped physical mem).
*/
static void vmx_free_ept(struct lcd_arch *vcpu)
{
        lcd_arch_epte_t *dir;
        /*
         * Get pml4 table
         */
        dir = (lcd_arch_epte_t *) __va(vcpu->ept.root_hpa);
        vmx_free_ept_dir_level(dir, 0);
}
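
For context, a hedged sketch of the intended call site (lcd_arch_destroy_sketch and the other teardown steps are hypothetical, not in this patch). Note the recursion in vmx_free_ept_dir_level is at most four frames deep, one per paging level, so kernel stack usage is trivially bounded:

/*
 * Hypothetical caller, illustration only: free all EPT state while
 * tearing down an LCD's arch-dependent data.
 */
static void lcd_arch_destroy_sketch(struct lcd_arch *vcpu)
{
        vmx_free_ept(vcpu);
        /* ... free VMCS, vpid, etc. ... */
}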
/**
* Initializes the EPT's root global page directory page, the
* VMCS pointer, and the spinlock.
......
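
The init body is elided here as well; as a hedged sketch of what the comment describes (ept.vmcs_ptr and the write-back memory-type encoding are assumptions; only ept.root_hpa and ept.lock appear in this diff):

/*
 * Hedged sketch, illustration only: allocate a zeroed PML4 page,
 * record its host-physical address, encode the EPT pointer for the
 * VMCS (bits 5:3 = walk length - 1, bits 2:0 = 6 for write-back),
 * and init the lock.
 */
static int vmx_init_ept_sketch(struct lcd_arch *vcpu)
{
        unsigned long pml4 = __get_free_page(GFP_KERNEL | __GFP_ZERO);
        if (!pml4)
                return -ENOMEM;
        vcpu->ept.root_hpa = __pa(pml4);
        vcpu->ept.vmcs_ptr = vcpu->ept.root_hpa |
                ((LCD_ARCH_EPT_WALK_LENGTH - 1) << LCD_ARCH_EPTP_WALK_SHIFT) |
                6;
        spin_lock_init(&vcpu->ept.lock);
        return 0;
}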
......@@ -302,23 +302,23 @@
/* return (epte & __EPTE_SZ) > 0; */
/* } */
static void free_ept_page(epte_t epte) {
struct page *page = pfn_to_page(epte_addr(epte) >> PAGE_SHIFT);
/* static void free_ept_page(epte_t epte) { */
/* struct page *page = pfn_to_page(epte_addr(epte) >> PAGE_SHIFT); */
if (epte & __EPTE_WRITE)
set_page_dirty_lock(page);
put_page(page);
}
/* if (epte & __EPTE_WRITE) */
/* set_page_dirty_lock(page); */
/* put_page(page); */
/* } */
static int clear_epte(epte_t *epte) {
if (*epte == __EPTE_NONE)
return 0;
/* static int clear_epte(epte_t *epte) { */
/* if (*epte == __EPTE_NONE) */
/* return 0; */
free_ept_page(*epte);
*epte = __EPTE_NONE;
/* free_ept_page(*epte); */
/* *epte = __EPTE_NONE; */
return 1;
}
/* return 1; */
/* } */
/* /\** */
/* * Look up the ept entry for guest physical */
......@@ -450,51 +450,51 @@ static int lcd_ept_gpa_to_hva(struct lcd* vcpu, u64 gpa, u64 *hva) {
return 0;
}
static void lcd_free_ept(u64 ept_root) {
epte_t *pgd;
int i, j, k, l;
/* static void lcd_free_ept(u64 ept_root) { */
/* epte_t *pgd; */
/* int i, j, k, l; */
pgd = (epte_t *) __va(ept_root);
/* pgd = (epte_t *) __va(ept_root); */
for (i = 0; i < PTRS_PER_PGD; i++) {
epte_t *pud = (epte_t *) epte_page_vaddr(pgd[i]);
if (!epte_present(pgd[i]))
continue;
/* for (i = 0; i < PTRS_PER_PGD; i++) { */
/* epte_t *pud = (epte_t *) epte_page_vaddr(pgd[i]); */
/* if (!epte_present(pgd[i])) */
/* continue; */
for (j = 0; j < PTRS_PER_PUD; j++) {
epte_t *pmd = (epte_t *) epte_page_vaddr(pud[j]);
if (!epte_present(pud[j]))
continue;
if (epte_flags(pud[j]) & __EPTE_SZ)
continue;
/* for (j = 0; j < PTRS_PER_PUD; j++) { */
/* epte_t *pmd = (epte_t *) epte_page_vaddr(pud[j]); */
/* if (!epte_present(pud[j])) */
/* continue; */
/* if (epte_flags(pud[j]) & __EPTE_SZ) */
/* continue; */
for (k = 0; k < PTRS_PER_PMD; k++) {
epte_t *pte = (epte_t *) epte_page_vaddr(pmd[k]);
if (!epte_present(pmd[k]))
continue;
if (epte_flags(pmd[k]) & __EPTE_SZ) {
free_ept_page(pmd[k]);
continue;
}
/* for (k = 0; k < PTRS_PER_PMD; k++) { */
/* epte_t *pte = (epte_t *) epte_page_vaddr(pmd[k]); */
/* if (!epte_present(pmd[k])) */
/* continue; */
/* if (epte_flags(pmd[k]) & __EPTE_SZ) { */
/* free_ept_page(pmd[k]); */
/* continue; */
/* } */
for (l = 0; l < PTRS_PER_PTE; l++) {
if (!epte_present(pte[l]))
continue;
/* for (l = 0; l < PTRS_PER_PTE; l++) { */
/* if (!epte_present(pte[l])) */
/* continue; */
free_ept_page(pte[l]);
} // PTE loop
/* free_ept_page(pte[l]); */
/* } // PTE loop */
free_page((unsigned long) pte);
} // PMD loop
/* free_page((unsigned long) pte); */
/* } // PMD loop */
free_page((unsigned long) pmd);
} // PUD loop
/* free_page((unsigned long) pmd); */
/* } // PUD loop */
free_page((unsigned long) pud);
} // PGD loop
/* free_page((unsigned long) pud); */
/* } // PGD loop */
free_page((unsigned long) pgd);
}
/* free_page((unsigned long) pgd); */
/* } */
/* int vmx_init_ept(struct lcd *vcpu) { */
......