...
 
Commits (42)
......@@ -2,26 +2,11 @@
#define PRIV_MEMPOOL_H
#define MTU 1470
#define HEADERS 42
#define HEADERS 42
#define SKB_LCD_MEMBERS_SZ 48
#define SKB_SHARED_INFO (sizeof(struct skb_shared_info))
#define DATA_ALIGNED_SZ (SKB_DATA_ALIGN(MTU + HEADERS + SKB_LCD_MEMBERS_SZ))
#define SKB_DATA_SIZE (DATA_ALIGNED_SZ + SKB_DATA_ALIGN(SKB_SHARED_INFO))
#define SKB_CONTAINER_SIZE 128
typedef enum {
/* for skb->data */
SKB_DATA_POOL = 0,
/* for skb->page_frag */
SKB_FRAG_POOL,
/* for skb_container */
SKB_CONTAINER_POOL,
POOL_MAX,
} pool_type_t;
#define DATA_ALIGNED_SZ (SKB_DATA_ALIGN(MTU + HEADERS + SKB_LCD_MEMBERS_SZ))
#define SKB_DATA_SIZE (DATA_ALIGNED_SZ + SKB_DATA_ALIGN(SKB_SHARED_INFO))
struct object {
struct object *next;
......@@ -38,34 +23,28 @@ struct atom {
} __attribute__((aligned(16)));
/*
 * Per-pool bookkeeping for the private mempool allocator.
 *
 * NOTE(review): the previous declaration listed head, marker, cached,
 * obj_size and num_objs_percpu twice (stale lines left behind by a
 * refactor). Duplicate members with the same name do not compile, so
 * each member is kept exactly once here.
 */
typedef struct {
	/* per-cpu free-list head for this pool's objects */
	struct object __percpu **head;
	/* per-cpu watermark into the free list — TODO confirm exact role */
	struct object __percpu **marker;
#ifdef PBUF
	char __percpu **buf;
	char __percpu **bufend;
#endif
	/* per-cpu count of locally cached objects */
	int __percpu *cached;
	unsigned int obj_size;		/* size of one object, in bytes */
	unsigned int total_pages;	/* pages backing the pool */
	unsigned int num_objs_percpu;	/* objects reserved per cpu */
	unsigned int total_objs;	/* objects in the whole pool */
	unsigned int num_cpus;
	void *pool;
	void *gpool;			/* global (shared) portion of the pool */
	void *base;			/* base address of the backing memory */
	struct atom stack;		/* global stack of objects (struct atom) */
	unsigned int pool_order;	/* allocation order of the pool */
	spinlock_t pool_spin_lock;	/* protects the global pool state */
	bool dump_once;			/* one-shot debug-dump flag */
	struct dentry *pstats;		/* debugfs stats entry */
} priv_pool_t;
void *priv_alloc(pool_type_t type);
void priv_free(void *p, pool_type_t type);
//priv_pool_t *priv_pool_init(pool_type_t type, unsigned int num_objs, unsigned int obj_size);
void *priv_alloc(priv_pool_t *pool);
void priv_free(priv_pool_t *pool, void *obj);
priv_pool_t *priv_pool_init(void *pool_base, size_t pool_size,
unsigned int obj_size, const char* name);
priv_pool_t *priv_pool_init(pool_type_t type, void *pool_base,
size_t pool_size,
unsigned int obj_size);
void priv_pool_destroy(priv_pool_t *p);
#endif /* PRIV_MEMPOOL_H */
......@@ -16,14 +16,14 @@
/**
* Creates the arch-dependent part of an LCD (e.g., the ept).
*/
int lcd_arch_create(struct lcd_arch **out);
int lcd_arch_create(struct lcd_arch **out, bool is_child);
/**
* Tears down arch-dep part of LCD.
*
* IMPORTANT: When the ept is torn down, any host memory that is still mapped
* will *not* be freed. Beware.
*/
void lcd_arch_destroy(struct lcd_arch *lcd_arch);
void lcd_arch_destroy(struct lcd_arch *lcd_arch, bool is_child);
/**
* Set the lcd's program counter to the guest virtual address
* a.
......@@ -46,6 +46,9 @@ int lcd_arch_set_sp(struct lcd_arch *lcd_arch, gva_t a);
* guest physical address a.
*/
int lcd_arch_set_gva_root(struct lcd_arch *lcd_arch, gpa_t a);
int lcd_arch_set_gs_base(struct lcd_arch *lcd_arch, gva_t a);
/**
* Allocate memory for the VMCS for an LCD.
*/
......
......@@ -23,7 +23,7 @@ int lcd_arch_ept_walk(struct lcd_arch *lcd, gpa_t a, int create,
/**
* Set the guest physical => host physical mapping in the ept entry.
*/
void lcd_arch_ept_set(lcd_arch_epte_t *epte, hpa_t a);
void lcd_arch_ept_set(lcd_arch_epte_t *epte, gpa_t gpa, hpa_t hpa);
/**
* Read the host physical address stored in epte.
*/
......@@ -110,6 +110,9 @@ void lcd_arch_ept_invept(u64 eptp);
* VMCS pointer, and the spinlock.
*/
int lcd_arch_ept_init(struct lcd_arch *lcd_arch);
#ifdef CONFIG_LCD_SINGLE_EPT
int lcd_arch_ept_child_init(struct lcd_arch *lcd_arch);
#endif
/**
* Free an LCD's EPT tables.
*
......
......@@ -124,6 +124,32 @@ static inline int lcd_syscall_five_args(int id,
return (int)ret;
}
/*
 * lcd_syscall_six_args -- issue a hypercall (vmcall) with six arguments.
 *
 * Register convention: rax carries the syscall id, r8..r13 carry
 * arg0..arg5, and the return value comes back in rax. This matches the
 * arg accessors on the microkernel side (e.g. lcd_arch_get_syscall_arg5
 * reads r13).
 *
 * Operands are loaded highest-register-first; all registers written by
 * the asm are listed in the clobber list so the compiler will not place
 * an input operand in a register that gets overwritten before it is
 * read.
 */
static inline int lcd_syscall_six_args(int id,
				unsigned long arg0,
				unsigned long arg1,
				unsigned long arg2,
				unsigned long arg3,
				unsigned long arg4,
				unsigned long arg5)
{
	long ret;
	asm volatile(
		"movq %7, %%r13 \n\t"
		"movq %6, %%r12 \n\t"
		"movq %5, %%r11 \n\t"
		"movq %4, %%r10 \n\t"
		"movq %3, %%r9 \n\t"
		"movq %2, %%r8 \n\t"
		"movq %1, %%rax \n\t"
		"vmcall \n\t"
		"movq %%rax, %0 \n\t"
		: "=g" (ret)
		: "g" (id), "g" (arg0), "g" (arg1), "g" (arg2), "g" (arg3),
		"g" (arg4), "g" (arg5)
		: "rax", "r8", "r9", "r10", "r11", "r12", "r13");
	/* Truncate the 64-bit rax to the int error/return code. */
	return (int)ret;
}
static inline void lcd_syscall_cap_delete(cptr_t cptr)
{
lcd_syscall_one_arg(LCD_SYSCALL_CAP_DELETE, cptr_val(cptr));
......@@ -152,11 +178,11 @@ static inline int lcd_syscall_create(cptr_t lcd_slot)
static inline int lcd_syscall_config_registers(cptr_t lcd, gva_t pc,
gva_t sp, gpa_t gva_root,
gpa_t utcb_page)
gpa_t utcb_page, gva_t gs_base)
{
return lcd_syscall_five_args(LCD_SYSCALL_CONFIG_REGISTERS,
return lcd_syscall_six_args(LCD_SYSCALL_CONFIG_REGISTERS,
cptr_val(lcd), gva_val(pc), gva_val(sp),
gpa_val(gva_root), gpa_val(utcb_page));
gpa_val(gva_root), gpa_val(utcb_page), gva_val(gs_base));
}
static inline int lcd_syscall_memory_grant_and_map(cptr_t lcd,
......
......@@ -252,7 +252,7 @@ int lcd_arch_create(struct lcd_arch **out);
* IMPORTANT: When the ept is torn down, any host memory that is still mapped
* will be freed. This is for convenience. But beware.
*/
void lcd_arch_destroy(struct lcd_arch *lcd_arch);
void lcd_arch_destroy(struct lcd_arch *lcd_arch, bool is_child);
/**
* Does logical consistency checks (e.g., runs through checks
* listed in Intel SDM V3 26.1, 26.2, and 26.3).
......@@ -415,6 +415,10 @@ static inline u64 lcd_arch_get_syscall_arg4(struct lcd_arch *lcd)
{
return lcd->regs.r12;
}
static inline u64 lcd_arch_get_syscall_arg5(struct lcd_arch *lcd)
{
	/* The sixth syscall argument is passed in the guest's r13. */
	u64 arg = lcd->regs.r13;
	return arg;
}
static inline void lcd_arch_set_syscall_ret(struct lcd_arch *lcd, u64 val)
{
lcd->regs.rax = val;
......
......@@ -65,6 +65,10 @@ static inline u64 lcd_arch_get_syscall_arg4(struct lcd_arch *lcd)
{
return lcd->regs.r12;
}
static inline u64 lcd_arch_get_syscall_arg5(struct lcd_arch *lcd)
{
return lcd->regs.r13;
}
static inline void lcd_arch_set_syscall_ret(struct lcd_arch *lcd, u64 val)
{
lcd->regs.rax = val;
......
......@@ -165,6 +165,7 @@
#define LCD_BOOTSTRAP_PAGES_SIZE (1 * PAGE_SIZE) /* .......... 4 KBs */
#define LCD_BOOTSTRAP_PAGE_TABLES_SIZE (16 * PAGE_SIZE) /* ... 64 KBs */
#define LCD_STACK_SIZE (2 * PAGE_SIZE) /* .................... 8 KBs */
#define LCD_GLOBAL_SEGMENT_SIZE PAGE_SIZE /* ..................... 4 KBs */
/* Orders (for convenience) */
......@@ -174,22 +175,133 @@
(ilog2(LCD_BOOTSTRAP_PAGE_TABLES_SIZE >> PAGE_SHIFT))
#define LCD_STACK_ORDER \
(ilog2(LCD_STACK_SIZE >> PAGE_SHIFT))
#define LCD_GLOBAL_SEGMENT_ORDER \
(ilog2(LCD_GLOBAL_SEGMENT_SIZE >> PAGE_SHIFT))
/* Offsets. */
#define LCD_MISC_REGION_OFFSET (1UL << 30)
#define LCD_UTCB_OFFSET LCD_MISC_REGION_OFFSET
#define LCD_BOOTSTRAP_PAGES_OFFSET (LCD_UTCB_OFFSET + LCD_UTCB_SIZE)
#ifndef CONFIG_LCD_SINGLE_EPT
#define LCD_BOOTSTRAP_PAGE_TABLES_OFFSET \
(LCD_BOOTSTRAP_PAGES_OFFSET + LCD_BOOTSTRAP_PAGES_SIZE)
#define LCD_STACK_PROT_PAGE_OFFSET \
(LCD_BOOTSTRAP_PAGE_TABLES_OFFSET+ LCD_BOOTSTRAP_PAGE_TABLES_SIZE)
#endif /* CONFIG_LCD_SINGLE_EPT */
/* HOLE */
#define LCD_STACK_REGION_OFFSET \
(LCD_MISC_REGION_OFFSET + LCD_MISC_REGION_SIZE + (1UL << 30))
#define LCD_STACK_OFFSET \
(LCD_STACK_REGION_OFFSET + LCD_STACK_REGION_SIZE - LCD_STACK_SIZE)
#ifdef CONFIG_LCD_SINGLE_EPT
/*
* When we have multiple LCDs and decide to have a single EPT for all of them,
* we need to have private UTCB, bootstrap pages and stack pages. For
* allocating those pages, we need the ID of the LCD to move offsets
* accordingly. So, we partition the UTCB, BOOTSTRAP_PAGES and STACK regions to
* support multiple LCDs.
*/
/*
* +---------------------------+ 0x0000 0001 0000 0000 (4 GB)
* | Stack for LCD 0 |
* +---------------------------+ (4 GB - STACK_SZ (8 KB) - (0 * STACK_SZ)
* | Stack for LCD 1 |
* +---------------------------+ (4 GB - STACK_SZ (8 KB) - (1 * STACK_SZ)
* | Stack for LCD 2 |
* +---------------------------+ (4 GB - STACK_SZ (8 KB) - (2 * STACK_SZ)
* | . |
* | . |
* +---------------------------+ (4 GB - STACK_SZ (8 KB) - ((N-2) * STACK_SZ)
* | Stack for LCD N |
* +---------------------------+ (4 GB - STACK_SZ (8 KB) - ((N-1) * STACK_SZ)
* | Stack Region |
* +---------------------------+ 0x0000 0000 c000 0000 (3 GB)
* | HOLE / Unmapped |
* | (1 GB) |
* +---------------------------+ 0x0000 0000 8000 0000 (2 GB)
* | Bootstrap Pagetable |
* | Pages (256 MB) |
* +---------------------------+ (1 GB + 768 MB)
* | Global segment |
* | Pages (256MB) |
* +---------------------------+ (1 GB + 512 MB)
* | Bootstrap Pages |
* | (256MB) |
* +---------------------------+ (1 GB + 256 MB)
* | UTCB Pages |
* | (256MB) |
* +---------------------------+ 0x0000 0000 4000 0000 (1 GB)
*/
/* Region Sizes */
#define LCD_UTCB_REGION_SIZE (1UL << 28) /* .................. 256 MBs */
#define LCD_BOOTSTRAP_PAGES_REGION_SIZE (1UL << 28) /* ........... 256 MBs */
#define LCD_GLOBAL_SEGMENT_PAGE_REGION_SIZE (1UL << 28) /* ........... 256 MBs */
/* Region offsets */
#define LCD_BOOTSTRAP_PAGES_REGION_OFFSET \
(LCD_UTCB_OFFSET + LCD_UTCB_REGION_SIZE)
#define LCD_GLOBAL_SEGMENT_PAGE_REGION_OFFSET \
(LCD_BOOTSTRAP_PAGES_REGION_OFFSET + LCD_BOOTSTRAP_PAGES_REGION_SIZE)
#define LCD_BOOTSTRAP_PAGE_TABLES_REGION_OFFSET \
(LCD_GLOBAL_SEGMENT_PAGE_REGION_OFFSET+ LCD_GLOBAL_SEGMENT_PAGE_REGION_SIZE)
#define LCD_BOOTSTRAP_PAGE_TABLES_OFFSET LCD_BOOTSTRAP_PAGE_TABLES_REGION_OFFSET
#define LCD_GLOBAL_SEGMENT_PAGE_OFFSET LCD_GLOBAL_SEGMENT_PAGE_REGION_OFFSET
/* Offsets for LCDs within a region */
#define LCD_UTCB_OFFSET_CHILD(id) \
(LCD_MISC_REGION_OFFSET + ((id) * LCD_UTCB_SIZE))
#define LCD_BOOTSTRAP_PAGES_OFFSET_CHILD(id) (LCD_BOOTSTRAP_PAGES_REGION_OFFSET \
+ ((id) * LCD_BOOTSTRAP_PAGES_SIZE))
#define LCD_GLOBAL_SEGMENT_PAGE_OFFSET_CHILD(id) (LCD_GLOBAL_SEGMENT_PAGE_REGION_OFFSET \
+ ((id) * LCD_GLOBAL_SEGMENT_SIZE))
#define LCD_STACK_OFFSET_CHILD(id) \
(LCD_STACK_REGION_OFFSET + LCD_STACK_REGION_SIZE \
- LCD_STACK_SIZE - ((id) * LCD_STACK_SIZE))
/* GVA/GPA addresses */
/* UTCB pages */
#define LCD_UTCB_GP_ADDR_CHILD(id) (__gpa(LCD_PHYS_BASE \
+ LCD_UTCB_OFFSET + (id * LCD_UTCB_SIZE)))
#define LCD_UTCB_GV_ADDR_CHILD(id) (__gva(LCD_VIRT_BASE \
+ LCD_UTCB_OFFSET + (id * LCD_UTCB_SIZE)))
/* Bootstrap pages */
#define LCD_BOOTSTRAP_PAGES_GP_ADDR_CHILD(id) \
(__gpa(LCD_PHYS_BASE + LCD_BOOTSTRAP_PAGES_OFFSET_CHILD(id)))
#define LCD_BOOTSTRAP_PAGES_GV_ADDR_CHILD(id) \
(__gva(LCD_VIRT_BASE + LCD_BOOTSTRAP_PAGES_OFFSET_CHILD(id)))
/* global segment pages */
#define LCD_GLOBAL_SEGMENT_PAGE_GP_ADDR_CHILD(id) \
(__gpa(LCD_PHYS_BASE + LCD_GLOBAL_SEGMENT_PAGE_OFFSET_CHILD(id)))
#define LCD_GLOBAL_SEGMENT_PAGE_GV_ADDR_CHILD(id) \
(__gva(LCD_VIRT_BASE + LCD_GLOBAL_SEGMENT_PAGE_OFFSET_CHILD(id)))
/* Stack pages */
#define LCD_STACK_GP_ADDR_CHILD(id) (__gpa(LCD_PHYS_BASE \
+ LCD_STACK_OFFSET_CHILD(id)))
#define LCD_STACK_GV_ADDR_CHILD(id) (__gva(LCD_VIRT_BASE \
+ LCD_STACK_OFFSET_CHILD(id)))
#define LCD_ARCH_GS_BASE_CHILD(id) __gpa(LCD_PHYS_BASE \
+ LCD_GLOBAL_SEGMENT_PAGE_OFFSET_CHILD(id))
#define LCD_ARCH_GS_GV_BASE_CHILD(id) __gva(LCD_VIRT_BASE \
+ LCD_GLOBAL_SEGMENT_PAGE_OFFSET_CHILD(id))
#endif /* CONFIG_LCD_SINGLE_EPT */
/* HOLE */
#define LCD_HEAP_REGION_OFFSET \
......@@ -253,6 +365,7 @@
#define LCD_ARCH_FS_BASE __gpa(0UL)
#define LCD_ARCH_FS_LIMIT 0xFFFFFFFF
#define LCD_ARCH_GS_BASE __gpa(0UL)
#define LCD_ARCH_GS_GV_BASE __gva(0UL)
#define LCD_ARCH_GS_LIMIT 0xFFFFFFFF
#define LCD_ARCH_GDTR_BASE __gpa(0UL)
#define LCD_ARCH_GDTR_LIMIT 0x0 /* no gdt */
......
......@@ -866,7 +866,7 @@ static void vmx_free_vpid(struct lcd_arch *lcd_arch)
spin_unlock(&lcd_vpids.lock);
}
int lcd_arch_create(struct lcd_arch **out)
int lcd_arch_create(struct lcd_arch **out, bool is_child)
{
struct lcd_arch *lcd_arch;
int ret;
......@@ -879,13 +879,19 @@ int lcd_arch_create(struct lcd_arch **out)
ret = -ENOMEM;
goto fail_alloc;
}
/*
* Set up ept
*/
ret = lcd_arch_ept_init(lcd_arch);
if (ret) {
LCD_ERR("setting up etp");
goto fail_ept;
if (is_child) {
printk("%s, initializing EPT for child \n", __func__);
ret = lcd_arch_ept_child_init(lcd_arch);
} else {
ret = lcd_arch_ept_init(lcd_arch);
if (ret) {
LCD_ERR("setting up etp");
goto fail_ept;
}
}
/*
* Alloc vmcs
......@@ -930,7 +936,7 @@ fail_alloc:
return ret;
}
void lcd_arch_destroy(struct lcd_arch *lcd_arch)
void lcd_arch_destroy(struct lcd_arch *lcd_arch, bool is_child)
{
/*
* Premption Disabled
......@@ -964,7 +970,8 @@ void lcd_arch_destroy(struct lcd_arch *lcd_arch)
*/
vmx_free_vpid(lcd_arch);
lcd_arch_free_vmcs(lcd_arch->vmcs);
lcd_arch_ept_free(lcd_arch);
if (!is_child)
lcd_arch_ept_free(lcd_arch);
kmem_cache_free(lcd_arch_cache, lcd_arch);
}
......@@ -1061,3 +1068,12 @@ void lcd_arch_irq_enable(struct lcd_arch *lcd_arch)
PIN_BASED_EXT_INTR_MASK);
vmx_put_cpu(lcd_arch);
}
/*
 * lcd_arch_set_gs_base -- set the LCD's GS segment base to guest
 * virtual address @a.
 *
 * Mirrors the value into the saved register state, then writes
 * GUEST_GS_BASE in the VMCS. The vmx_get_cpu / vmx_put_cpu pair must
 * bracket the vmcs_writel so the right VMCS is loaded on this cpu —
 * do not reorder.
 *
 * Always returns 0.
 */
int lcd_arch_set_gs_base(struct lcd_arch *lcd_arch, gva_t a)
{
	/* NOTE(review): regs.gs is given the gva, not a selector —
	 * confirm other readers of regs.gs expect that. */
	lcd_arch->regs.gs = gva_val(a);
	vmx_get_cpu(lcd_arch);
	vmcs_writel(GUEST_GS_BASE, gva_val(a));
	vmx_put_cpu(lcd_arch);
	return 0;
}
......@@ -10,6 +10,13 @@
#include <lcd_domains/types.h>
#include <asm/lcd_domains/types.h>
#include <asm/lcd_domains/microkernel.h>
#include <liblcd/address_spaces.h>
#ifdef CONFIG_LCD_SINGLE_EPT
u64 g_vmcs_ptr;
lcd_arch_epte_t *g_ept_root;
struct mutex g_ept_lock;
#endif
/* INVEPT / INVVPID --------------------------------------------------*/
......@@ -198,7 +205,7 @@ enum vmx_epte_mts {
*
* See Intel SDM V3 Figure 28-1 and 28.2.2.
*/
static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
static void vmx_epte_set(lcd_arch_epte_t *epte, gpa_t ga, hpa_t a, int level)
{
/*
* zero out epte, and set
......@@ -217,6 +224,25 @@ static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
* & section 28.2.5.2 of the Intel Software Developer
* Manual Vol 3 for effective memory type.
*/
/*
* XXX: To support ioremap, set the effective memory type to be
* uncacheable. According to Intel SDM 28.2.6.2, If IPAT
* (ignore PAT) is set, the memory type set in EPT (bits 5:3)
* would take effect.
* TODO: create new iommu_map api which would propagate this
* setting.
*/
if ((gpa_val(ga) >= gpa_val(LCD_IOREMAP_GP_ADDR)) &&
(gpa_val(ga) <= gpa_val(gpa_add(LCD_IOREMAP_GP_ADDR,
LCD_IOREMAP_REGION_SIZE)))) {
*epte |= VMX_EPTE_MT_UC << VMX_EPT_MT_EPTE_SHIFT;
*epte |= VMX_EPT_IPAT_BIT;
//printk("%s, set (epte:%lx) UC to gpa:%lx hpa: %lx\n", __func__, *epte, gpa_val(ga));
} else {
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
}
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
}
......@@ -254,7 +280,7 @@ int lcd_arch_ept_walk(struct lcd_arch *lcd, gpa_t a, int create,
return -ENOMEM;
}
memset(hva2va(page), 0, PAGE_SIZE);
vmx_epte_set(&dir[idx], hva2hpa(page), i);
vmx_epte_set(&dir[idx], a, hva2hpa(page), i);
}
dir = (lcd_arch_epte_t *) hva2va(vmx_epte_hva(dir[idx]));
......@@ -267,9 +293,9 @@ int lcd_arch_ept_walk(struct lcd_arch *lcd, gpa_t a, int create,
return 0;
}
void lcd_arch_ept_set(lcd_arch_epte_t *epte, hpa_t a)
void lcd_arch_ept_set(lcd_arch_epte_t *epte, gpa_t gpa, hpa_t hpa)
{
vmx_epte_set(epte, a, 3);
vmx_epte_set(epte, gpa, hpa, 3);
}
int lcd_arch_ept_unset(lcd_arch_epte_t *epte)
......@@ -292,10 +318,16 @@ int lcd_arch_ept_map(struct lcd_arch *lcd, gpa_t ga, hpa_t ha,
/*
* Walk ept
*/
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_lock(&g_ept_lock);
#endif
ret = lcd_arch_ept_walk(lcd, ga, create, &ept_entry);
if (ret)
if (ret) {
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return ret;
}
/*
* Check if guest physical address already mapped
*/
......@@ -303,13 +335,20 @@ int lcd_arch_ept_map(struct lcd_arch *lcd, gpa_t ga, hpa_t ha,
LCD_ERR("would overwrite hpa %lx with hpa %lx\n",
hpa_val(lcd_arch_ept_hpa(ept_entry)),
hpa_val(ha));
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return -EINVAL;
}
/*
* Map the guest physical addr to the host physical addr.
*/
lcd_arch_ept_set(ept_entry, ha);
lcd_arch_ept_set(ept_entry, ga, ha);
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return 0;
}
......@@ -321,6 +360,7 @@ int lcd_arch_ept_map_range(struct lcd_arch *lcd, gpa_t ga_start,
unsigned long len;
len = npages * PAGE_SIZE;
for (off = 0; off < len; off += PAGE_SIZE) {
if (lcd_arch_ept_map(lcd,
/* gpa */
......@@ -349,15 +389,25 @@ int lcd_arch_ept_unmap(struct lcd_arch *lcd, gpa_t a)
/*
* Walk ept
*/
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_lock(&g_ept_lock);
#endif
ret = lcd_arch_ept_walk(lcd, a, 0, &ept_entry);
if (ret)
if (ret) {
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return ret;
}
/*
* Unset
*/
lcd_arch_ept_unset(ept_entry);
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return 0;
}
......@@ -369,9 +419,17 @@ int lcd_arch_ept_unmap2(struct lcd_arch *lcd, gpa_t a, hpa_t *hpa_out)
/*
* Walk ept
*/
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_lock(&g_ept_lock);
#endif
ret = lcd_arch_ept_walk(lcd, a, 0, &ept_entry);
if (ret)
if (ret) {
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return ret;
}
/*
* Extract hpa
*/
......@@ -382,6 +440,9 @@ int lcd_arch_ept_unmap2(struct lcd_arch *lcd, gpa_t a, hpa_t *hpa_out)
*/
lcd_arch_ept_unset(ept_entry);
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return 0;
}
......@@ -412,9 +473,18 @@ int lcd_arch_ept_gpa_to_hpa(struct lcd_arch *lcd, gpa_t ga, hpa_t *ha_out, bool
/*
* Walk ept
*/
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_lock(&g_ept_lock);
#endif
ret = lcd_arch_ept_walk(lcd, ga, 0, &ept_entry);
if (ret)
if (ret) {
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return ret;
}
/*
* Confirm the entry is present
......@@ -422,6 +492,9 @@ int lcd_arch_ept_gpa_to_hpa(struct lcd_arch *lcd, gpa_t ga, hpa_t *ha_out, bool
if (!vmx_epte_present(*ept_entry) && verbose) {
LCD_ERR("gpa %lx is not mapped\n",
gpa_val(ga));
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return -EINVAL;
}
......@@ -432,6 +505,9 @@ int lcd_arch_ept_gpa_to_hpa(struct lcd_arch *lcd, gpa_t ga, hpa_t *ha_out, bool
hpa = hpa_add(hpa, vmx_ept_offset(ga));
*ha_out = hpa;
#ifdef CONFIG_LCD_SINGLE_EPT
mutex_unlock(&g_ept_lock);
#endif
return 0;
}
EXPORT_SYMBOL(lcd_arch_ept_gpa_to_hpa);
......@@ -487,6 +563,15 @@ void lcd_arch_ept_free(struct lcd_arch *lcd)
vmx_free_ept_dir_level(dir, 0);
}
#ifdef CONFIG_LCD_SINGLE_EPT
/*
 * lcd_arch_ept_child_init -- attach a child LCD to the shared EPT.
 *
 * Instead of building fresh tables (lcd_arch_ept_init), the child
 * reuses the root and VMCS eptp value published via the globals
 * g_ept_root / g_vmcs_ptr by the first lcd_arch_ept_init call.
 *
 * NOTE(review): no reference counting here — the shared tables must
 * outlive every child. Confirm teardown ordering (lcd_arch_destroy
 * skips lcd_arch_ept_free when is_child is set).
 */
int lcd_arch_ept_child_init(struct lcd_arch *lcd_arch)
{
	lcd_arch->ept.root = g_ept_root;
	lcd_arch->ept.vmcs_ptr = g_vmcs_ptr;
	return 0;
}
#endif
int lcd_arch_ept_init(struct lcd_arch *lcd_arch)
{
hva_t page;
......@@ -524,7 +609,11 @@ int lcd_arch_ept_init(struct lcd_arch *lcd_arch)
* Init the mutex
*/
mutex_init(&lcd_arch->ept.lock);
#ifdef CONFIG_LCD_SINGLE_EPT
g_ept_root = lcd_arch->ept.root;
g_vmcs_ptr = eptp;
mutex_init(&g_ept_lock);
#endif
return 0;
}
......
This diff is collapsed.
......@@ -142,15 +142,60 @@
* All locking etc. is elided.
*/
#undef DEFINE_MUTEX
#define DEFINE_MUTEX(x) struct mutex x
#undef get_online_cpus
#define get_online_cpus() do { } while(0)
#undef put_online_cpus
#define put_online_cpus() do { } while(0)
#undef DEFINE_MUTEX
#define DEFINE_MUTEX(x) struct mutex x
#undef local_irq_enable
#define local_irq_enable() do { } while(0)
#undef local_irq_disable
#define local_irq_disable() do { } while(0)
#undef local_irq_save
#define local_irq_save(x) do { } while(0)
#undef local_irq_restore
#define local_irq_restore(x) do { } while(0)
#ifdef CONFIG_SMP
#undef spin_lock_init
#define spin_lock_init(x) lcd_spin_lock_init(x)
#undef spin_lock
#define spin_lock(x) lcd_spin_lock(x)
#undef spin_unlock
#define spin_unlock(x) lcd_spin_unlock(x)
#undef spin_lock_irqsave
#define spin_lock_irqsave(x,flags) spin_lock(x)
#undef spin_lock_irq
#define spin_lock_irq(x) spin_lock(x)
#undef spin_unlock_irq
#define spin_unlock_irq(x) spin_unlock(x)
#undef spin_unlock_irqrestore
#define spin_unlock_irqrestore(x,flags) spin_unlock(x)
#undef _raw_spin_lock
#define _raw_spin_lock(x) spin_lock(x)
#undef _raw_spin_unlock
#define _raw_spin_unlock(x) spin_unlock(x)
#else /* CONFIG_SMP */
#undef DEFINE_MUTEX
#define DEFINE_MUTEX(x) struct mutex x
#undef spin_lock_init
#define spin_lock_init(x) do { } while(0)
......@@ -172,27 +217,23 @@
#undef spin_unlock_irq
#define spin_unlock_irq(x) do { } while(0)
#undef _raw_spin_lock
#define _raw_spin_lock(x) do { } while (0)
#undef _raw_spin_unlock
#define _raw_spin_unlock(x) do { } while (0)
#endif /* CONFIG_SMP */
#undef rcu_barrier
#define rcu_barrier() do { smp_mb(); } while(0)
#undef call_rcu
#define call_rcu(arg, func) do { func(arg); } while(0)
#undef local_irq_enable
#define local_irq_enable() do { } while(0)
#undef local_irq_disable
#define local_irq_disable() do { } while(0)
#undef might_sleep_if
#define might_sleep_if(x) do { } while (0)
#undef _raw_spin_lock
#define _raw_spin_lock(x) do { } while (0)
#undef _raw_spin_unlock
#define _raw_spin_unlock(x) do { } while (0)
static inline void force_down_read(void *x)
{
}
......
......@@ -62,9 +62,11 @@
#undef CONFIG_LOCKDEP
#undef CONFIG_DEBUG_LOCK_ALLOC
#undef CONFIG_LOCK_STAT
#undef CONFIG_SMP
#undef CONFIG_KALLSYMS
#undef CONFIG_SWAP
#ifndef CONFIG_LCD_SINGLE_EPT
#undef CONFIG_SMP
#endif
#undef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef CONFIG_SLAB
......@@ -104,3 +106,12 @@
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
#endif
#ifndef _ASM_X86_CURRENT_H
#define _ASM_X86_CURRENT_H
#endif
extern struct task_struct *current_task;
#define get_current() (current_task)
#define current get_current()
......@@ -28,7 +28,7 @@
*
* This should be called before the page allocator inits.
*/
int __liblcd_mem_itree_init(void);
int __liblcd_mem_itree_init(int lcd_id);
/**
* __liblcd_mem_itree_booted -- Call this when kmalloc is initialized
*
......
......@@ -157,6 +157,19 @@ struct lcd {
* needs to delete capability for reply endpoint)
*/
int doing_reply;
/*
* List of child LCD pointers that are tied to the parent LCD's cspace
*/
struct list_head child_lcds;
struct list_head lcd_item;
/*
* reference to the parent LCD
*/
struct lcd *parent_lcd;
bool is_child;
/*
* Send/recv queue we are on.
*
......@@ -275,14 +288,14 @@ int __lcd_create_no_vm_no_thread(struct lcd **out);
* which we need a new kthread to run the LCD's code, but we don't want
* the code to run inside an isolated VM.
*/
int __lcd_create_no_vm(struct lcd **out, const char *name);
int __lcd_create_no_vm(struct lcd **out, const char *name, int lcd_id, int num_lcds);
/**
* __lcd_create -- Create an empty isolated LCD
* @caller: the LCD doing the creating
* @slot: the slot in the @caller's cspace where the LCD capability should
* be stored
*/
int __lcd_create(struct lcd *caller, cptr_t slot);
int __lcd_create(struct lcd *caller, cptr_t slot, int lcd_id, int num_lcds);
/**
* __lcd_create_klcd -- Create an empty non-isolated LCD (kLCD)
* @caller: the LCD doing the creating
......@@ -318,7 +331,7 @@ void __lcd_put(struct lcd *caller, struct cnode *cnode, struct lcd *lcd);
* @utcb_page: where to map the UTCB in the LCD's address space
*/
int __lcd_config(struct lcd *caller, cptr_t lcd, gva_t pc, gva_t sp,
gpa_t gva_root, gpa_t utcb_page);
gpa_t gva_root, gpa_t utcb_page, gva_t gs_base);
/**
* __lcd_memory_grant_and_map -- Grant LCD memory object capability and
......
......@@ -21,6 +21,7 @@ struct lcd_boot_cptrs {
cptr_t boot_pages;
cptr_t stack;
cptr_t gv;
cptr_t gs_page;
};
struct lcd_boot_info {
......@@ -35,6 +36,8 @@ struct lcd_boot_info {
gva_t module_core_base;
unsigned long module_init_size;
unsigned long module_core_size;
int lcd_id;
struct task_struct *lcd_current_task;
/*
* Capabilities to memory objects
*/
......@@ -45,6 +48,8 @@ struct lcd_boot_info {
cptr_t cptrs[LCD_NUM_BOOT_CPTRS];
};
#define current_lcd_id (lcd_get_boot_info()->lcd_id)
#define current_task (lcd_get_boot_info()->lcd_current_task)
/**
* lcd_get_boot_info -- Return the bootstrap info
*
......
......@@ -38,7 +38,7 @@
* Creates a bare LCD with an empty cspace and address space (nothing is
* mapped it - except the dedicated page for the LCD's UTCB - see sync_ipc.h).
*/
int lcd_create(cptr_t *lcd);
int lcd_create(cptr_t *lcd, int lcd_id, int num_lcds);
/**
* lcd_create_klcd -- Create an LCD that runs in the non-isolated host
......@@ -73,7 +73,7 @@ int lcd_create_klcd(cptr_t *klcd);
* For now, we assume lcd will boot with a guest virtual address space.
*/
int lcd_config_registers(cptr_t lcd, gva_t pc, gva_t sp, gpa_t gva_root,
gpa_t utcb_page);
gpa_t utcb_page, gva_t gs_base);
/**
* lcd_memory_grant_and_map -- Grant LCD access to memory object, and map it
......@@ -148,6 +148,7 @@ struct lcd_create_ctx {
void *stack;
void *m_init_bits;
void *m_core_bits;
void *gs_page;
};
/**
......@@ -177,8 +178,10 @@ struct lcd_create_ctx {
* to any other lcd, etc., this will stop and destroy the lcd.
*/
int lcd_create_module_lcd(char *mdir, char *mname, cptr_t *lcd,
struct lcd_create_ctx **ctx);
struct lcd_create_ctx **ctx, int child_id);
int lcd_create_module_lcds(char *mdir, char *mname, cptr_t *lcd,
struct lcd_create_ctx **ctx, int num_child);
/**
* lcd_to_boot_info -- Extract lcd_boot_info from create context
* @ctx: the lcd create ctx returned from lcd_create_module_lcd
......
#ifndef _LCD_SPINLOCK_H
#define _LCD_SPINLOCK_H

/*
 * Minimal busy-wait (test-and-set) spinlock for LCD code.
 * No sleeping, no irq handling — callers manage those themselves.
 */
typedef struct lcd_spinlock {
	unsigned int locked; // Is the lock held? 0 = free, nonzero = held
} lcd_spinlock_t;

/* Put @lk into the unlocked state. */
void lcd_spin_lock_init(struct lcd_spinlock *lk);
/* Spin until @lk is acquired. */
void lcd_spin_lock(struct lcd_spinlock *lk);
/* Release @lk previously taken with lcd_spin_lock(). */
void lcd_spin_unlock(struct lcd_spinlock *lk);

#endif /* _LCD_SPINLOCK_H */
......@@ -19,7 +19,7 @@
/* UTCB ---------------------------------------- */
/*
 * Number of general-purpose 64-bit message registers (mr) in the UTCB.
 * NOTE(review): a stale `#define LCD_NUM_REGS 8` preceded this one;
 * redefining a macro to a different value is a compile error, so only
 * the current value is kept. The register accessors below go up to
 * index 24, which this count accommodates.
 */
#define LCD_NUM_REGS 42
struct lcd_utcb {
/**
* mr registers are general purpose 64-bit registers. You can
......@@ -63,6 +63,17 @@ static inline void lcd_set_cr##idx(cptr_t val) \
{ \
lcd_get_utcb()->cr[idx] = val; \
}
static inline void lcd_set_cr(int idx, cptr_t val)
{
lcd_get_utcb()->cr[idx] = val;
}
static inline cptr_t lcd_get_cr(int idx)
{
return lcd_get_utcb()->cr[idx];
}
LCD_MK_REG_ACCESS(0)
LCD_MK_REG_ACCESS(1)
LCD_MK_REG_ACCESS(2)
......@@ -72,6 +83,24 @@ LCD_MK_REG_ACCESS(5)
LCD_MK_REG_ACCESS(6)
LCD_MK_REG_ACCESS(7)
LCD_MK_REG_ACCESS(8)
LCD_MK_REG_ACCESS(9)
LCD_MK_REG_ACCESS(10)
LCD_MK_REG_ACCESS(11)
LCD_MK_REG_ACCESS(12)
LCD_MK_REG_ACCESS(13)
LCD_MK_REG_ACCESS(14)
LCD_MK_REG_ACCESS(15)
LCD_MK_REG_ACCESS(16)
LCD_MK_REG_ACCESS(17)
LCD_MK_REG_ACCESS(18)
LCD_MK_REG_ACCESS(19)
LCD_MK_REG_ACCESS(20)
LCD_MK_REG_ACCESS(21)
LCD_MK_REG_ACCESS(22)
LCD_MK_REG_ACCESS(23)
LCD_MK_REG_ACCESS(24)
/* SYNCHRONOUS IPC ---------------------------------------- */
/**
......
......@@ -8,7 +8,7 @@
#include <liblcd/liblcd.h>
#include <lcd_domains/microkernel.h>
int lcd_create(cptr_t *lcd)
int lcd_create(cptr_t *lcd, int lcd_id, int num_lcds)
{
cptr_t slot;
int ret;
......@@ -23,7 +23,7 @@ int lcd_create(cptr_t *lcd)
/*
* Make LCD
*/
ret = __lcd_create(current->lcd, slot);
ret = __lcd_create(current->lcd, slot, lcd_id, num_lcds);
if (ret) {
LIBLCD_ERR("lcd create failed");
goto fail2;
......@@ -71,9 +71,9 @@ fail1:
}
int lcd_config_registers(cptr_t lcd, gva_t pc, gva_t sp, gpa_t gva_root,
gpa_t utcb_page)
gpa_t utcb_page, gva_t gs_base)
{
return __lcd_config(current->lcd, lcd, pc, sp, gva_root, utcb_page);
return __lcd_config(current->lcd, lcd, pc, sp, gva_root, utcb_page, gs_base);
}
int lcd_memory_grant_and_map(cptr_t lcd, cptr_t mo, cptr_t dest_slot,
......
......@@ -41,7 +41,8 @@ int lcd_create_module_klcd(char *mdir, char *mname, cptr_t *klcd_out)
__gva(hva_val(va2hva(m->init))),
__gva(0),
__gpa(0),
__gpa(0));
__gpa(0),
__gva(0));
if (ret) {
LCD_ERR("failed to config klcd");
goto fail3;
......
......@@ -17,5 +17,14 @@ struct lcd_boot_info *
LIBLCD_FUNC_ATTR
lcd_get_boot_info(void)
{
return (struct lcd_boot_info *)gva_val(LCD_BOOTSTRAP_PAGES_GV_ADDR);
#ifdef CONFIG_LCD_SINGLE_EPT
struct lcd_boot_info *info;
asm volatile ("movq %%gs:0x0, %[info]"
:[info]"=r"(info));
return info;
#else
return (struct lcd_boot_info *)gva_val(LCD_BOOTSTRAP_PAGES_GV_ADDR_CHILD(current_lcd_id));
#endif
}
......@@ -13,7 +13,7 @@
#include <lcd_config/post_hook.h>
int lcd_create(cptr_t *lcd)
int lcd_create(cptr_t *lcd, int lcd_id, int num_lcds)
{
cptr_t slot;
int ret;
......@@ -50,9 +50,9 @@ int lcd_create_klcd(cptr_t *klcd)
}
int lcd_config_registers(cptr_t lcd, gva_t pc, gva_t sp, gpa_t gva_root,
gpa_t utcb_page)
gpa_t utcb_page, gva_t gs_base)
{
return lcd_syscall_config_registers(lcd, pc, sp, gva_root, utcb_page);
return lcd_syscall_config_registers(lcd, pc, sp, gva_root, utcb_page, gs_base);
}
int lcd_memory_grant_and_map(cptr_t lcd, cptr_t mo, cptr_t dest_slot,
......
......@@ -13,16 +13,56 @@
#include <libfipc.h>
#include <lcd_domains/liblcd.h>
#include <asm/lcd_domains/liblcd.h>
#include <linux/slab.h>
#include <lcd_config/post_hook.h>
static int thc_initialized;
atomic_t lcd_initialized;
static int lcds_entered = 0;
int child_lcd_enter(void)
{
int ret;
ret = _lcd_create_sync_endpoint(LCD_CPTR_CALL_ENDPOINT);
if (ret) {
LIBLCD_ERR("creating call endpoint");
goto fail1;
}
LIBLCD_MSG("call endpoint created and installed for child LCD");
lcd_get_boot_info()->lcd_current_task = (struct task_struct*)kmalloc(sizeof(struct task_struct), GFP_KERNEL);
if (!lcd_get_boot_info()->lcd_current_task) {
LIBLCD_ERR("allocating current_task failed");
goto fail1;
}
thc_init();
lcd_printk("===============");
lcd_printk(" Child LCD BOOTED ");
lcd_printk("===============");
fail1:
return 0;
}
int
LIBLCD_FUNC_ATTR
lcd_enter(void)
{
int ret;
if (atomic_read(&lcd_initialized) == 0) {
atomic_set(&lcd_initialized, 1);
} else {
return child_lcd_enter();
}
/*
* Aside from the call endpoint, order is important ...
*
......@@ -40,7 +80,7 @@ lcd_enter(void)
/*
* Set up internal memory interval tree / resource tree
*/
ret = __liblcd_mem_itree_init();
ret = __liblcd_mem_itree_init(lcds_entered);
if (ret) {
LIBLCD_ERR("failed to init memory interval tree");
goto fail;
......@@ -75,6 +115,11 @@ lcd_enter(void)
}
LIBLCD_MSG("ioremap map initialized");
lcd_get_boot_info()->lcd_current_task = (struct task_struct*)kmalloc(sizeof(struct task_struct), GFP_KERNEL);
if (!lcd_get_boot_info()->lcd_current_task) {
LIBLCD_ERR("allocating current_task failed");
}
/*
* Initialize libcap
*/
......@@ -108,6 +153,8 @@ lcd_enter(void)
lcd_printk(" LCD BOOTED ");
lcd_printk("===============");
++lcds_entered;
return 0;
fail:
......
......@@ -174,7 +174,7 @@ void __liblcd_mem_itree_booted(void)
node_cache = KMEM_CACHE(lcd_resource_node, 0);
}
static int add_boot_memory(void)
static int add_boot_memory(int lcd_id)
{
struct lcd_resource_node *n;
/*
......@@ -186,6 +186,8 @@ static int add_boot_memory(void)
n->start = gpa_val(lcd_gva2gpa(lcd_get_boot_info()->module_init_base));
n->last = n->start + lcd_get_boot_info()->module_init_size - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.module_init;
LIBLCD_MSG("%s, module_init start 0x%X | last 0x%X | cptr %llu", __func__,
n->start, n->last, cptr_val(n->cptr));
lcd_resource_tree_insert(&itree, n);
/*
* Add module core
......@@ -196,6 +198,8 @@ static int add_boot_memory(void)
n->start = gpa_val(lcd_gva2gpa(lcd_get_boot_info()->module_core_base));
n->last = n->start + lcd_get_boot_info()->module_core_size - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.module_core;
LIBLCD_MSG("%s, module_core start 0x%X | last 0x%X | cptr %lu", __func__,
n->start, n->last, cptr_val(n->cptr));
lcd_resource_tree_insert(&itree, n);
/*
* Add bootstrap pages
......@@ -203,10 +207,12 @@ static int add_boot_memory(void)
n = alloc_itree_node();
if (!n)
goto fail3;
n->start = gpa_val(LCD_BOOTSTRAP_PAGES_GP_ADDR);
n->last = gpa_val(LCD_BOOTSTRAP_PAGES_GP_ADDR) +
n->start = gpa_val(LCD_BOOTSTRAP_PAGES_GP_ADDR_CHILD(lcd_id));
n->last = gpa_val(LCD_BOOTSTRAP_PAGES_GP_ADDR_CHILD(lcd_id)) +
LCD_BOOTSTRAP_PAGES_SIZE - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.boot_pages;
LIBLCD_MSG("%s, bootstrap pages start 0x%X | last 0x%X | cptr %llu", __func__,
n->start, n->last, cptr_val(n->cptr));
lcd_resource_tree_insert(&itree, n);
/*
* Add stack pages
......@@ -214,25 +220,41 @@ static int add_boot_memory(void)
n = alloc_itree_node();
if (!n)
goto fail4;
n->start = gpa_val(LCD_STACK_GP_ADDR);
n->last = gpa_val(LCD_STACK_GP_ADDR) +
n->start = gpa_val(LCD_STACK_GP_ADDR_CHILD(lcd_id));
n->last = gpa_val(LCD_STACK_GP_ADDR_CHILD(lcd_id)) +
LCD_STACK_SIZE - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.stack;
LIBLCD_MSG("%s, stack pages start 0x%X | last 0x%X | cptr %llu", __func__,
n->start, n->last, cptr_val(n->cptr));
lcd_resource_tree_insert(&itree, n);
/*
* Add boot guest virtual page tables
* Add global segment page
*/
n = alloc_itree_node();
if (!n)
goto fail5;
n->start = gpa_val(LCD_ARCH_GS_BASE_CHILD(lcd_id));
n->last = gpa_val(LCD_ARCH_GS_BASE_CHILD(lcd_id)) +
LCD_GLOBAL_SEGMENT_SIZE - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.gs_page;
lcd_resource_tree_insert(&itree, n);
/*
* Add boot guest virtual page tables
*/
n = alloc_itree_node();
if (!n)
goto fail6;
n->start = gpa_val(LCD_BOOTSTRAP_PAGE_TABLES_GP_ADDR);
n->last = gpa_val(LCD_BOOTSTRAP_PAGE_TABLES_GP_ADDR) +
LCD_BOOTSTRAP_PAGE_TABLES_SIZE - 1;
n->cptr = lcd_get_boot_info()->lcd_boot_cptrs.gv;
LIBLCD_MSG("%s, bootstrap PT pages start 0x%X | last 0x%X | cptr %llu", __func__,
n->start, n->last, cptr_val(n->cptr));
lcd_resource_tree_insert(&itree, n);
return 0;
fail6:
fail5:
fail4:
fail3:
......@@ -241,7 +263,7 @@ fail1:
return -ENOMEM; /* we don't bother "freeing" nodes */
}
int __liblcd_mem_itree_init(void)
int __liblcd_mem_itree_init(int lcd_id)
{
int ret;
/*
......@@ -255,7 +277,7 @@ int __liblcd_mem_itree_init(void)
/*
* Add existing memory (at boot) to resource tree
*/
ret = add_boot_memory();
ret = add_boot_memory(lcd_id);
if (ret) {
LIBLCD_ERR("failed to add resource nodes for boot mem");
goto fail2;
......
......@@ -16,7 +16,7 @@ struct lcd_utcb *
LIBLCD_FUNC_ATTR
lcd_get_utcb(void)
{
return (struct lcd_utcb *)gva_val(LCD_UTCB_GV_ADDR);
return (struct lcd_utcb *)gva_val(LCD_UTCB_GV_ADDR_CHILD(current_lcd_id));
}
int
......
......@@ -657,7 +657,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
{
unsigned int offset = 0;
struct sg_mapping_iter miter;
unsigned long flags;
__maybe_unused unsigned long flags;
unsigned int sg_flags = SG_MITER_ATOMIC;
if (to_buffer)
......
#include <lcd_config/pre_hook.h>
#include <liblcd/spinlock.h>
#include <lcd_config/post_hook.h>
/* Put @lk into the released (unlocked) state. */
void lcd_spin_lock_init(struct lcd_spinlock *lk)
{
	/* 0 == free; lcd_spin_lock() atomically swaps in 1 to claim it. */
	lk->locked = 0;
}
/*
 * Acquire @lk, spinning until it is available.
 *
 * This is a busy-wait lock: a caller that holds it for a long time
 * makes other CPUs burn cycles spinning, so keep critical sections
 * short.
 */
void lcd_spin_lock(struct lcd_spinlock *lk)
{
	/*
	 * Atomically swap 1 into lk->locked. Seeing a previous value of
	 * 0 means we took the lock; otherwise keep spinning.
	 */
	for (;;) {
		if (xchg(&lk->locked, 1) == 0)
			break;
	}
	/*
	 * Full memory barrier: forbid the compiler and the CPU from
	 * hoisting the critical section's loads/stores above this point,
	 * so they are observed to happen after the lock is acquired.
	 */
	__sync_synchronize();
}
// Release the lock.
// NOTE(review): the "movl" below stores a 4-byte zero, so this assumes
// lk->locked is a 32-bit field -- confirm against the definition of
// struct lcd_spinlock in liblcd/spinlock.h.
void lcd_spin_unlock(struct lcd_spinlock *lk)
{
	// Tell the C compiler and the processor to not move loads or stores
	// past this point, to ensure that all the stores in the critical
	// section are visible to other cores before the lock is released.
	// Both the C compiler and the hardware may re-order loads and
	// stores; __sync_synchronize() tells them both not to.
	__sync_synchronize();
	// Release the lock, equivalent to lk->locked = 0.
	// This code can't use a C assignment, since it might
	// not be atomic. A real OS would use C atomics here.
	// (On x86, an aligned 32-bit "movl" store is a single
	// instruction and cannot be torn, so this store safely
	// publishes the unlock.)
	asm volatile("movl $0, %0" : "+m" (lk->locked) : );
}
......@@ -135,6 +135,8 @@
#include "slab.h"
#include <liblcd/spinlock.h>
/* BEGIN LCD */
#include <lcd_config/post_hook.h>
/* END LCD */
......@@ -190,7 +192,7 @@ struct array_cache {
unsigned int limit;
unsigned int batchcount;
unsigned int touched;
spinlock_t lock;
lcd_spinlock_t lock;
void *entry[]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
......@@ -826,9 +828,8 @@ static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
{
struct kmem_cache_node *n = cachep->node[numa_mem_id()];
struct page *page;
#ifndef LCD_ISOLATE
unsigned long flags;
#endif
__maybe_unused unsigned long flags;
if (!pfmemalloc_active)
return;
......@@ -1705,9 +1706,7 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
unsigned long flags;
/* BEGIN LCD */
#endif
#ifndef LCD_ISOLATE
unsigned long flags = 0UL;
#endif
__maybe_unused unsigned long flags = 0UL;
/* END LCD */
int node;
......@@ -3391,7 +3390,7 @@ __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
static __always_inline void *
slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
{
unsigned long save_flags;
__maybe_unused unsigned long save_flags;
void *objp;
flags &= gfp_allowed_mask;
......@@ -3723,7 +3722,7 @@ EXPORT_SYMBOL(__kmalloc);
*/
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
__maybe_unused unsigned long flags;
cachep = cache_from_obj(cachep, objp);
if (!cachep)
return;
......@@ -3751,7 +3750,7 @@ EXPORT_SYMBOL(kmem_cache_free);
void kfree(const void *objp)
{
struct kmem_cache *c;
unsigned long flags;
__maybe_unused unsigned long flags;
trace_kfree(_RET_IP_, objp);
......
#ifndef LIBLCD_MM_SLAB_H
#define LIBLCD_MM_SLAB_H
#include <liblcd/spinlock.h>
/*
* Internal slab definitions
*/
......@@ -248,7 +250,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* The slab lists for all objects.
*/
struct kmem_cache_node {
spinlock_t list_lock;
lcd_spinlock_t list_lock;
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
......
......@@ -9,9 +9,9 @@
static void netdev_init_one_queue(struct net_device *dev,
struct netdev_queue *queue, void *_unused)
{
#ifndef LCD_ISOLATE
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
#ifndef LCD_ISOLATE
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
#endif
queue->xmit_lock_owner = -1;
......@@ -44,8 +44,9 @@ int netif_alloc_netdev_queues(struct net_device *dev)
dev->_tx = tx;
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
#ifndef LCD_ISOLATE
spin_lock_init(&dev->tx_global_lock);
#endif
return 0;
}
EXPORT_SYMBOL(netif_alloc_netdev_queues);
......@@ -20,7 +20,10 @@ struct task_struct fake_task = {
.pid = 12345678,
.cred = &fake_cred,
};
#ifndef CONFIG_LCD_SINGLE_EPT
struct task_struct *current_task = &fake_task;
#endif
/*
* Make sure these cause trouble. This kernel_stack value is non-canonical,
* so will hopefully cause a GP exception. This phys_base sets bits past
......@@ -71,7 +74,6 @@ long strnlen_user(const char __user *str, long count)
}
/* MUTEXES ------------------------------------------------------------ */
void __mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key)
{