...
 
Commits (3)
......@@ -23,7 +23,7 @@ int lcd_arch_ept_walk(struct lcd_arch *lcd, gpa_t a, int create,
/**
* Set the guest physical => host physical mapping in the ept entry.
*/
void lcd_arch_ept_set(lcd_arch_epte_t *epte, hpa_t a);
void lcd_arch_ept_set(lcd_arch_epte_t *epte, gpa_t ga, hpa_t a);
/**
* Read the host physical address stored in epte.
*/
......
......@@ -10,6 +10,7 @@
#include <lcd_domains/types.h>
#include <asm/lcd_domains/types.h>
#include <asm/lcd_domains/microkernel.h>
#include <liblcd/address_spaces.h>
/* INVEPT / INVVPID --------------------------------------------------*/
......@@ -198,7 +199,7 @@ enum vmx_epte_mts {
*
* See Intel SDM V3 Figure 28-1 and 28.2.2.
*/
static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
static void vmx_epte_set(lcd_arch_epte_t *epte, gpa_t ga, hpa_t a, int level)
{
/*
* zero out epte, and set
......@@ -217,8 +218,24 @@ static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
* & section 28.2.5.2 of the Intel Software Developer
* Manual Vol 3 for effective memory type.
*/
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
/*
* XXX: To support ioremap, set the effective memory type to be
* uncacheable. According to Intel SDM 28.2.6.2, If IPAT
* (ignore PAT) is set, the memory type set in EPT (bits 5:3)
* would take effect.
* TODO: create new iommu_map api which would propagate this
* setting.
*/
if ((gpa_val(ga) >= gpa_val(LCD_IOREMAP_GP_ADDR)) &&
(gpa_val(ga) <= gpa_val(gpa_add(LCD_IOREMAP_GP_ADDR,
LCD_IOREMAP_REGION_SIZE)))) {
*epte |= VMX_EPTE_MT_UC << VMX_EPT_MT_EPTE_SHIFT;
*epte |= VMX_EPT_IPAT_BIT;
printk("%s, set (epte:%lx) UC to gpa:%lx hpa: %lx\n", __func__, *epte, gpa_val(ga), hpa_val(a));
} else {
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
}
}
}
#ifndef CONFIG_LVD
......@@ -268,9 +285,9 @@ int lcd_arch_ept_walk(struct lcd_arch *lcd, gpa_t a, int create,
}
#endif
void lcd_arch_ept_set(lcd_arch_epte_t *epte, hpa_t a)
/**
 * lcd_arch_ept_set - install a guest-physical => host-physical mapping
 * in a leaf (walk level 3) EPT entry.
 * @epte: extended-page-table entry to fill in
 * @ga:   guest physical address being mapped; passed down so the EPT
 *        memory type can be chosen per-address (addresses in the LCD
 *        ioremap region are mapped uncacheable — see vmx_epte_set)
 * @a:    host physical address to store in the entry
 */
void lcd_arch_ept_set(lcd_arch_epte_t *epte, gpa_t ga, hpa_t a)
{
vmx_epte_set(epte, ga, a, 3);
}
int lcd_arch_ept_unset(lcd_arch_epte_t *epte)
......
......@@ -10,6 +10,7 @@
#include <lcd_domains/types.h>
#include <asm/lcd_domains/types.h>
#include <asm/lcd_domains/microkernel.h>
#include <liblcd/address_spaces.h>
/* INVEPT / INVVPID --------------------------------------------------*/
......@@ -167,6 +168,14 @@ enum vmx_epte_mts {
VMX_EPTE_MT_WB = 6, /* write back */
};
/**
 * is_ioremap_addr - check whether a guest physical address falls inside
 * the LCD ioremap region.
 * @gpa: guest physical address to test
 *
 * The region spans [LCD_IOREMAP_GP_ADDR,
 * LCD_IOREMAP_GP_ADDR + LCD_IOREMAP_REGION_SIZE). The end bound is
 * exclusive: the previous check used <=, which also matched the first
 * address one past the region (off-by-one).
 */
static inline bool is_ioremap_addr(gpa_t gpa)
{
	return (gpa_val(gpa) >= gpa_val(LCD_IOREMAP_GP_ADDR)) &&
		(gpa_val(gpa) < gpa_val(gpa_add(LCD_IOREMAP_GP_ADDR,
					LCD_IOREMAP_REGION_SIZE)));
}
/**
* Sets address in epte along with default access settings. Since
* we are using a page walk length of 4, epte's at all levels have
......@@ -181,7 +190,7 @@ enum vmx_epte_mts {
*
* See Intel SDM V3 Figure 28-1 and 28.2.2.
*/
static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
static void vmx_epte_set(lcd_arch_epte_t *epte, gpa_t ga, hpa_t a, int level)
{
/*
* zero out epte, and set
......@@ -199,8 +208,21 @@ static void vmx_epte_set(lcd_arch_epte_t *epte, hpa_t a, int level)
* & section 28.2.5.2 of the Intel Software Developer
* Manual Vol 3 for effective memory type.
*/
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
/*
 * XXX: To support ioremap, set the effective memory type to be
 * uncacheable. This is a hack; ideally we need a new API that
 * propagates this setting for us.
 */
if ((gpa_val(ga) >= gpa_val(LCD_IOREMAP_GP_ADDR)) &&
(gpa_val(ga) <= gpa_val(gpa_add(LCD_IOREMAP_GP_ADDR,
LCD_IOREMAP_REGION_SIZE)))) {
*epte |= VMX_EPTE_MT_UC << VMX_EPT_MT_EPTE_SHIFT;
*epte |= VMX_EPT_IPAT_BIT;
printk("%s, set (epte:%lx) UC to gpa:%lx hpa: %lx\n", __func__, *epte, gpa_val(ga), hpa_val(a));
} else {
*epte |= VMX_EPTE_MT_WB << VMX_EPT_MT_EPTE_SHIFT;
*epte &= ~VMX_EPT_IPAT_BIT;
}
}
}
......@@ -233,7 +255,7 @@ int lcd_arch_ept_walk_cpu(lcd_arch_epte_t *dir, gpa_t a, int create,
return -ENOMEM;
}
//memset(hva2va(page), 0, PAGE_SIZE);
vmx_epte_set(&dir[idx], hva2hpa(page), i);
vmx_epte_set(&dir[idx], a, hva2hpa(page), i);
}
dir = (lcd_arch_epte_t *) hva2va(vmx_epte_hva(dir[idx]));
......@@ -274,7 +296,7 @@ int lcd_arch_ept_map_cpu(struct lcd_arch *lcd, gpa_t ga, hpa_t ha,
/*
* Map the guest physical addr to the host physical addr.
*/
lcd_arch_ept_set(ept_entry, ha);
lcd_arch_ept_set(ept_entry, ga, ha);
return ret;
}
......@@ -310,7 +332,7 @@ int lcd_arch_ept_map_all_cpus(struct lcd_arch *lcd, gpa_t ga, hpa_t ha,
/*
* Map the guest physical addr to the host physical addr.
*/
lcd_arch_ept_set(ept_entry, ha);
lcd_arch_ept_set(ept_entry, ga, ha);
}
return ret;
......
......@@ -27,6 +27,8 @@
#define SENDER_DISPATCH_LOOP
#define PASS_DEV_ADDR_IN_REG
#define NAPI_STRUCT_ARRAY
#define CONFIG_VMALLOC_SHARED_POOL
#define SKB_GLOBAL_HASHTABLE
enum dispatch_t {
__PCI_REGISTER_DRIVER,
......@@ -65,6 +67,7 @@ enum dispatch_t {
__NAPI_ENABLE,
NAPI_DISABLE,
NAPI_COMPLETE_DONE,
NAPI_HASH_DEL,
ETH_TYPE_TRANS,
SKB_ADD_RX_FRAG,
ETH_GET_HEADLEN,
......
......@@ -2148,7 +2148,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
/* Even if we own the page, we are not allowed to use atomic_set()
* This would break get_page_unless_zero() users.
*/
page_ref_inc(page);
/*
 * XXX: The Linux kernel needs this extra reference because pages may
 * be reclaimed. However, inside LCDs there is no notion of page
 * reclamation, so the extra reference is unnecessary.
 */
/* page_ref_inc(page); */
return true;
}
......
......@@ -30,6 +30,8 @@ void napi_complete_done(struct napi_struct *n, int work_done) {
}
/* Dummy stub: logs that it was called and performs no synchronization. */
void synchronize_irq(unsigned int irq)
{
	LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__);
}
/* Dummy stub: logs that it was called and unconditionally reports success. */
bool napi_hash_del(struct napi_struct *napi)
{
	LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__);
	return true;
}
#endif
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
......@@ -198,6 +200,4 @@ void usleep_range(unsigned long min, unsigned long max)
udelay((max + min) >> 1);
}
bool napi_hash_del(struct napi_struct *napi) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); return true; }
#endif /* IXGBE_STUB_H */
......@@ -318,6 +318,11 @@ int net_klcd_dispatch_async_loop(struct fipc_message *message)
ret = napi_disable_callee(message);
break;
case NAPI_HASH_DEL:
trace(NAPI_HASH_DEL);
ret = napi_hash_del_callee(message);
break;
case NAPI_COMPLETE_DONE:
/* trace(NAPI_COMPLETE_DONE); */
ret = napi_complete_done_callee(message);
......
......@@ -67,6 +67,7 @@ int ___napi_schedule_irqoff_callee(struct fipc_message *_request);
int __napi_enable_callee(struct fipc_message *_request);
int napi_disable_callee(struct fipc_message *_request);
int napi_complete_done_callee(struct fipc_message *_request);
int napi_hash_del_callee(struct fipc_message *_request);
int synchronize_irq_callee(struct fipc_message *_request);
int eth_type_trans_callee(struct fipc_message *_request);
......
......@@ -19,9 +19,12 @@ extern struct glue_cspace *c_cspace;
extern void *lcd_stack;
/* to dump ixgbe registers */
static bool ixgbe_dump = 0;
bool ixgbe_dump = false;
module_param_named(dump_regs, ixgbe_dump, bool, S_IWUSR);
bool reinit = false;
module_param_named(reinit_pool, reinit, bool, S_IWUSR);
int net_klcd_dispatch_async_loop(struct fipc_message *message);
int net_klcd_syncipc_dispatch(struct fipc_message *message);
......