Commit d53b1d9e authored by Vikram Narayanan

merge/v4.8/lcd/arch: Switch to new macro/APIs

__get_cpu_var() is replaced by this_cpu_read()/this_cpu_write().

From upstream commit 1e02ce4c ("x86: Store a per-cpu shadow copy of CR4"):

    CR4 reads take several cycles. So, store a per-cpu shadow copy of CR4.

So, replace direct CR4 reads with cr4_read_shadow().
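A quick sketch of both migrations (hedged: `example_lcd_ptr` is a made-up per-cpu variable standing in for `local_lcd_arch` et al., not part of this patch). `__get_cpu_var()` was removed upstream in v3.19, so per-cpu accesses go through `this_cpu_read()`/`this_cpu_write()`, and CR4 is read through the per-cpu shadow:

```c
#include <linux/percpu.h>
#include <asm/processor-flags.h>	/* X86_CR4_VMXE */
#include <asm/tlbflush.h>		/* cr4_read_shadow(), cr4_set_bits() */

/* Hypothetical per-cpu pointer, for illustration only */
static DEFINE_PER_CPU(void *, example_lcd_ptr);

static void example_migrations(void *p)
{
	/* Old: __get_cpu_var(example_lcd_ptr) = p; */
	this_cpu_write(example_lcd_ptr, p);

	/* Old: if (__get_cpu_var(example_lcd_ptr) == p) ... */
	if (__this_cpu_read(example_lcd_ptr) == p)
		this_cpu_write(example_lcd_ptr, NULL);

	/* Old: read_cr4() touched the register on every call; the
	 * shadow copy avoids the multi-cycle CR4 read. */
	if (!(cr4_read_shadow() & X86_CR4_VMXE))
		cr4_set_bits(X86_CR4_VMXE);	/* updates CR4 and shadow */
}
```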

Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent 3a6f1b20
@@ -12,7 +12,7 @@
 #include <linux/kernel.h>
 #include <asm/vmx.h>
 #include <asm/tlbflush.h>
-#include <lcd_domains/types.h>
+#include <asm/lcd_domains/types.h>
 #include <lcd_domains/microkernel.h>
@@ -871,42 +871,42 @@ static int vmx_check_guest_ctrl_regs(struct lcd_arch *t)
 static inline int vmx_seg_type(u64 ar_bytes)
 {
-	return ar_bytes & AR_TYPE_MASK;
+	return ar_bytes & VMX_AR_TYPE_MASK;
 }
 
 static inline int vmx_desc_type(u64 ar_bytes)
 {
-	return (ar_bytes & AR_S_MASK) >> 3;
+	return (ar_bytes & VMX_AR_S_MASK) >> 3;
 }
 
 static inline int vmx_seg_dpl(u64 ar_bytes)
 {
-	return AR_DPL(ar_bytes);
+	return VMX_AR_DPL(ar_bytes);
 }
 
 static inline int vmx_seg_pres(u64 ar_bytes)
 {
-	return (ar_bytes & AR_P_MASK) != 0;
+	return (ar_bytes & VMX_AR_P_MASK) != 0;
 }
 
 static inline int vmx_seg_l_mode(u64 ar_bytes)
 {
-	return (ar_bytes & AR_L_MASK) != 0;
+	return (ar_bytes & VMX_AR_L_MASK) != 0;
 }
 
 static inline int vmx_seg_db(u64 ar_bytes)
 {
-	return (ar_bytes & AR_DB_MASK) != 0;
+	return (ar_bytes & VMX_AR_DB_MASK) != 0;
 }
 
 static inline int vmx_seg_gran(u64 ar_bytes)
 {
-	return (ar_bytes & AR_G_MASK) != 0;
+	return (ar_bytes & VMX_AR_G_MASK) != 0;
 }
 
 static inline int vmx_seg_usable(u64 ar_bytes)
 {
-	return (ar_bytes & AR_UNUSABLE_MASK) == 0;
+	return (ar_bytes & VMX_AR_UNUSABLE_MASK) == 0;
 }
 
 static inline int vmx_seg_bad_limit(u64 ar_bytes, u32 limit)

@@ -932,7 +932,7 @@ static inline int vmx_seg_usable_bad_lim(u64 ar_bytes, u32 limit)
 static inline int vmx_seg_resrv(u64 ar_bytes)
 {
-	return ar_bytes & AR_RESERVD_MASK;
+	return ar_bytes & VMX_AR_RESERVD_MASK;
 }
 
 static int vmx_check_guest_seg(struct lcd_arch *t)
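The AR_* churn above is purely mechanical: newer kernels renamed the VMX access-rights macros in `<asm/vmx.h>` from `AR_*` to `VMX_AR_*`. As a hedged sketch of what these accessors decode, using the canonical 64-bit code-segment access-rights value 0xa09b (the value and function are illustrations, not from this patch):

```c
#include <linux/types.h>
#include <linux/printk.h>
#include <asm/vmx.h>

static void example_decode_ar(void)
{
	u64 ar = 0xa09b;	/* accessed 64-bit code segment */

	pr_info("type=%d s=%d dpl=%d p=%d l=%d db=%d g=%d\n",
		(int)(ar & VMX_AR_TYPE_MASK),		/* 0xb: execute/read, accessed */
		(int)((ar & VMX_AR_S_MASK) >> 3),	/* 1: code/data descriptor */
		(int)VMX_AR_DPL(ar),			/* 0: ring 0 */
		(ar & VMX_AR_P_MASK) != 0,		/* present */
		(ar & VMX_AR_L_MASK) != 0,		/* 64-bit (long) mode */
		(ar & VMX_AR_DB_MASK) != 0,		/* D/B must be 0 when L=1 */
		(ar & VMX_AR_G_MASK) != 0);		/* 4K granularity */
}
```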
@@ -8,6 +8,8 @@
  */
 #include <asm/vmx.h>
 #include <asm/tlbflush.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
 #include <asm/desc.h>
@@ -194,7 +196,7 @@ static void vmx_setup_vmcs_host(struct lcd_arch *lcd_arch)
	 * Intel SDM V3 2.5
	 */
 	vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);
-	vmcs_writel(HOST_CR4, read_cr4());
+	vmcs_writel(HOST_CR4, cr4_read_shadow());
 	vmcs_writel(HOST_CR3, read_cr3());
 
 	/*
@@ -440,7 +442,7 @@ static void vmx_setup_vmcs_guest_regs(struct lcd_arch *lcd_arch)
 	if (boot_cpu_has(X86_FEATURE_PCID))
 		cr4 |= X86_CR4_PCIDE;
 	if (boot_cpu_has(X86_FEATURE_FSGSBASE))
-		cr4 |= X86_CR4_RDWRGSFS;
+		cr4 |= X86_CR4_FSGSBASE;
 	vmcs_writel(GUEST_CR4, cr4);
 	vmcs_writel(CR4_READ_SHADOW, cr4);
@@ -744,8 +746,8 @@ static void __vmx_get_cpu_helper(void *ptr)
 	lcd_arch = ptr;
 	BUG_ON(raw_smp_processor_id() != lcd_arch->cpu);
 	vmcs_clear(lcd_arch->vmcs);
-	if (__get_cpu_var(local_lcd_arch) == lcd_arch)
-		__get_cpu_var(local_lcd_arch) = NULL;
+	if (__this_cpu_read(local_lcd_arch) == lcd_arch)
+		this_cpu_write(local_lcd_arch, NULL);
 }
 
 void vmx_get_cpu(struct lcd_arch *lcd_arch)
@@ -769,9 +771,9 @@ void vmx_get_cpu(struct lcd_arch *lcd_arch)
	 * Otherwise, we need to make t active
	 * and current on this cpu.
	 */
-	if (__get_cpu_var(local_lcd_arch) != lcd_arch) {
-		__get_cpu_var(local_lcd_arch) = lcd_arch;
+	if (__this_cpu_read(local_lcd_arch) != lcd_arch) {
+		this_cpu_write(local_lcd_arch, lcd_arch);
 		if (lcd_arch->cpu != cur_cpu) {
@@ -952,7 +954,7 @@ void lcd_arch_destroy(struct lcd_arch *lcd_arch)
	 * VM clear on this cpu
	 */
 	vmcs_clear(lcd_arch->vmcs);
-	__get_cpu_var(local_lcd_arch) = NULL;
+	this_cpu_write(local_lcd_arch, NULL);
 	/*
	 * Preemption enabled
	 */
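For reference, the helpers this file now relies on keep the shadow in the per-cpu `cpu_tlbstate`; roughly, paraphrased from `arch/x86/include/asm/tlbflush.h` as introduced by upstream commit 1e02ce4c (simplified, the real code differs in detail):

```c
/* Sketch of the upstream CR4 shadow helpers, not a verbatim copy */
static inline unsigned long cr4_read_shadow_sketch(void)
{
	/* Per-cpu shadow read: no multi-cycle access to the real CR4 */
	return this_cpu_read(cpu_tlbstate.cr4);
}

static inline void cr4_set_bits_sketch(unsigned long mask)
{
	unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

	/* Touch the real register only when a bit actually changes */
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
```

This is why `cr4_read_shadow()` after `cr4_clear_bits()` (see the vmx_disable() hunk below) is a cheap consistency check: it reads back the value the helper just stored.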
@@ -415,9 +415,8 @@ static int __vmx_enable(struct lcd_arch_vmcs *vmxon_buf)
 	/*
	 * We can't use vmx if someone else is
	 */
-	if (read_cr4() & X86_CR4_VMXE)
+	if (cr4_read_shadow() & X86_CR4_VMXE)
 		return -EBUSY;
-	write_cr4(read_cr4() | X86_CR4_VMXE);
 
 	/*
	 * Set MSR_IA32_FEATURE_CONTROL
@@ -434,6 +433,11 @@ static int __vmx_enable(struct lcd_arch_vmcs *vmxon_buf)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
 
+	/*
+	 * Set the VMXE bit via cr4_set_bits() rather than writing cr4 directly
+	 */
+	cr4_set_bits(X86_CR4_VMXE);
+
 	/*
	 * Turn on vmx
	 */
@@ -456,7 +460,7 @@ static void vmx_enable(void *unused)
 	int ret;
 	struct lcd_arch_vmcs *vmxon_buf;
 
-	vmxon_buf = __get_cpu_var(vmxon_area);
+	vmxon_buf = __this_cpu_read(vmxon_area);
 
 	/*
	 * Turn on vmx
@@ -471,7 +475,7 @@
	 */
 	lcd_arch_ept_global_invalidate();
 
-	__get_cpu_var(vmx_enabled) = 1;
+	this_cpu_write(vmx_enabled, 1);
 
 	printk(KERN_INFO "VMX enabled on CPU %d\n",
 		raw_smp_processor_id());
@@ -493,10 +497,17 @@ failed:
  */
 static void vmx_disable(void *unused)
 {
-	if (__get_cpu_var(vmx_enabled)) {
+	if (__this_cpu_read(vmx_enabled)) {
 		__vmxoff();
-		write_cr4(read_cr4() & ~X86_CR4_VMXE);
-		__get_cpu_var(vmx_enabled) = 0;
+		cr4_clear_bits(X86_CR4_VMXE);
+		if (cr4_read_shadow() & X86_CR4_VMXE)
+			LCD_ERR("VMX disabling failed on cpu %d\n",
+				raw_smp_processor_id());
+		else
+			LCD_MSG("VMX disabling successful on cpu %d\n",
+				raw_smp_processor_id());
+		this_cpu_write(vmx_enabled, 0);
 	}
 }
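vmx_enable() and vmx_disable() take a `void *` because they are cross-CPU callbacks. A hedged sketch of how a caller typically fans them out over all CPUs (the wrapper name is invented; only `vmx_enable`, `vmx_disable`, and the per-cpu `vmx_enabled` flag come from this code):

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

static int example_enable_vmx_everywhere(void)
{
	int cpu;

	/* Run vmx_enable() on every online CPU and wait for completion */
	on_each_cpu(vmx_enable, NULL, 1);

	/* vmx_enabled is the per-cpu flag set at the end of vmx_enable() */
	for_each_online_cpu(cpu) {
		if (!per_cpu(vmx_enabled, cpu)) {
			on_each_cpu(vmx_disable, NULL, 1);	/* roll back */
			return -EIO;
		}
	}
	return 0;
}
```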