Commit d9ae7503 authored by Liu, Jinsong

x86: Implement TSC adjust feature for HVM guest

IA32_TSC_ADJUST MSR is maintained separately for each logical
processor. A logical processor maintains and uses the IA32_TSC_ADJUST
MSR as follows:
1). On RESET, the value of the IA32_TSC_ADJUST MSR is 0;
2). If an execution of WRMSR to the IA32_TIME_STAMP_COUNTER MSR adds
    (or subtracts) value X from the TSC, the logical processor also
    adds (or subtracts) value X from the IA32_TSC_ADJUST MSR;
3). If an execution of WRMSR to the IA32_TSC_ADJUST MSR adds (or
    subtracts) value X from that MSR, the logical processor also adds
    (or subtracts) value X from the TSC.

This patch provides IA32_TSC_ADJUST support for HVM guests, so that a guest OS
which writes the TSC and/or IA32_TSC_ADJUST while synchronizing TSCs across its
vCPUs observes the architecturally defined behaviour described above; a minimal
model of rules 1)-3) is sketched after the commit header below.
Signed-off-by: Liu, Jinsong <jinsong.liu@intel.com>
Committed-by: Jan Beulich <jbeulich@suse.com>
parent 45d45eac
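
For illustration only (not part of this commit): a minimal C model of rules
1)-3) above. The struct and function names here are hypothetical and simply
mirror the per-logical-processor state described in the commit message.

#include <stdint.h>

/* Hypothetical per-logical-processor state (rule 1: TSC_ADJUST is 0 on RESET). */
struct lp_tsc_state {
    uint64_t tsc;         /* IA32_TIME_STAMP_COUNTER */
    int64_t  tsc_adjust;  /* IA32_TSC_ADJUST */
};

/* Rule 2: a WRMSR that moves the TSC by X also moves IA32_TSC_ADJUST by X. */
void wrmsr_tsc(struct lp_tsc_state *lp, uint64_t new_tsc)
{
    int64_t x = (int64_t)(new_tsc - lp->tsc);

    lp->tsc        += x;
    lp->tsc_adjust += x;
}

/* Rule 3: a WRMSR that moves IA32_TSC_ADJUST by X also moves the TSC by X. */
void wrmsr_tsc_adjust(struct lp_tsc_state *lp, int64_t new_adjust)
{
    int64_t x = new_adjust - lp->tsc_adjust;

    lp->tsc        += x;
    lp->tsc_adjust  = new_adjust;
}

The hvm_set_guest_tsc() and hvm_set_guest_tsc_adjust() changes in the diff
below apply the same coupling, only expressed in terms of the vCPU's
cache_tsc_offset and msr_tsc_adjust fields rather than an absolute TSC value.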
@@ -237,6 +237,7 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
 void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
 {
     uint64_t tsc;
+    uint64_t delta_tsc;
 
     if ( v->domain->arch.vtsc )
     {
@@ -248,10 +249,22 @@ void hvm_set_guest_tsc(struct vcpu *v, u64 guest_tsc)
         rdtscll(tsc);
     }
 
-    v->arch.hvm_vcpu.cache_tsc_offset = guest_tsc - tsc;
+    delta_tsc = guest_tsc - tsc;
+    v->arch.hvm_vcpu.msr_tsc_adjust += delta_tsc
+                          - v->arch.hvm_vcpu.cache_tsc_offset;
+    v->arch.hvm_vcpu.cache_tsc_offset = delta_tsc;
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 }
 
+void hvm_set_guest_tsc_adjust(struct vcpu *v, u64 tsc_adjust)
+{
+    v->arch.hvm_vcpu.cache_tsc_offset += tsc_adjust
+                          - v->arch.hvm_vcpu.msr_tsc_adjust;
+    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
+    v->arch.hvm_vcpu.msr_tsc_adjust = tsc_adjust;
+}
+
 u64 hvm_get_guest_tsc(struct vcpu *v)
 {
     uint64_t tsc;
@@ -270,6 +283,11 @@ u64 hvm_get_guest_tsc(struct vcpu *v)
     return tsc + v->arch.hvm_vcpu.cache_tsc_offset;
 }
 
+u64 hvm_get_guest_tsc_adjust(struct vcpu *v)
+{
+    return v->arch.hvm_vcpu.msr_tsc_adjust;
+}
+
 void hvm_migrate_timers(struct vcpu *v)
 {
     rtc_migrate_timers(v);
@@ -2769,6 +2787,10 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
         *msr_content = hvm_get_guest_tsc(v);
         break;
 
+    case MSR_IA32_TSC_ADJUST:
+        *msr_content = hvm_get_guest_tsc_adjust(v);
+        break;
+
     case MSR_TSC_AUX:
         *msr_content = hvm_msr_tsc_aux(v);
         break;
@@ -2882,6 +2904,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
         hvm_set_guest_tsc(v, msr_content);
         break;
 
+    case MSR_IA32_TSC_ADJUST:
+        hvm_set_guest_tsc_adjust(v, msr_content);
+        break;
+
     case MSR_TSC_AUX:
         v->arch.hvm_vcpu.msr_tsc_aux = (uint32_t)msr_content;
         if ( cpu_has_rdtscp
@@ -3429,6 +3455,8 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
         v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
     hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
 
+    v->arch.hvm_vcpu.msr_tsc_adjust = 0;
+
     paging_update_paging_modes(v);
 
     v->arch.flags |= TF_kernel_mode;

@@ -137,6 +137,7 @@ struct hvm_vcpu {
     struct hvm_vcpu_asid n1asid;
 
     u32 msr_tsc_aux;
+    u64 msr_tsc_adjust;
 
     /* VPMU */
     struct vpmu_struct vpmu;

@@ -289,6 +289,7 @@
 #define MSR_IA32_PLATFORM_ID 0x00000017
 #define MSR_IA32_EBL_CR_POWERON 0x0000002a
 #define MSR_IA32_EBC_FREQUENCY_ID 0x0000002c
+#define MSR_IA32_TSC_ADJUST 0x0000003b
 #define MSR_IA32_APICBASE 0x0000001b
 #define MSR_IA32_APICBASE_BSP (1<<8)
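
As a usage illustration only (not part of this commit): a sketch of how
guest-side, ring-0 code could use IA32_TSC_ADJUST (0x3b) to cancel a measured
TSC skew on the current vCPU. The helper names and the way delta is obtained
are hypothetical; the point is that the WRMSR below is now intercepted and
routed to hvm_set_guest_tsc_adjust(), and a later RDMSR returns the
accumulated value via hvm_get_guest_tsc_adjust().

#include <stdint.h>

#define MSR_IA32_TSC_ADJUST 0x0000003b

static inline uint64_t rdmsr64(uint32_t msr)
{
    uint32_t lo, hi;

    __asm__ __volatile__ ( "rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr) );
    return ((uint64_t)hi << 32) | lo;
}

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
    __asm__ __volatile__ ( "wrmsr" :: "c" (msr),
                           "a" ((uint32_t)val), "d" ((uint32_t)(val >> 32)) );
}

/* delta: how far this vCPU's TSC lags (positive) or leads (negative) the
 * reference vCPU, as measured by the guest's own synchronization protocol. */
void tsc_sync_this_cpu(int64_t delta)
{
    uint64_t adj = rdmsr64(MSR_IA32_TSC_ADJUST);

    wrmsr64(MSR_IA32_TSC_ADJUST, adj + (uint64_t)delta);
}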