Commit 08d19f51 authored by Linus Torvalds

Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm: (134 commits)
  KVM: ia64: Add intel iommu support for guests.
  KVM: ia64: add directed mmio range support for kvm guests
  KVM: ia64: Make pmt table be able to hold physical mmio entries.
  KVM: Move irqchip_in_kernel() from ioapic.h to irq.h
  KVM: Separate irq ack notification out of arch/x86/kvm/irq.c
  KVM: Change is_mmio_pfn to kvm_is_mmio_pfn, and make it common for all archs
  KVM: Move device assignment logic to common code
  KVM: Device Assignment: Move vtd.c from arch/x86/kvm/ to virt/kvm/
  KVM: VMX: enable invlpg exiting if EPT is disabled
  KVM: x86: Silence various LAPIC-related host kernel messages
  KVM: Device Assignment: Map mmio pages into VT-d page table
  KVM: PIC: enhance IPI avoidance
  KVM: MMU: add "oos_shadow" parameter to disable oos
  KVM: MMU: speed up mmu_unsync_walk
  KVM: MMU: out of sync shadow core
  KVM: MMU: mmu_convert_notrap helper
  KVM: MMU: awareness of new kvm_mmu_zap_page behaviour
  KVM: MMU: mmu_parent_walk
  KVM: x86: trap invlpg
  KVM: MMU: sync roots on mmu reload
  ...
parents 1c95e1b6 2381ad24
diff --git a/MAINTAINERS b/MAINTAINERS
@@ -2448,7 +2448,14 @@ S:	Supported
 KERNEL VIRTUAL MACHINE (KVM)
 P:	Avi Kivity
-M:	avi@qumranet.com
+M:	avi@redhat.com
+L:	kvm@vger.kernel.org
+W:	http://kvm.qumranet.com
+S:	Supported
+
+KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
+P:	Joerg Roedel
+M:	joerg.roedel@amd.com
 L:	kvm@vger.kernel.org
 W:	http://kvm.qumranet.com
 S:	Supported
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
@@ -132,7 +132,7 @@
 #define GPFN_IOSAPIC	(4UL << 60)	/* IOSAPIC base */
 #define GPFN_LEGACY_IO	(5UL << 60)	/* Legacy I/O base */
 #define GPFN_GFW	(6UL << 60)	/* Guest Firmware */
-#define GPFN_HIGH_MMIO	(7UL << 60)	/* High MMIO range */
+#define GPFN_PHYS_MMIO	(7UL << 60)	/* Directed MMIO Range */
 #define GPFN_IO_MASK	(7UL << 60)	/* Guest pfn is I/O type */
 #define GPFN_INV_MASK	(1UL << 63)	/* Guest pfn is invalid */
@@ -413,6 +413,10 @@ struct kvm_arch {
 	struct kvm_ioapic *vioapic;
 	struct kvm_vm_stat stat;
 	struct kvm_sal_data rdv_sal_data;
+
+	struct list_head assigned_dev_head;
+	struct dmar_domain *intel_iommu_domain;
+	struct hlist_head irq_ack_notifier_list;
 };
 
 union cpuid3_t {
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
@@ -46,4 +46,6 @@ config KVM_INTEL
 config KVM_TRACE
 	bool
 
+source drivers/virtio/Kconfig
+
 endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
@@ -44,7 +44,11 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o)
+		coalesced_mmio.o irq_comm.o)
+
+ifeq ($(CONFIG_DMAR),y)
+common-objs += $(addprefix ../../../virt/kvm/, vtd.o)
+endif
 
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/ia64/kvm/irq.h b/arch/ia64/kvm/irq.h
new file mode 100644
+/*
+ * irq.h: In-kernel interrupt controller related definitions
+ * Copyright (c) 2008, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * Authors:
+ *   Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ */
+
+#ifndef __IRQ_H
+#define __IRQ_H
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 1;
+}
+
+#endif
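
Note: ia64 KVM always instantiates the in-kernel ioapic, which is why this helper can hard-code 1. Common code can then gate injection paths on it. A rough sketch of such a caller, assuming the kvm_set_irq() helper this series moves into virt/kvm/irq_comm.c (signature from memory, not part of this diff):

	/* Hypothetical caller, for illustration only: inject a GSI only
	 * when the interrupt controller is emulated in the kernel. */
	static void example_deliver_gsi(struct kvm *kvm, int gsi, int level)
	{
		if (irqchip_in_kernel(kvm))
			kvm_set_irq(kvm, gsi, level);
	}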
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
@@ -31,6 +31,7 @@
 #include <linux/bitops.h>
 #include <linux/hrtimer.h>
 #include <linux/uaccess.h>
+#include <linux/intel-iommu.h>
 
 #include <asm/pgtable.h>
 #include <asm/gcc_intrin.h>
@@ -45,6 +46,7 @@
 #include "iodev.h"
 #include "ioapic.h"
 #include "lapic.h"
+#include "irq.h"
 
 static unsigned long kvm_vmm_base;
 static unsigned long kvm_vsa_base;
@@ -179,12 +181,16 @@ int kvm_dev_ioctl_check_extension(long ext)
 	switch (ext) {
 	case KVM_CAP_IRQCHIP:
 	case KVM_CAP_USER_MEMORY:
+	case KVM_CAP_MP_STATE:
 
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_IOMMU:
+		r = intel_iommu_found();
+		break;
 	default:
 		r = 0;
 	}
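
Userspace discovers these new capabilities through the standard KVM_CHECK_EXTENSION ioctl on /dev/kvm; the value returned is exactly the r computed above. A minimal probe, illustrative only and not part of this commit:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		if (kvm < 0) {
			perror("open /dev/kvm");
			return 1;
		}
		/* KVM_CHECK_EXTENSION returns 0 for an unsupported capability;
		 * for KVM_CAP_IOMMU on ia64 it now reflects intel_iommu_found(). */
		printf("KVM_CAP_IOMMU:    %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IOMMU));
		printf("KVM_CAP_MP_STATE: %d\n",
		       ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_MP_STATE));
		return 0;
	}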
@@ -771,6 +777,7 @@ static void kvm_init_vm(struct kvm *kvm)
 	 */
 	kvm_build_io_pmt(kvm);
 
+	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 }
 
 struct kvm *kvm_arch_create_vm(void)
@@ -1334,6 +1341,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	kvm_iommu_unmap_guest(kvm);
+#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
+	kvm_free_all_assigned_devices(kvm);
+#endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
@@ -1435,17 +1446,24 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		int user_alloc)
 {
 	unsigned long i;
-	struct page *page;
+	unsigned long pfn;
 	int npages = mem->memory_size >> PAGE_SHIFT;
 	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
 	unsigned long base_gfn = memslot->base_gfn;
 
 	for (i = 0; i < npages; i++) {
-		page = gfn_to_page(kvm, base_gfn + i);
-		kvm_set_pmt_entry(kvm, base_gfn + i,
-				page_to_pfn(page) << PAGE_SHIFT,
-				_PAGE_AR_RWX|_PAGE_MA_WB);
-		memslot->rmap[i] = (unsigned long)page;
+		pfn = gfn_to_pfn(kvm, base_gfn + i);
+		if (!kvm_is_mmio_pfn(pfn)) {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					pfn << PAGE_SHIFT,
+					_PAGE_AR_RWX | _PAGE_MA_WB);
+			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
+		} else {
+			kvm_set_pmt_entry(kvm, base_gfn + i,
+					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
+					_PAGE_MA_UC);
+			memslot->rmap[i] = 0;
+		}
 	}
 
 	return 0;
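
The loop above now tags directed-MMIO frames in the pmt: ordinary RAM keeps a cacheable (_PAGE_MA_WB) RWX mapping, while device MMIO frames are mapped uncacheable (_PAGE_MA_UC) with GPFN_PHYS_MMIO or-ed into the entry. A consumer can recover the type from bits 60-62; a sketch under the assumption that entries are read back verbatim (the helper name is hypothetical, only the GPFN_* masks come from this diff):

	/* Hypothetical helper, not in this diff: classify a pmt entry with
	 * the GPFN_* masks from kvm_host.h. GPFN_IO_MASK covers bits 60-62;
	 * GPFN_PHYS_MMIO sets all three, so the comparison is exact. */
	static inline int pmt_entry_is_phys_mmio(unsigned long entry)
	{
		return (entry & GPFN_IO_MASK) == GPFN_PHYS_MMIO;
	}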
@@ -1789,11 +1807,43 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 		struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	vcpu_load(vcpu);
+	mp_state->mp_state = vcpu->arch.mp_state;
+	vcpu_put(vcpu);
+	return 0;
+}
+
+static int vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	int r;
+	long psr;
+
+	local_irq_save(psr);
+	r = kvm_insert_vmm_mapping(vcpu);
+	if (r)
+		goto fail;
+
+	vcpu->arch.launched = 0;
+	kvm_arch_vcpu_uninit(vcpu);
+	r = kvm_arch_vcpu_init(vcpu);
+	if (r)
+		goto fail;
+
+	kvm_purge_vmm_mapping(vcpu);
+	r = 0;
+fail:
+	local_irq_restore(psr);
+	return r;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 		struct kvm_mp_state *mp_state)
 {
-	return -EINVAL;
+	int r = 0;
+
+	vcpu_load(vcpu);
+	vcpu->arch.mp_state = mp_state->mp_state;
+	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
+		r = vcpu_reset(vcpu);
+	vcpu_put(vcpu);
+	return r;
 }
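
These two handlers back the KVM_GET_MP_STATE and KVM_SET_MP_STATE ioctls on the vcpu fd, so ia64 userspace can now save and restore processor state, and writing KVM_MP_STATE_UNINITIALIZED reaches the vcpu_reset() path above. An illustrative caller, not part of this commit (vcpu_fd is assumed to come from KVM_CREATE_VCPU):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read back a vcpu's MP state, then request a reset by writing
	 * KVM_MP_STATE_UNINITIALIZED. */
	static int reset_vcpu(int vcpu_fd)
	{
		struct kvm_mp_state state;

		if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &state) < 0)
			return -1;
		state.mp_state = KVM_MP_STATE_UNINITIALIZED;
		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
	}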
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
@@ -50,27 +50,18 @@
 #define PAL_VSA_SYNC_READ						\
 	/* begin to call pal vps sync_read */				\
+{.mii;									\
 	add r25 = VMM_VPD_BASE_OFFSET, r21;				\
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */	\
+	nop 0x0;							\
+	mov r24=ip;							\
 	;;								\
+}									\
+{.mmb									\
+	add r24=0x20, r24;						\
 	ld8 r25 = [r25];	/* read vpd base */			\
-	ld8 r20 = [r20];						\
-	;;								\
-	add r20 = PAL_VPS_SYNC_READ,r20;				\
-	;;								\
-{ .mii;								\
-	nop 0x0;							\
-	mov r24 = ip;							\
-	mov b0 = r20;							\
+	br.cond.sptk kvm_vps_sync_read;		/*call the service*/	\
 	;;								\
 };									\
-{ .mmb;								\
-	add r24 = 0x20, r24;						\
-	nop 0x0;							\
-	br.cond.sptk b0;	/*  call the service */			\
-	;;								\
-};
+};
 
 #define KVM_MINSTATE_GET_CURRENT(reg)	mov reg=r21
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
@@ -1,9 +1,12 @@
 /*
- * arch/ia64/vmx/optvfault.S
+ * arch/ia64/kvm/optvfault.S
  * optimize virtualization fault handler
  *
  * Copyright (C) 2006 Intel Co
  *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ * Copyright (C) 2008 Intel Co
+ * Add the support for Tukwila processors.
+ *	Xiantao Zhang <xiantao.zhang@intel.com>
  */
 
 #include <asm/asmmacro.h>
@@ -20,6 +23,98 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
+#define VMX_VPS_SYNC_READ						\
+	add r16=VMM_VPD_BASE_OFFSET,r21;				\
+	mov r17 = b0;							\
+	mov r18 = r24;							\
+	mov r19 = r25;							\
+	mov r20 = r31;							\
+	;;								\
+{.mii;									\
+	ld8 r16 = [r16];						\
+	nop 0x0;							\
+	mov r24 = ip;							\
+	;;								\
+};									\
+{.mmb;									\
+	add r24=0x20, r24;						\
+	mov r25 =r16;							\
+	br.sptk.many kvm_vps_sync_read;					\
+};									\
+	mov b0 = r17;							\
+	mov r24 = r18;							\
+	mov r25 = r19;							\
+	mov r31 = r20
+
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1	// bit 63 of r27 indicate whether enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
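
In C terms, kvm_vps_entry performs an indirect jump to the cached VSA base plus a fixed per-service offset, with the PAL_VPS_* constant preloaded into r30 by each wrapper above. A rough model, illustrative only (names and types are assumptions, not real kernel code):

	/* Model of the dispatch: each wrapper picks a PAL_VPS_* byte offset,
	 * kvm_vps_entry adds it to the VSA base and branches there. */
	typedef void (*vps_service_t)(void);

	static void vps_call(unsigned long vsa_base, unsigned long pal_vps_offset)
	{
		vps_service_t entry = (vps_service_t)(vsa_base + pal_vps_offset);

		entry();	/* mov b0 = r29; br.sptk.many b0 in the assembly */
	}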
 
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
@@ -157,11 +252,11 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 #ifndef ACCE_RSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	;;
+	VMX_VPS_SYNC_READ
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;
@@ -196,7 +291,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	tbit.nz p6,p0=r23,0
 	;;
 	tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r26=VMM_VCPU_META_RR0_OFFSET,r21
 	add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
@@ -212,7 +307,7 @@ GLOBAL_ENTRY(kvm_asm_rsm)
 	mov rr[r28]=r27
 	;;
 	srlz.d
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_rsm)
@@ -221,11 +316,11 @@ GLOBAL_ENTRY(kvm_asm_ssm)
 #ifndef ACCE_SSM
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	;;
+	VMX_VPS_SYNC_READ
 	extr.u r26=r25,6,21
 	extr.u r27=r25,31,2
 	;;
-	ld8 r16=[r16]
 	extr.u r28=r25,36,1
 	dep r26=r27,r26,21,2
 	;;	//r26 is imm24
@@ -271,7 +366,7 @@ kvm_asm_ssm_1:
 	tbit.nz p6,p0=r29,IA64_PSR_I_BIT
 	;;
 	tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
-(p6)	br.dptk kvm_resume_to_guest
+(p6)	br.dptk kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -286,7 +381,7 @@ kvm_asm_ssm_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_ssm)
@@ -295,10 +390,9 @@ GLOBAL_ENTRY(kvm_asm_mov_to_psr)
 #ifndef ACCE_MOV_TO_PSR
 	br.many kvm_virtualization_fault_back
 #endif
-	add r16=VMM_VPD_BASE_OFFSET,r21
-	extr.u r26=r25,13,7 //r2
+	VMX_VPS_SYNC_READ
 	;;
-	ld8 r16=[r16]
+	extr.u r26=r25,13,7 //r2
 	addl r20=@gprel(asm_mov_from_reg),gp
 	;;
 	adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
@@ -374,7 +468,7 @@ kvm_asm_mov_to_psr_1:
 	;;
 	tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
 	tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
-(p6)	br.dpnt.few kvm_resume_to_guest
+(p6)	br.dpnt.few kvm_resume_to_guest_with_sync
 	;;
 	add r29=VPD_VTPR_START_OFFSET,r16
 	add r30=VPD_VHPI_START_OFFSET,r16
@@ -389,13 +483,29 @@ kvm_asm_mov_to_psr_1:
 	;;
 	cmp.gt p6,p0=r30,r17
 (p6)	br.dpnt.few kvm_asm_dispatch_vexirq
-	br.many kvm_resume_to_guest
+	br.many kvm_resume_to_guest_with_sync
 END(kvm_asm_mov_to_psr)
 
 ENTRY(kvm_asm_dispatch_vexirq)
 //increment iip
+	mov r17 = b0
+	mov r18 = r31
+{.mii
+	add r25=VMM_VPD_BASE_OFFSET,r21
+	nop 0x0
+	mov r24 = ip
+	;;
+}
+{.mmb
+	add r24 = 0x20, r24
+	ld8 r25 = [r25]
+	br.sptk.many kvm_vps_sync_write
+}
+	mov b0 =r17
 	mov r16=cr.ipsr
+	mov r31 = r18
+	mov r19 = 37
 	;;
 	extr.u r17=r16,IA64_PSR_RI_BIT,2
 	tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
@@ -435,20 +545,26 @@ GLOBAL_ENTRY(kvm_asm_thash)
 	;;
 kvm_asm_thash_back1:
 	shr.u r23=r19,61		// get RR number
-	adds r25=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr[0]'s addr
+	adds r28=VMM_VCPU_VRR0_OFFSET,r21	// get vcpu->arch.vrr