diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index e1a114161027d6598234959e2605af08018f6fc6..2811e452f7566f5f5fe4fa6242f8724a9970ba11 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
 		__u8 pad;
 	} nmi;
 	__u32 sipi_vector;
-	__u32 flags;   /* must be zero */
+	__u32 flags;
 };
 
 4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Keep the
+corresponding bits in the flags field cleared to suppress overwriting the
+current in-kernel state. The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+
 
 5. The kvm_run structure
 
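For readers following the new ABI, a minimal userspace sketch of a selective update (the vcpu_fd descriptor and the set_nmi_masked() helper are illustrative, not part of this patch): it re-reads the current event state, changes only the NMI mask, and clears both valid bits so that the in-kernel nmi.pending and sipi_vector state is left untouched.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* vcpu_fd: file descriptor of an already created KVM vCPU. */
static int set_nmi_masked(int vcpu_fd, int masked)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;

	events.nmi.masked = masked;

	/* Keep KVM_VCPUEVENT_VALID_NMI_PENDING and
	 * KVM_VCPUEVENT_VALID_SIPI_VECTOR cleared so the kernel does
	 * not overwrite its current nmi.pending and sipi_vector. */
	events.flags = 0;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
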
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae699d38681da70c1b750c6318f87a81..988911b4cc7abd9f0f60993ef5419b62d0ea062b 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	 do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)						\
 	do {								\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();				\
-		((spinlock_t *)x)->raw_lock.lock = 0; } \
+		((vmm_spinlock_t *)x)->lock = 0; } \
 while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6ce9d77d42d7a796931cb13e47ab03..7a62f75778c535f7421fc16ec4525407e3686e9b 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6ec50d765fe3911fa0b6e6a0ef4859..4332f7ee5203f27179db08b5a95d372c6509e681 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 5598f88f142e6332877e7a4a84947fbba93b9dc6..e4beeb371a732fecdf3fc5fe1291ae01aa40b2a6 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 {
 	u64 rb = 0, rs = 0;
 
+	/*
+	 * According to Book3 2.01 mtsrin is implemented as:
+	 *
+	 * The SLB entry specified by (RB)32:35 is loaded from register
+	 * RS, as follows.
+	 *
+	 * SLBE Bit	Source			SLB Field
+	 *
+	 * 0:31		0x0000_0000		ESID-0:31
+	 * 32:35	(RB)32:35		ESID-32:35
+	 * 36		0b1			V
+	 * 37:61	0x00_0000|| 0b0		VSID-0:24
+	 * 62:88	(RS)37:63		VSID-25:51
+	 * 89:91	(RS)33:35		Ks Kp N
+	 * 92		(RS)36			L ((RS)36 must be 0b0)
+	 * 93		0b0			C
+	 */
+
+	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);
+
 	/* ESID = srnum */
 	rb |= (srnum & 0xf) << 28;
 	/* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 	/* VSID = VSID */
 	rs |= (value & 0xfffffff) << 12;
 	/* flags = flags */
-	rs |= ((value >> 27) & 0xf) << 9;
+	rs |= ((value >> 28) & 0x7) << 9;
 
 	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
 }
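
The corrected flags extraction above follows directly from the comment's mapping: Ks/Kp/N sit at (RS)33:35, i.e. bits 30:28 of the 32-bit segment register value, and are placed at bits 11:9 of the RS operand handed to slbmte. A small standalone illustration (not part of the patch; compiles as an ordinary userspace program purely for demonstration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example SR value: T=0, Ks=1, Kp=1, N=0, VSID=0x1234 */
	uint32_t value = 0x60001234;
	uint64_t rs = 0;

	rs |= ((uint64_t)value & 0xfffffff) << 12;	/* VSID */
	rs |= ((value >> 28) & 0x7) << 9;		/* Ks Kp N -> bits 11:9 */

	/* Prints rs = 0x1234c00: VSID at bit 12, Ks and Kp at bits 11:10 */
	printf("rs = 0x%llx\n", (unsigned long long)rs);
	return 0;
}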
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763f5b2ea3fa407d3511f0020e9cb67b..f46b79f6c16c873a337013fa8a18e18f85c4ee22 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32b158d70f43e373fcd6e477883b10..3063a0c4858b462a23dd8e2b7d6b81342a203fc1 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8e8dd9a8600fbc898e4e9e9fbb319..58a0f1e8859655154da07037604ebd5fb1872c7f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL  ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2aa491f00152e22ea047e60984f683..6651dbf58675ee7ec8d9e7f3e145dce09953e054 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index fd9c097b760ae909c0425dbfe6d2b8aa034fc24d..f73de631e3eef53d6a5c50b95b21f037969d2f95 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -508,8 +508,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
 
-	down_read(&kvm->slots_lock);
 	mutex_lock(&kvm->lock);
+	down_read(&kvm->slots_lock);
 
 	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 				      assigned_dev->assigned_dev_id);
@@ -573,8 +573,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	}
 
 out:
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
 	list_del(&match->list);
@@ -585,8 +585,8 @@ out_put:
 	pci_dev_put(dev);
 out_free:
 	kfree(match);
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b5af88167613ded2aba4ebcdca2cb702d47c84eb..a944be392d6e9420aa38d833cbc736bf9f07d22e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
 /*
  * Ordering of locks:
  *
- * 		kvm->slots_lock --> kvm->lock --> kvm->irq_lock
+ * 		kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
@@ -406,8 +406,11 @@ static struct kvm *kvm_create_vm(void)
 out:
 	return kvm;
 
+#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
+    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 out_err:
 	hardware_disable_all();
+#endif
 out_err_nodisable:
 	kfree(kvm);
 	return ERR_PTR(r);
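
The reordered acquisitions in assigned-dev.c above match this updated ordering comment. As a condensed sketch of the pattern (the function name is made up for illustration; the error paths of kvm_vm_ioctl_assign_device() are elided):

#include <linux/kvm_host.h>

static void assigned_dev_locking_sketch(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);		/* outer lock first           */
	down_read(&kvm->slots_lock);	/* memslot rwsem nests inside */

	/* device lookup and setup happen here */

	up_read(&kvm->slots_lock);	/* release in reverse order */
	mutex_unlock(&kvm->lock);
}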