kvm_main.c 31.9 KB
Newer Older
Avi Kivity's avatar
Avi Kivity committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

18
#include "iodev.h"
Avi Kivity's avatar
Avi Kivity committed
19

20
#include <linux/kvm_host.h>
Avi Kivity's avatar
Avi Kivity committed
21
22
23
24
25
26
27
28
29
30
31
32
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
33
#include <linux/sysdev.h>
Avi Kivity's avatar
Avi Kivity committed
34
#include <linux/cpu.h>
Alexey Dobriyan's avatar
Alexey Dobriyan committed
35
#include <linux/sched.h>
36
37
#include <linux/cpumask.h>
#include <linux/smp.h>
38
#include <linux/anon_inodes.h>
39
#include <linux/profile.h>
40
#include <linux/kvm_para.h>
41
#include <linux/pagemap.h>
42
#include <linux/mman.h>
43
#include <linux/swap.h>
Avi Kivity's avatar
Avi Kivity committed
44

Avi Kivity's avatar
Avi Kivity committed
45
46
47
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
48
#include <asm/pgtable.h>
Avi Kivity's avatar
Avi Kivity committed
49
50
51
52

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

53
54
DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
55

56
57
static cpumask_t cpus_hardware_enabled;

58
59
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
Avi Kivity's avatar
Avi Kivity committed
60

61
62
static __read_mostly struct preempt_ops kvm_preempt_ops;

63
struct dentry *kvm_debugfs_dir;
Avi Kivity's avatar
Avi Kivity committed
64

Avi Kivity's avatar
Avi Kivity committed
65
66
67
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

68
69
70
71
72
/* Range-check a userspace-supplied vcpu slot number. */
static inline int valid_vcpu(int n)
{
	if (n < 0)
		return 0;
	return likely(n < KVM_MAX_VCPUS);
}

Avi Kivity's avatar
Avi Kivity committed
73
74
75
/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
76
void vcpu_load(struct kvm_vcpu *vcpu)
Avi Kivity's avatar
Avi Kivity committed
77
{
78
79
	int cpu;

Avi Kivity's avatar
Avi Kivity committed
80
	mutex_lock(&vcpu->mutex);
81
82
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
83
	kvm_arch_vcpu_load(vcpu, cpu);
84
	put_cpu();
Avi Kivity's avatar
Avi Kivity committed
85
86
}

87
/* Counterpart of vcpu_load(): unload arch state and release the vcpu mutex. */
void vcpu_put(struct kvm_vcpu *vcpu)
{
	/* Disable preemption while the arch state is being saved. */
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

96
97
98
99
100
101
/*
 * Deliberately empty IPI handler: merely running it on the remote cpu is
 * the acknowledgement — smp_call_function_mask(..., 1) waits for it.
 */
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
102
	int i, cpu;
103
104
105
106
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
107
108
109
110
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
111
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
112
113
114
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
115
			cpu_set(cpu, cpus);
116
	}
117
118
119
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
120
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
121
122
}

123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
/*
 * Mark an MMU reload request on every vcpu of @kvm and IPI the cpus that
 * are currently running one.  Same pattern as kvm_flush_remote_tlbs().
 */
void kvm_reload_remote_mmus(struct kvm *kvm)
{
	cpumask_t ipi_targets;
	struct kvm_vcpu *v;
	int slot, vcpu_cpu;

	cpus_clear(ipi_targets);
	for (slot = 0; slot < KVM_MAX_VCPUS; ++slot) {
		v = kvm->vcpus[slot];
		if (!v)
			continue;
		/* A reload was already pending; skip the IPI. */
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &v->requests))
			continue;
		vcpu_cpu = v->cpu;
		if (vcpu_cpu != -1 && vcpu_cpu != raw_smp_processor_id())
			cpu_set(vcpu_cpu, ipi_targets);
	}
	if (cpus_empty(ipi_targets))
		return;
	smp_call_function_mask(ipi_targets, ack_flush, NULL, 1);
}


146
147
148
149
150
151
152
153
154
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
Eddie Dong's avatar
Eddie Dong committed
155
	init_waitqueue_head(&vcpu->wq);
156
157
158
159
160
161
162
163

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

164
	r = kvm_arch_vcpu_init(vcpu);
165
	if (r < 0)
166
		goto fail_free_run;
167
168
169
170
171
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
172
	return r;
173
174
175
176
177
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

/* Reverse of kvm_vcpu_init(): arch teardown first, then free the run page. */
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

183
static struct kvm *kvm_create_vm(void)
Avi Kivity's avatar
Avi Kivity committed
184
{
185
	struct kvm *kvm = kvm_arch_create_vm();
Avi Kivity's avatar
Avi Kivity committed
186

187
188
	if (IS_ERR(kvm))
		goto out;
Avi Kivity's avatar
Avi Kivity committed
189

190
191
	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
192
	spin_lock_init(&kvm->mmu_lock);
193
	kvm_io_bus_init(&kvm->pio_bus);
Shaohua Li's avatar
Shaohua Li committed
194
	mutex_init(&kvm->lock);
195
	kvm_io_bus_init(&kvm->mmio_bus);
196
	init_rwsem(&kvm->slots_lock);
Izik Eidus's avatar
Izik Eidus committed
197
	atomic_set(&kvm->users_count, 1);
198
199
200
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
201
out:
202
203
204
	return kvm;
}

Avi Kivity's avatar
Avi Kivity committed
205
206
207
208
209
210
/*
 * Free any memory in @free but not in @dont.
 */
/*
 * Free any memory in @free but not in @dont, i.e. only allocations that
 * @dont does not also reference, then reset @free's bookkeeping.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	if (!dont || free->lpage_info != dont->lpage_info)
		vfree(free->lpage_info);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
	free->lpage_info = NULL;
}

226
void kvm_free_physmem(struct kvm *kvm)
Avi Kivity's avatar
Avi Kivity committed
227
228
229
230
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
Al Viro's avatar
Al Viro committed
231
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
Avi Kivity's avatar
Avi Kivity committed
232
233
}

234
235
/* Final VM teardown, called when the last reference is dropped. */
static void kvm_destroy_vm(struct kvm *kvm)
{
	/* kvm is freed by kvm_arch_destroy_vm(); keep mm in a local. */
	struct mm_struct *mm = kvm->mm;

	/* Unlink first so nobody can find the dying VM on vm_list. */
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	/* Release the mm_count reference taken in kvm_create_vm(). */
	mmdrop(mm);
}

Izik Eidus's avatar
Izik Eidus committed
247
248
249
250
251
252
253
254
255
256
257
258
259
260
/* Take a reference on the VM. */
void kvm_get_kvm(struct kvm *kvm)
{
	atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/* Drop a reference on the VM; destroy it when the count hits zero. */
void kvm_put_kvm(struct kvm *kvm)
{
	if (atomic_dec_and_test(&kvm->users_count))
		kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);


261
262
263
264
/* fops->release for the VM fd: drop the fd's reference on the VM. */
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_put_kvm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
274
 *
275
 * Must be called holding mmap_sem for write.
Avi Kivity's avatar
Avi Kivity committed
276
 */
277
278
279
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks: page-aligned size and address, valid slot,
	 * and no wraparound of the guest physical range. */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	/* A zero-sized region deletes the slot; dirty logging is moot then. */
	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	/* Work on a copy; the live slot is only updated once everything
	 * has been allocated successfully. */
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps with every other slot. */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Large-page accounting: one entry per huge page covering the slot,
	 * plus extras for unaligned head/tail, which are marked unusable. */
	if (npages && !new.lpage_info) {
		int largepages = npages / KVM_PAGES_PER_HPAGE;
		if (npages % KVM_PAGES_PER_HPAGE)
			largepages++;
		if (base_gfn % KVM_PAGES_PER_HPAGE)
			largepages++;

		new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

		if (!new.lpage_info)
			goto out_free;

		memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

		if (base_gfn % KVM_PAGES_PER_HPAGE)
			new.lpage_info[0].write_count = 1;
		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
			new.lpage_info[largepages-1].write_count = 1;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	/* Publish the new slot, then let the arch code react; roll back
	 * to the old contents if it objects. */
	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	/* Release whatever the old slot owned that the new one does not. */
	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

405
	down_write(&kvm->slots_lock);
406
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
407
	up_write(&kvm->slots_lock);
408
409
	return r;
}
410
411
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

412
413
414
415
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct
				   kvm_userspace_memory_region *mem,
				   int user_alloc)
416
{
417
418
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
419
	return kvm_set_memory_region(kvm, mem, user_alloc);
Avi Kivity's avatar
Avi Kivity committed
420
421
}

422
423
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
Avi Kivity's avatar
Avi Kivity committed
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

439
	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
Avi Kivity's avatar
Avi Kivity committed
440

441
	for (i = 0; !any && i < n/sizeof(long); ++i)
Avi Kivity's avatar
Avi Kivity committed
442
443
444
445
446
447
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

448
449
	if (any)
		*is_dirty = 1;
Avi Kivity's avatar
Avi Kivity committed
450
451
452
453
454
455

	r = 0;
out:
	return r;
}

456
457
458
459
460
461
/* True if @page is the sentinel bad_page returned on lookup failure. */
int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

462
463
464
465
466
467
/* True if @pfn is the sentinel bad_pfn returned on lookup failure. */
int is_error_pfn(pfn_t pfn)
{
	return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

Izik Eidus's avatar
Izik Eidus committed
468
469
470
471
472
473
474
475
476
477
478
/* Sentinel host virtual address used to signal a failed gfn->hva lookup. */
static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

/* True if @addr is the sentinel produced by bad_hva(). */
int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

479
/* Linear search for the slot containing @gfn; NULL when not mapped. */
static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int slot;

	for (slot = 0; slot < kvm->nmemslots; ++slot) {
		struct kvm_memory_slot *ms = &kvm->memslots[slot];

		if (gfn >= ms->base_gfn && gfn < ms->base_gfn + ms->npages)
			return ms;
	}
	return NULL;
}
492
493
494
495
496
497

/* Public slot lookup: resolve gfn aliases before searching. */
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}
Avi Kivity's avatar
Avi Kivity committed
498

499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
/*
 * Is @gfn backed by one of the userspace-visible memory slots?
 * Private arch slots beyond KVM_MEMORY_SLOTS are deliberately excluded.
 */
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int slot;

	gfn = unalias_gfn(kvm, gfn);
	for (slot = 0; slot < KVM_MEMORY_SLOTS; ++slot) {
		struct kvm_memory_slot *ms = &kvm->memslots[slot];

		if (gfn >= ms->base_gfn && gfn < ms->base_gfn + ms->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

Marcelo Tosatti's avatar
Marcelo Tosatti committed
515
/* Translate a guest frame number to the host virtual address backing it. */
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *ms;

	gfn = unalias_gfn(kvm, gfn);
	ms = __gfn_to_memslot(kvm, gfn);
	if (!ms)
		return bad_hva();
	return ms->userspace_addr + (gfn - ms->base_gfn) * PAGE_SIZE;
}

526
527
528
/*
 * Requires current->mm->mmap_sem to be held
 */
529
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
Avi Kivity's avatar
Avi Kivity committed
530
{
531
	struct page *page[1];
Izik Eidus's avatar
Izik Eidus committed
532
	unsigned long addr;
533
	int npages;
Avi Kivity's avatar
Avi Kivity committed
534

535
536
	might_sleep();

Izik Eidus's avatar
Izik Eidus committed
537
538
	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
539
		get_page(bad_page);
540
		return page_to_pfn(bad_page);
541
	}
542

Izik Eidus's avatar
Izik Eidus committed
543
544
545
	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

546
547
	if (npages != 1) {
		get_page(bad_page);
548
		return page_to_pfn(bad_page);
549
	}
550

551
552
553
554
555
556
557
558
	return page_to_pfn(page[0]);
}

EXPORT_SYMBOL_GPL(gfn_to_pfn);

/* Like gfn_to_pfn() but returns the struct page (may be bad_page). */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	return pfn_to_page(gfn_to_pfn(kvm, gfn));
}

EXPORT_SYMBOL_GPL(gfn_to_page);

563
564
/* Release a page pinned by gfn_to_page() without dirtying it. */
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

569
570
571
572
573
574
/* Drop the pin reference on @pfn taken by gfn_to_pfn(). */
void kvm_release_pfn_clean(pfn_t pfn)
{
	put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

575
/* Mark @page dirty and release the pin reference. */
void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

/* Mark the page behind @pfn dirty, then drop the pin reference. */
void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

/* struct page wrapper around kvm_set_pfn_dirty(). */
void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
	struct page *page = pfn_to_page(pfn);
	/* Reserved pages (e.g. sentinel/IO pages) are never marked dirty. */
	if (!PageReserved(page))
		SetPageDirty(page);
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

/* Tell page reclaim the page behind @pfn was recently used. */
void kvm_set_pfn_accessed(pfn_t pfn)
{
	mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

/* Take an extra reference on the page behind @pfn. */
void kvm_get_pfn(pfn_t pfn)
{
	get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
613

614
615
616
617
618
619
620
621
622
623
624
/* How many bytes of @len fit in the current page, starting at @offset. */
static int next_segment(unsigned long len, int offset)
{
	unsigned long room = PAGE_SIZE - offset;

	return len > room ? room : len;
}

/*
 * Copy @len bytes from guest frame @gfn (at @offset within the page)
 * into @data.  Returns 0 or -EFAULT.
 */
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	unsigned long hva;

	hva = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(hva))
		return -EFAULT;
	if (copy_from_user(data, (void __user *)hva + offset, len))
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

/* Read an arbitrary-length guest physical range, page by page. */
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int chunk;
	int ret;

	while ((chunk = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, chunk);
		if (ret < 0)
			return ret;
		/* Only the first page can start mid-page. */
		offset = 0;
		len -= chunk;
		data += chunk;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

658
659
660
661
662
663
664
665
666
667
668
/*
 * Atomic-context guest read: must not fault, so page faults are disabled
 * around the copy.  Returns 0 or -EFAULT.
 */
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	unsigned long hva;
	int ret;

	hva = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(hva))
		return -EFAULT;
	pagefault_disable();
	ret = __copy_from_user_inatomic(data, (void __user *)hva + offset, len);
	pagefault_enable();
	if (ret)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

678
679
680
/*
 * Copy @len bytes from @data into guest frame @gfn at @offset, and log
 * the frame as dirty.  Returns 0 or -EFAULT.
 */
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	unsigned long hva;

	hva = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(hva))
		return -EFAULT;
	if (copy_to_user((void __user *)hva + offset, data, len))
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

/* Write an arbitrary-length guest physical range, page by page. */
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);
	int chunk;
	int ret;

	while ((chunk = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, chunk);
		if (ret < 0)
			return ret;
		/* Only the first page can start mid-page. */
		offset = 0;
		len -= chunk;
		data += chunk;
		++gfn;
	}
	return 0;
}

/* Zero @len bytes of guest frame @gfn at @offset via the shared zero page. */
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

/*
 * Zero an arbitrary-length guest physical range, page by page.
 * Returns 0 on success or the first negative error from
 * kvm_clear_guest_page().
 *
 * Fix: the while-loop line was indented with spaces instead of a tab,
 * violating the kernel coding style used throughout this file.
 */
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		/* Only the first page can start mid-page. */
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

Avi Kivity's avatar
Avi Kivity committed
740
741
/* Record @gfn in its slot's dirty bitmap, if dirty logging is enabled. */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *ms;

	gfn = unalias_gfn(kvm, gfn);
	ms = __gfn_to_memslot(kvm, gfn);
	if (ms && ms->dirty_bitmap) {
		unsigned long rel_gfn = gfn - ms->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, ms->dirty_bitmap))
			set_bit(rel_gfn, ms->dirty_bitmap);
	}
}

Eddie Dong's avatar
Eddie Dong committed
755
756
757
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	/* Register on the vcpu's wait queue before checking conditions,
	 * so a wakeup arriving between the check and schedule() is not
	 * lost. */
	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !kvm_cpu_has_pending_timer(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the vcpu while asleep so others may load it. */
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

Avi Kivity's avatar
Avi Kivity committed
781
782
/* Voluntarily yield the cpu if a reschedule is pending. */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (need_resched())
		cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

789
/*
 * Page-fault handler for the mmap'ed vcpu fd: page 0 is the kvm_run
 * structure; on x86 an additional page exposes the pio data buffer.
 */
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
	else
		return VM_FAULT_SIGBUS;
	/* The fault path consumes one page reference. */
	get_page(page);
	vmf->page = page;
	return 0;
}

/* vm operations for mmap'ed vcpu fds; only faults need handling. */
static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

/* mmap for a vcpu fd: install fault-based mapping of the run area. */
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

Avi Kivity's avatar
Avi Kivity committed
817
818
819
820
/* fops->release for a vcpu fd: drop the VM reference it held. */
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}

825
/* File operations for vcpu fds created by KVM_CREATE_VCPU. */
static const struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

841
842
	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
Al Viro's avatar
Al Viro committed
843
844
	if (r) {
		kvm_put_kvm(vcpu->kvm);
845
		return r;
Al Viro's avatar
Al Viro committed
846
	}
Avi Kivity's avatar
Avi Kivity committed
847
848
849
	return fd;
}

850
851
852
853
854
855
856
857
858
/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	/* Publish the vcpu in the table under kvm->lock; fail if the
	 * slot was raced by another creator. */
	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	/* The vcpu fd holds a VM reference; create_vcpu_fd() drops it
	 * itself on failure. */
	kvm_get_kvm(kvm);
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

Avi Kivity's avatar
Avi Kivity committed
896
897
898
899
900
901
902
903
904
905
906
/*
 * Install (or, when @sigset is NULL, clear) the signal mask applied
 * while the vcpu runs.  Always returns 0.
 */
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (!sigset) {
		vcpu->sigset_active = 0;
		return 0;
	}
	/* SIGKILL and SIGSTOP may never be blocked. */
	sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
	vcpu->sigset_active = 1;
	vcpu->sigset = *sigset;
	return 0;
}

Avi Kivity's avatar
Avi Kivity committed
907
908
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
Avi Kivity's avatar
Avi Kivity committed
909
{
Avi Kivity's avatar
Avi Kivity committed
910
	struct kvm_vcpu *vcpu = filp->private_data;
Al Viro's avatar
Al Viro committed
911
	void __user *argp = (void __user *)arg;
912
	int r;
Avi Kivity's avatar
Avi Kivity committed
913

914
915
	if (vcpu->kvm->mm != current->mm)
		return -EIO;
Avi Kivity's avatar
Avi Kivity committed
916
	switch (ioctl) {
917
	case KVM_RUN:
918
919
920
		r = -EINVAL;
		if (arg)
			goto out;
921
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
Avi Kivity's avatar
Avi Kivity committed
922
923
		break;
	case KVM_GET_REGS: {
924
		struct kvm_regs *kvm_regs;
Avi Kivity's avatar
Avi Kivity committed
925

926
927
928
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
Avi Kivity's avatar
Avi Kivity committed
929
			goto out;
930
931
932
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
Avi Kivity's avatar
Avi Kivity committed
933
		r = -EFAULT;
934
935
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
Avi Kivity's avatar
Avi Kivity committed
936
		r = 0;
937
938
out_free1:
		kfree(kvm_regs);
Avi Kivity's avatar
Avi Kivity committed
939
940
941
		break;
	}
	case KVM_SET_REGS: {
942
		struct kvm_regs *kvm_regs;
Avi Kivity's avatar
Avi Kivity committed
943

944
945
946
		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
Avi Kivity's avatar
Avi Kivity committed
947
			goto out;
948
949
950
951
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
Avi Kivity's avatar
Avi Kivity committed
952
		if (r)
953
			goto out_free2;
Avi Kivity's avatar
Avi Kivity committed
954
		r = 0;
955
956
out_free2:
		kfree(kvm_regs);
Avi Kivity's avatar
Avi Kivity committed
957
958
959
960
961
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

Avi Kivity's avatar
Avi Kivity committed
962
		memset(&kvm_sregs, 0, sizeof kvm_sregs);
963
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
Avi Kivity's avatar
Avi Kivity committed
964
965
966
		if (r)
			goto out;
		r = -EFAULT;
Al Viro's avatar
Al Viro committed
967
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
Avi Kivity's avatar
Avi Kivity committed
968
969
970
971
972
973
974
975
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
Al Viro's avatar
Al Viro committed
976
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
Avi Kivity's avatar
Avi Kivity committed
977
			goto out;
978
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
Avi Kivity's avatar
Avi Kivity committed
979
980
981
982
983
		if (r)
			goto out;
		r = 0;
		break;
	}
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
Avi Kivity's avatar
Avi Kivity committed
1008
1009
1010
1011
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
Al Viro's avatar
Al Viro committed
1012
		if (copy_from_user(&tr, argp, sizeof tr))
Avi Kivity's avatar
Avi Kivity committed
1013
			goto out;
1014
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
Avi Kivity's avatar
Avi Kivity committed
1015
1016
1017
		if (r)
			goto out;
		r = -EFAULT;
Al Viro's avatar
Al Viro committed
1018
		if (copy_to_user(argp, &tr, sizeof tr))
Avi Kivity's avatar
Avi Kivity committed
1019
1020
1021
1022
1023
1024
1025
1026
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
Al Viro's avatar
Al Viro committed
1027
		if (copy_from_user(&dbg, argp, sizeof dbg))
Avi Kivity's avatar
Avi Kivity committed
1028
			goto out;
1029
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
Avi Kivity's avatar
Avi Kivity committed
1030
1031
1032
1033
1034
		if (r)
			goto out;
		r = 0;
		break;
	}
Avi Kivity's avatar
Avi Kivity committed
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
		break;
	}
Avi Kivity's avatar
Avi Kivity committed
1058
1059
1060
1061
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
1062
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
Avi Kivity's avatar
Avi Kivity committed
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
1077
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
Avi Kivity's avatar
Avi Kivity committed
1078
1079
1080
1081
1082
		if (r)
			goto out;
		r = 0;
		break;
	}
Avi Kivity's avatar
Avi Kivity committed
1083
	default:
1084
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
Avi Kivity's avatar
Avi Kivity committed
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
	}
out:
	return r;
}

/*
 * ioctl handler for a VM fd: create vcpus, configure guest memory slots
 * and retrieve dirty-page logs; everything else is forwarded to the
 * architecture-specific handler.
 *
 * Returns 0 (or a new vcpu fd for KVM_CREATE_VCPU) on success, or a
 * negative errno.
 */
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	/*
	 * The VM fd is bound to the mm that created it; reject use from a
	 * different process (e.g. after the fd leaked across fork/exec).
	 */
	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		/* arg is the vcpu id; on success r is the new vcpu fd (>= 0). */
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		/* Final argument 1 == userspace-initiated slot update. */
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		/* Copies the dirty bitmap out to log.dirty_bitmap itself. */
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

1136
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1137
1138
1139
1140
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

1141
1142
	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
1143
	page = gfn_to_page(kvm, vmf->pgoff);
1144
	if (is_error_page(page)) {
1145
		kvm_release_page_clean(page);
1146
		return VM_FAULT_SIGBUS;
1147
	}
1148
1149
	vmf->page = page;
	return 0;
1150
1151
1152
}

/* VMA operations for mmap()ed VM fds: faults resolve to guest pages. */
static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

/*
 * mmap() handler for a VM fd: install the fault-based vm_ops so guest
 * pages are populated lazily on first access. Always succeeds.
 */
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

1162
static const struct file_operations kvm_vm_fops = {
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl   = kvm_vm_ioctl,
	.mmap           = kvm_vm_mmap,
};

/*
 * Back-end for the KVM_CREATE_VM ioctl: create a struct kvm and wrap it
 * in an anonymous-inode fd using kvm_vm_fops.
 *
 * Returns the new VM fd on success, or a negative errno. On failure of
 * anon_inode_getfd() the VM reference is dropped again via kvm_put_kvm().
 */
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	/*
	 * inode/file are output parameters required by this era's
	 * anon_inode_getfd() API; only the fd is used here.
	 */
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_put_kvm(kvm);
		return r;
	}

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
1192
	long r = -EINVAL;
1193
1194
1195

	switch (ioctl) {
	case KVM_GET_API_VERSION:
1196
1197
1198
		r = -EINVAL;
		if (arg)
			goto out;
1199
1200
1201
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
1202
1203
1204
		r = -EINVAL;
		if (arg)
			goto out;
1205
1206
		r = kvm_dev_ioctl_create_vm();
		break;
1207
1208
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
1209
		break;
1210
1211
1212
1213
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
1214
1215
1216
1217
		r = PAGE_SIZE;     /* struct kvm_run */
#ifdef CONFIG_X86
		r += PAGE_SIZE;    /* pio data page */
#endif
1218
		break;
1219
1220
1221
1222
1223
	case KVM_TRACE_ENABLE:
	case KVM_TRACE_PAUSE:
	case KVM_TRACE_DISABLE:
		r = kvm_trace_ioctl(ioctl, arg);
		break;
Avi Kivity's avatar
Avi Kivity committed
1224
	default: