/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
						    unsigned alignment,
						    bool map_and_fenceable);
static int i915_gem_phys_pwrite(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				struct drm_i915_gem_pwrite *args,
				struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
				    struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
}

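/* Wait for any pending GPU reset to complete (bounded to ten seconds)
 * before callers are allowed to take the struct mutex.
 */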
static int
i915_gem_wait_for_error(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct completion *x = &dev_priv->error_completion;
	unsigned long flags;
	int ret;

	if (!atomic_read(&dev_priv->mm.wedged))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}

	if (atomic_read(&dev_priv->mm.wedged)) {
		/* GPU is hung, bump the completion count to account for
		 * the token we just consumed so that we never hit zero and
		 * end up waiting upon a subsequent completion event that
		 * will never happen.
		 */
		spin_lock_irqsave(&x->wait.lock, flags);
		x->done++;
		spin_unlock_irqrestore(&x->wait.lock, flags);
	}
	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	int ret;

	ret = i915_gem_wait_for_error(dev);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return obj->gtt_space && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_init_global_gtt(dev, args->gtt_start,
				 args->gtt_end, args->gtt_end);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
		if (obj->pin_count)
			pinned += obj->gtt_space->size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->mm.gtt_total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
		kfree(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference(&obj->base);
	trace_i915_gem_object_create(obj);

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

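/* Copy helpers for bit-17 swizzled objects: data is moved one cacheline at
 * a time, with the GPU-side offset XORed with 64 to address the swizzled
 * partner cacheline.
 */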
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret;
}

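/* Pread through the shmem backing store: try the atomic kmap fastpath for
 * each page and fall back to the sleeping slowpath (dropping struct_mutex)
 * on a fault or when bit-17 swizzling is required.
 */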
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int prefaulted = 0;
	int needs_clflush = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
			if (ret)
				return ret;
		}
	}

	offset = args->offset;

	while (remain > 0) {
		struct page *page;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		if (!prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
	}

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_object_pin(obj, 0, true);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	offset = obj->gtt_offset + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
						user_data,
						page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret;
}

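/* Pwrite through the shmem backing store, mirroring the pread path above:
 * an atomic per-page fastpath, with a slowpath that drops struct_mutex to
 * handle faults, bit-17 swizzling and partial-cacheline clflushes.
 */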
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	int release_page;

	user_data = (char __user *) (uintptr_t) args->data_ptr;
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		if (obj->cache_level == I915_CACHE_NONE)
			needs_clflush_after = 1;
		if (obj->gtt_space) {
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
			if (ret)
				return ret;
		}
	}
	/* Same trick applies for invalidate partially written cachelines before
	 * writing.  */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
	    && obj->cache_level == I915_CACHE_NONE)
		needs_clflush_before = 1;

	offset = args->offset;
	obj->dirty = 1;

	while (remain > 0) {
		struct page *page;
		int partial_cacheline_write;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire patch. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		if (obj->pages) {
			page = obj->pages[offset >> PAGE_SHIFT];
			release_page = 0;
		} else {
			page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto out;
			}
			release_page = 1;
		}

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		page_cache_get(page);
		mutex_unlock(&dev->struct_mutex);

		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);
		page_cache_release(page);
next_page:
		set_page_dirty(page);
		mark_page_accessed(page);
		if (release_page)
			page_cache_release(page);

		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	if (hit_slowpath) {
		/* Fixup: Kill any reinstated backing storage pages */
		if (obj->madv == __I915_MADV_PURGED)
			i915_gem_object_truncate(obj);
		/* and flush dirty cachelines in case the object isn't in the cpu write
		 * domain anymore. */
		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			i915_gem_clflush_object(obj);
			intel_gtt_chipset_flush();
		}
	}

	if (needs_clflush_after)
		intel_gtt_chipset_flush();

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       (char __user *)(uintptr_t)args->data_ptr,
		       args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
					   args->size);
	if (ret)
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->phys_obj) {
		ret = i915_gem_phys_pwrite(dev, obj, args, file);
		goto out;
	}

	if (obj->gtt_space &&
	    obj->cache_level == I915_CACHE_NONE &&
	    obj->tiling_mode == I915_TILING_NONE &&
	    obj->map_and_fenceable &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT)
		ret = i915_gem_shmem_pwrite(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if (write_domain & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	if (read_domains & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	if (read_domains & I915_GEM_DOMAIN_GTT) {
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);

		/* Silently promote "you're not bound, there was nothing to do"
		 * to success, since the client was just asking us to
		 * make sure everything was done.
		 */
		if (ret == -EINVAL)
			ret = 0;
	} else {
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
	}

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Pinned buffers may be scanout, so flush the cache */
	if (obj->pin_count)
		i915_gem_object_flush_cpu_write_domain(obj);

	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * Maps the contents of an object, returning the address it is mapped
 * into.
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->filp) {
		drm_gem_object_unreference_unlocked(obj);
		return -EINVAL;
	}
