Subversion Repositories Kolibri OS


Rev Author Line No. Line
2326 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Eric Anholt 
25
 *
26
 */
27
 
3031 serge 28
#include 
4280 Serge 29
#include 
3031 serge 30
#include 
2326 Serge 31
#include "i915_drv.h"
2351 Serge 32
#include "i915_trace.h"
2326 Serge 33
#include "intel_drv.h"
3260 Serge 34
#include 
2330 Serge 35
#include 
2326 Serge 36
//#include 
3746 Serge 37
#include 
2326 Serge 38
#include 
39
 
2344 Serge 40
extern int x86_clflush_size;
2332 Serge 41
 
3263 Serge 42
#define PROT_READ       0x1             /* page can be read */
43
#define PROT_WRITE      0x2             /* page can be written */
44
#define MAP_SHARED      0x01            /* Share changes */
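/* Minimal stand-ins for the POSIX mmap protection and sharing flags; the
 * values match the usual Linux/POSIX definitions and are consumed by the
 * vm_mmap() call in the mmap ioctl further below. */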
45
 
2344 Serge 46
#undef mb
47
#undef rmb
48
#undef wmb
49
#define mb() asm volatile("mfence")
50
#define rmb() asm volatile ("lfence")
51
#define wmb() asm volatile ("sfence")
52
 
3266 Serge 53
struct drm_i915_gem_object *get_fb_obj();
54
 
3263 Serge 55
unsigned long vm_mmap(struct file *file, unsigned long addr,
56
         unsigned long len, unsigned long prot,
57
         unsigned long flag, unsigned long offset);
58
 
2344 Serge 59
static inline void clflush(volatile void *__p)
60
{
61
    asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
62
}
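/* Kernel barrier primitives re-implemented with raw x86 instructions for
 * this port: mfence orders all memory accesses, lfence orders loads,
 * sfence orders stores, and clflush() above evicts a single cacheline. */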
63
 
2332 Serge 64
#define MAX_ERRNO       4095
65
 
66
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
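/* Follows the Linux convention of encoding small negative errno values in
 * the top MAX_ERRNO addresses of the pointer range; this is how the return
 * value of vm_mmap() is checked with IS_ERR() in the mmap ioctl below. */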
67
 
68
 
2326 Serge 69
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
70
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
71
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
72
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
73
 
2332 Serge 74
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
4104 Serge 75
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
76
						   bool force);
77
static __must_check int
78
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
79
			   struct i915_address_space *vm,
2332 Serge 80
						    unsigned alignment,
3031 serge 81
						    bool map_and_fenceable,
82
						    bool nonblocking);
2332 Serge 83
static int i915_gem_phys_pwrite(struct drm_device *dev,
84
				struct drm_i915_gem_object *obj,
85
				struct drm_i915_gem_pwrite *args,
86
				struct drm_file *file);
2326 Serge 87
 
3031 serge 88
static void i915_gem_write_fence(struct drm_device *dev, int reg,
89
				 struct drm_i915_gem_object *obj);
90
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
91
					 struct drm_i915_fence_reg *fence,
92
					 bool enable);
2332 Serge 93
 
3031 serge 94
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
4104 Serge 95
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3031 serge 96
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
97
 
4104 Serge 98
static bool cpu_cache_is_coherent(struct drm_device *dev,
99
				  enum i915_cache_level level)
100
{
101
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
102
}
103
 
104
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
105
{
106
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
107
		return true;
108
 
109
	return obj->pin_display;
110
}
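/* Net effect of the two helpers above, roughly: a CPU write needs an
 * explicit clflush when the object is not coherent with the GPU (no shared
 * LLC and an uncached cache level), and also when the object may be used
 * for scanout, since the display engine does not snoop the CPU cache. */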
111
 
3031 serge 112
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
113
{
114
	if (obj->tiling_mode)
115
		i915_gem_release_mmap(obj);
116
 
117
	/* As we do not have an associated fence register, we will force
118
	 * a tiling change if we ever need to acquire one.
119
	 */
120
	obj->fence_dirty = false;
121
	obj->fence_reg = I915_FENCE_REG_NONE;
122
}
123
 
2332 Serge 124
/* some bookkeeping */
125
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
126
				  size_t size)
127
{
4104 Serge 128
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 129
	dev_priv->mm.object_count++;
130
	dev_priv->mm.object_memory += size;
4104 Serge 131
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 132
}
133
 
134
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
135
				     size_t size)
136
{
4104 Serge 137
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 138
	dev_priv->mm.object_count--;
139
	dev_priv->mm.object_memory -= size;
4104 Serge 140
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 141
}
142
 
143
static int
3480 Serge 144
i915_gem_wait_for_error(struct i915_gpu_error *error)
2332 Serge 145
{
146
	int ret;
147
 
3480 Serge 148
#define EXIT_COND (!i915_reset_in_progress(error))
149
	if (EXIT_COND)
2332 Serge 150
		return 0;
3255 Serge 151
#if 0
3031 serge 152
	/*
153
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
154
	 * userspace. If it takes that long something really bad is going on and
155
	 * we should simply try to bail out and fail as gracefully as possible.
156
	 */
3480 Serge 157
	ret = wait_event_interruptible_timeout(error->reset_queue,
158
					       EXIT_COND,
159
					       10*HZ);
3031 serge 160
	if (ret == 0) {
161
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
162
		return -EIO;
163
	} else if (ret < 0) {
2332 Serge 164
		return ret;
3031 serge 165
	}
2332 Serge 166
 
3255 Serge 167
#endif
3480 Serge 168
#undef EXIT_COND
3255 Serge 169
 
2332 Serge 170
	return 0;
171
}
172
 
173
int i915_mutex_lock_interruptible(struct drm_device *dev)
174
{
3480 Serge 175
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 176
	int ret;
177
 
3480 Serge 178
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
2332 Serge 179
	if (ret)
180
		return ret;
181
 
3480 Serge 182
	ret = mutex_lock_interruptible(&dev->struct_mutex);
183
	if (ret)
184
		return ret;
2332 Serge 185
 
186
	WARN_ON(i915_verify_lists(dev));
187
	return 0;
188
}
189
 
190
static inline bool
191
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
192
{
4104 Serge 193
	return i915_gem_obj_bound_any(obj) && !obj->active;
2332 Serge 194
}
195
 
196
 
197
#if 0
198
 
199
int
200
i915_gem_init_ioctl(struct drm_device *dev, void *data,
201
		    struct drm_file *file)
202
{
3480 Serge 203
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 204
	struct drm_i915_gem_init *args = data;
205
 
3031 serge 206
	if (drm_core_check_feature(dev, DRIVER_MODESET))
207
		return -ENODEV;
208
 
2332 Serge 209
	if (args->gtt_start >= args->gtt_end ||
210
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
211
		return -EINVAL;
212
 
3031 serge 213
	/* GEM with user mode setting was never supported on ilk and later. */
214
	if (INTEL_INFO(dev)->gen >= 5)
215
		return -ENODEV;
216
 
2332 Serge 217
	mutex_lock(&dev->struct_mutex);
3480 Serge 218
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
219
				  args->gtt_end);
220
	dev_priv->gtt.mappable_end = args->gtt_end;
2332 Serge 221
	mutex_unlock(&dev->struct_mutex);
222
 
223
	return 0;
224
}
2351 Serge 225
#endif
2332 Serge 226
 
227
int
228
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
229
			    struct drm_file *file)
230
{
231
	struct drm_i915_private *dev_priv = dev->dev_private;
232
	struct drm_i915_gem_get_aperture *args = data;
233
	struct drm_i915_gem_object *obj;
234
	size_t pinned;
235
 
236
	pinned = 0;
237
	mutex_lock(&dev->struct_mutex);
4104 Serge 238
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
3031 serge 239
		if (obj->pin_count)
4104 Serge 240
			pinned += i915_gem_obj_ggtt_size(obj);
2332 Serge 241
	mutex_unlock(&dev->struct_mutex);
242
 
4104 Serge 243
	args->aper_size = dev_priv->gtt.base.total;
2342 Serge 244
	args->aper_available_size = args->aper_size - pinned;
2332 Serge 245
 
246
	return 0;
247
}
248
 
3480 Serge 249
void *i915_gem_object_alloc(struct drm_device *dev)
250
{
251
	struct drm_i915_private *dev_priv = dev->dev_private;
252
	return kmalloc(sizeof(struct drm_i915_gem_object), 0);
253
}
254
 
255
void i915_gem_object_free(struct drm_i915_gem_object *obj)
256
{
257
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
258
	kfree(obj);
259
}
260
 
3031 serge 261
static int
262
i915_gem_create(struct drm_file *file,
2332 Serge 263
		struct drm_device *dev,
264
		uint64_t size,
265
		uint32_t *handle_p)
266
{
267
	struct drm_i915_gem_object *obj;
268
	int ret;
269
	u32 handle;
270
 
271
	size = roundup(size, PAGE_SIZE);
2342 Serge 272
	if (size == 0)
273
		return -EINVAL;
2332 Serge 274
 
275
	/* Allocate the new object */
276
	obj = i915_gem_alloc_object(dev, size);
277
	if (obj == NULL)
278
		return -ENOMEM;
279
 
280
	ret = drm_gem_handle_create(file, &obj->base, &handle);
4104 Serge 281
	/* drop reference from allocate - handle holds it now */
282
	drm_gem_object_unreference_unlocked(&obj->base);
283
	if (ret)
2332 Serge 284
		return ret;
285
 
286
	*handle_p = handle;
287
	return 0;
288
}
289
 
290
int
291
i915_gem_dumb_create(struct drm_file *file,
292
		     struct drm_device *dev,
293
		     struct drm_mode_create_dumb *args)
294
{
295
	/* have to work out size/pitch and return them */
296
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
297
	args->size = args->pitch * args->height;
298
	return i915_gem_create(file, dev,
299
			       args->size, &args->handle);
300
}
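/* Example: a 1024x768, 32 bpp dumb buffer gets
 * pitch = ALIGN(1024 * 4, 64) = 4096 bytes and
 * size  = 4096 * 768 = 3145728 bytes (768 pages). */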
301
 
2326 Serge 302
/**
2332 Serge 303
 * Creates a new mm object and returns a handle to it.
304
 */
305
int
306
i915_gem_create_ioctl(struct drm_device *dev, void *data,
307
		      struct drm_file *file)
308
{
309
	struct drm_i915_gem_create *args = data;
3031 serge 310
 
2332 Serge 311
	return i915_gem_create(file, dev,
312
			       args->size, &args->handle);
313
}
314
 
315
 
3260 Serge 316
#if 0
2332 Serge 317
 
3031 serge 318
static inline int
319
__copy_to_user_swizzled(char __user *cpu_vaddr,
320
			const char *gpu_vaddr, int gpu_offset,
2332 Serge 321
		int length)
322
{
3031 serge 323
	int ret, cpu_offset = 0;
2332 Serge 324
 
3031 serge 325
	while (length > 0) {
326
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
327
		int this_length = min(cacheline_end - gpu_offset, length);
328
		int swizzled_gpu_offset = gpu_offset ^ 64;
2332 Serge 329
 
3031 serge 330
		ret = __copy_to_user(cpu_vaddr + cpu_offset,
331
				     gpu_vaddr + swizzled_gpu_offset,
332
				     this_length);
333
		if (ret)
334
			return ret + length;
2332 Serge 335
 
3031 serge 336
		cpu_offset += this_length;
337
		gpu_offset += this_length;
338
		length -= this_length;
339
	}
340
 
341
	return 0;
2332 Serge 342
}
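/* The XOR with 64 compensates for bit-17 swizzling: on affected machines
 * the GPU swaps adjacent 64-byte cachelines within pages whose physical
 * bit 17 is set, so the CPU copy reads/writes the partner cacheline. */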
343
 
3031 serge 344
static inline int
345
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
346
			  const char __user *cpu_vaddr,
347
			  int length)
2332 Serge 348
{
3031 serge 349
	int ret, cpu_offset = 0;
2332 Serge 350
 
351
	while (length > 0) {
352
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
353
		int this_length = min(cacheline_end - gpu_offset, length);
354
		int swizzled_gpu_offset = gpu_offset ^ 64;
355
 
3031 serge 356
		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
2332 Serge 357
			       cpu_vaddr + cpu_offset,
358
			       this_length);
3031 serge 359
		if (ret)
360
			return ret + length;
361
 
2332 Serge 362
		cpu_offset += this_length;
363
		gpu_offset += this_length;
364
		length -= this_length;
365
	}
366
 
3031 serge 367
	return 0;
2332 Serge 368
}
369
 
3031 serge 370
/* Per-page copy function for the shmem pread fastpath.
371
 * Flushes invalid cachelines before reading the target if
372
 * needs_clflush is set. */
2332 Serge 373
static int
3031 serge 374
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
375
		 char __user *user_data,
376
		 bool page_do_bit17_swizzling, bool needs_clflush)
377
{
378
		char *vaddr;
379
		int ret;
380
 
381
	if (unlikely(page_do_bit17_swizzling))
382
		return -EINVAL;
383
 
384
		vaddr = kmap_atomic(page);
385
	if (needs_clflush)
386
		drm_clflush_virt_range(vaddr + shmem_page_offset,
387
				       page_length);
388
		ret = __copy_to_user_inatomic(user_data,
389
				      vaddr + shmem_page_offset,
390
					      page_length);
391
		kunmap_atomic(vaddr);
392
 
393
	return ret ? -EFAULT : 0;
394
}
395
 
396
static void
397
shmem_clflush_swizzled_range(char *addr, unsigned long length,
398
			     bool swizzled)
399
{
400
	if (unlikely(swizzled)) {
401
		unsigned long start = (unsigned long) addr;
402
		unsigned long end = (unsigned long) addr + length;
403
 
404
		/* For swizzling simply ensure that we always flush both
405
		 * channels. Lame, but simple and it works. Swizzled
406
		 * pwrite/pread is far from a hotpath - current userspace
407
		 * doesn't use it at all. */
408
		start = round_down(start, 128);
409
		end = round_up(end, 128);
410
 
411
		drm_clflush_virt_range((void *)start, end - start);
412
	} else {
413
		drm_clflush_virt_range(addr, length);
414
	}
415
 
416
}
417
 
418
/* Only difference to the fast-path function is that this can handle bit17
419
 * and uses non-atomic copy and kmap functions. */
420
static int
421
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
422
		 char __user *user_data,
423
		 bool page_do_bit17_swizzling, bool needs_clflush)
424
{
425
	char *vaddr;
426
	int ret;
427
 
428
	vaddr = kmap(page);
429
	if (needs_clflush)
430
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
431
					     page_length,
432
					     page_do_bit17_swizzling);
433
 
434
	if (page_do_bit17_swizzling)
435
		ret = __copy_to_user_swizzled(user_data,
436
					      vaddr, shmem_page_offset,
437
					      page_length);
438
	else
439
		ret = __copy_to_user(user_data,
440
				     vaddr + shmem_page_offset,
441
				     page_length);
442
	kunmap(page);
443
 
444
	return ret ? - EFAULT : 0;
445
}
446
 
447
static int
448
i915_gem_shmem_pread(struct drm_device *dev,
2332 Serge 449
			  struct drm_i915_gem_object *obj,
450
			  struct drm_i915_gem_pread *args,
451
			  struct drm_file *file)
452
{
3031 serge 453
	char __user *user_data;
2332 Serge 454
	ssize_t remain;
455
	loff_t offset;
3031 serge 456
	int shmem_page_offset, page_length, ret = 0;
457
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
458
	int prefaulted = 0;
459
	int needs_clflush = 0;
3746 Serge 460
	struct sg_page_iter sg_iter;
2332 Serge 461
 
3746 Serge 462
	user_data = to_user_ptr(args->data_ptr);
2332 Serge 463
	remain = args->size;
464
 
3031 serge 465
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
466
 
467
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
468
		/* If we're not in the cpu read domain, set ourself into the gtt
469
		 * read domain and manually flush cachelines (if required). This
470
		 * optimizes for the case when the gpu will dirty the data
471
		 * anyway again before the next pread happens. */
4104 Serge 472
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
473
		if (i915_gem_obj_bound_any(obj)) {
3031 serge 474
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
475
			if (ret)
476
				return ret;
477
		}
478
	}
479
 
480
	ret = i915_gem_object_get_pages(obj);
481
	if (ret)
482
		return ret;
483
 
484
	i915_gem_object_pin_pages(obj);
485
 
2332 Serge 486
	offset = args->offset;
487
 
3746 Serge 488
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
489
			 offset >> PAGE_SHIFT) {
490
		struct page *page = sg_page_iter_page(&sg_iter);
2332 Serge 491
 
3031 serge 492
		if (remain <= 0)
493
			break;
494
 
2332 Serge 495
		/* Operation in this page
496
		 *
3031 serge 497
		 * shmem_page_offset = offset within page in shmem file
2332 Serge 498
		 * page_length = bytes to copy for this page
499
		 */
3031 serge 500
		shmem_page_offset = offset_in_page(offset);
2332 Serge 501
		page_length = remain;
3031 serge 502
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
503
			page_length = PAGE_SIZE - shmem_page_offset;
2332 Serge 504
 
3031 serge 505
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
506
			(page_to_phys(page) & (1 << 17)) != 0;
2332 Serge 507
 
3031 serge 508
		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
509
				       user_data, page_do_bit17_swizzling,
510
				       needs_clflush);
511
		if (ret == 0)
512
			goto next_page;
2332 Serge 513
 
3031 serge 514
		mutex_unlock(&dev->struct_mutex);
515
 
4104 Serge 516
		if (likely(!i915_prefault_disable) && !prefaulted) {
3031 serge 517
			ret = fault_in_multipages_writeable(user_data, remain);
518
			/* Userspace is tricking us, but we've already clobbered
519
			 * its pages with the prefault and promised to write the
520
			 * data up to the first fault. Hence ignore any errors
521
			 * and just continue. */
522
			(void)ret;
523
			prefaulted = 1;
524
		}
525
 
526
		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
527
				       user_data, page_do_bit17_swizzling,
528
				       needs_clflush);
529
 
530
		mutex_lock(&dev->struct_mutex);
531
 
532
next_page:
2332 Serge 533
		mark_page_accessed(page);
3031 serge 534
 
2332 Serge 535
		if (ret)
3031 serge 536
			goto out;
2332 Serge 537
 
538
		remain -= page_length;
539
		user_data += page_length;
540
		offset += page_length;
541
	}
542
 
3031 serge 543
out:
544
	i915_gem_object_unpin_pages(obj);
545
 
546
	return ret;
2332 Serge 547
}
548
 
549
/**
3031 serge 550
 * Reads data from the object referenced by handle.
551
 *
552
 * On error, the contents of *data are undefined.
2332 Serge 553
 */
3031 serge 554
int
555
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
556
		     struct drm_file *file)
557
{
558
	struct drm_i915_gem_pread *args = data;
559
	struct drm_i915_gem_object *obj;
560
	int ret = 0;
561
 
562
	if (args->size == 0)
563
		return 0;
564
 
565
	if (!access_ok(VERIFY_WRITE,
3746 Serge 566
		       to_user_ptr(args->data_ptr),
3031 serge 567
		       args->size))
568
		return -EFAULT;
569
 
570
	ret = i915_mutex_lock_interruptible(dev);
571
	if (ret)
572
		return ret;
573
 
574
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
575
	if (&obj->base == NULL) {
576
		ret = -ENOENT;
577
		goto unlock;
578
	}
579
 
580
	/* Bounds check source.  */
581
	if (args->offset > obj->base.size ||
582
	    args->size > obj->base.size - args->offset) {
583
		ret = -EINVAL;
584
		goto out;
585
	}
586
 
587
	/* prime objects have no backing filp to GEM pread/pwrite
588
	 * pages from.
589
	 */
590
	if (!obj->base.filp) {
591
		ret = -EINVAL;
592
		goto out;
593
	}
594
 
595
	trace_i915_gem_object_pread(obj, args->offset, args->size);
596
 
597
	ret = i915_gem_shmem_pread(dev, obj, args, file);
598
 
599
out:
600
	drm_gem_object_unreference(&obj->base);
601
unlock:
602
	mutex_unlock(&dev->struct_mutex);
603
	return ret;
604
}
605
 
606
/* This is the fast write path which cannot handle
607
 * page faults in the source data
608
 */
609
 
610
static inline int
611
fast_user_write(struct io_mapping *mapping,
612
		loff_t page_base, int page_offset,
613
		char __user *user_data,
614
		int length)
615
{
616
	void __iomem *vaddr_atomic;
617
	void *vaddr;
618
	unsigned long unwritten;
619
 
620
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
621
	/* We can use the cpu mem copy function because this is X86. */
622
	vaddr = (void __force*)vaddr_atomic + page_offset;
623
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
624
						      user_data, length);
625
	io_mapping_unmap_atomic(vaddr_atomic);
626
	return unwritten;
627
}
3260 Serge 628
#endif
3031 serge 629
 
3260 Serge 630
#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
3031 serge 631
/**
632
 * This is the fast pwrite path, where we copy the data directly from the
633
 * user into the GTT, uncached.
634
 */
2332 Serge 635
static int
3031 serge 636
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
637
			 struct drm_i915_gem_object *obj,
638
			 struct drm_i915_gem_pwrite *args,
639
			 struct drm_file *file)
2332 Serge 640
{
3031 serge 641
	drm_i915_private_t *dev_priv = dev->dev_private;
2332 Serge 642
	ssize_t remain;
3031 serge 643
	loff_t offset, page_base;
644
	char __user *user_data;
645
	int page_offset, page_length, ret;
3260 Serge 646
    char *vaddr;
2332 Serge 647
 
4104 Serge 648
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
3031 serge 649
	if (ret)
650
		goto out;
651
 
652
	ret = i915_gem_object_set_to_gtt_domain(obj, true);
653
	if (ret)
654
		goto out_unpin;
655
 
656
	ret = i915_gem_object_put_fence(obj);
657
	if (ret)
658
		goto out_unpin;
659
 
3260 Serge 660
    vaddr = AllocKernelSpace(4096);
661
    if(vaddr == NULL)
662
    {
663
        ret = -ENOSPC;
664
        goto out_unpin;
665
    };
666
 
3031 serge 667
	user_data = (char __user *) (uintptr_t) args->data_ptr;
2332 Serge 668
	remain = args->size;
669
 
4104 Serge 670
	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
2332 Serge 671
 
3031 serge 672
	while (remain > 0) {
673
		/* Operation in this page
674
		 *
675
		 * page_base = page offset within aperture
676
		 * page_offset = offset within page
677
		 * page_length = bytes to copy for this page
678
		 */
679
		page_base = offset & PAGE_MASK;
680
		page_offset = offset_in_page(offset);
681
		page_length = remain;
682
		if ((page_offset + remain) > PAGE_SIZE)
683
			page_length = PAGE_SIZE - page_offset;
2332 Serge 684
 
4371 Serge 685
        MapPage(vaddr, dev_priv->gtt.mappable_base+page_base, PG_SW|PG_NOCACHE);
3031 serge 686
 
3260 Serge 687
        memcpy(vaddr+page_offset, user_data, page_length);
688
 
3031 serge 689
		remain -= page_length;
690
		user_data += page_length;
691
		offset += page_length;
2332 Serge 692
	}
693
 
3260 Serge 694
    FreeKernelSpace(vaddr);
695
 
3031 serge 696
out_unpin:
697
	i915_gem_object_unpin(obj);
698
out:
699
	return ret;
700
}
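/* Unlike the Linux fast path, which uses io_mapping_map_atomic_wc(), this
 * port maps one aperture page at a time into a scratch kernel window with
 * MapPage() (uncached, PG_NOCACHE) and copies with memcpy(), so no fault
 * handling or atomic kmap is involved. */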
701
 
702
/* Per-page copy function for the shmem pwrite fastpath.
703
 * Flushes invalid cachelines before writing to the target if
704
 * needs_clflush_before is set and flushes out any written cachelines after
705
 * writing if needs_clflush is set. */
706
static int
707
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
708
		  char __user *user_data,
709
		  bool page_do_bit17_swizzling,
710
		  bool needs_clflush_before,
711
		  bool needs_clflush_after)
712
{
713
	char *vaddr;
3260 Serge 714
	int ret = 0;
3031 serge 715
 
716
	if (unlikely(page_do_bit17_swizzling))
717
		return -EINVAL;
718
 
4371 Serge 719
	vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW|PG_NOCACHE);
3031 serge 720
	if (needs_clflush_before)
721
		drm_clflush_virt_range(vaddr + shmem_page_offset,
722
				       page_length);
3260 Serge 723
	memcpy(vaddr + shmem_page_offset,
3031 serge 724
						user_data,
725
						page_length);
726
	if (needs_clflush_after)
727
		drm_clflush_virt_range(vaddr + shmem_page_offset,
728
				       page_length);
3260 Serge 729
	FreeKernelSpace(vaddr);
3031 serge 730
 
731
	return ret ? -EFAULT : 0;
732
}
3260 Serge 733
#if 0
3031 serge 734
 
735
/* Only difference to the fast-path function is that this can handle bit17
736
 * and uses non-atomic copy and kmap functions. */
737
static int
738
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
739
		  char __user *user_data,
740
		  bool page_do_bit17_swizzling,
741
		  bool needs_clflush_before,
742
		  bool needs_clflush_after)
743
{
744
	char *vaddr;
745
	int ret;
746
 
747
	vaddr = kmap(page);
748
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
749
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
750
					     page_length,
751
					     page_do_bit17_swizzling);
752
	if (page_do_bit17_swizzling)
753
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
754
						user_data,
755
						page_length);
756
	else
757
		ret = __copy_from_user(vaddr + shmem_page_offset,
758
				       user_data,
759
				       page_length);
760
	if (needs_clflush_after)
761
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
762
					     page_length,
763
					     page_do_bit17_swizzling);
764
	kunmap(page);
765
 
766
	return ret ? -EFAULT : 0;
767
}
3260 Serge 768
#endif
3031 serge 769
 
3260 Serge 770
 
3031 serge 771
static int
772
i915_gem_shmem_pwrite(struct drm_device *dev,
773
		      struct drm_i915_gem_object *obj,
774
		      struct drm_i915_gem_pwrite *args,
775
		      struct drm_file *file)
776
{
777
	ssize_t remain;
778
	loff_t offset;
779
	char __user *user_data;
780
	int shmem_page_offset, page_length, ret = 0;
781
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
782
	int hit_slowpath = 0;
783
	int needs_clflush_after = 0;
784
	int needs_clflush_before = 0;
3746 Serge 785
	struct sg_page_iter sg_iter;
3031 serge 786
 
3746 Serge 787
	user_data = to_user_ptr(args->data_ptr);
3031 serge 788
	remain = args->size;
789
 
790
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
791
 
792
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
793
		/* If we're not in the cpu write domain, set ourself into the gtt
794
		 * write domain and manually flush cachelines (if required). This
795
		 * optimizes for the case when the gpu will use the data
796
		 * right away and we therefore have to clflush anyway. */
4104 Serge 797
		needs_clflush_after = cpu_write_needs_clflush(obj);
798
		if (i915_gem_obj_bound_any(obj)) {
3031 serge 799
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
800
			if (ret)
801
				return ret;
802
		}
803
	}
4104 Serge 804
	/* Same trick applies to invalidate partially written cachelines read
805
	 * before writing. */
806
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
807
		needs_clflush_before =
808
			!cpu_cache_is_coherent(dev, obj->cache_level);
3031 serge 809
 
810
	ret = i915_gem_object_get_pages(obj);
2332 Serge 811
	if (ret)
3031 serge 812
		return ret;
2332 Serge 813
 
3031 serge 814
	i915_gem_object_pin_pages(obj);
2332 Serge 815
 
816
	offset = args->offset;
3031 serge 817
	obj->dirty = 1;
2332 Serge 818
 
3746 Serge 819
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
820
			 offset >> PAGE_SHIFT) {
821
		struct page *page = sg_page_iter_page(&sg_iter);
3031 serge 822
		int partial_cacheline_write;
2332 Serge 823
 
3031 serge 824
		if (remain <= 0)
825
			break;
826
 
2332 Serge 827
		/* Operation in this page
828
		 *
829
		 * shmem_page_offset = offset within page in shmem file
830
		 * page_length = bytes to copy for this page
831
		 */
832
		shmem_page_offset = offset_in_page(offset);
833
 
834
		page_length = remain;
835
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
836
			page_length = PAGE_SIZE - shmem_page_offset;
837
 
3031 serge 838
		/* If we don't overwrite a cacheline completely we need to be
839
		 * careful to have up-to-date data by first clflushing. Don't
840
		 * overcomplicate things and flush the entire page. */
841
		partial_cacheline_write = needs_clflush_before &&
842
			((shmem_page_offset | page_length)
3260 Serge 843
				& (x86_clflush_size - 1));
2332 Serge 844
 
3031 serge 845
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
846
			(page_to_phys(page) & (1 << 17)) != 0;
2332 Serge 847
 
3031 serge 848
		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
849
					user_data, page_do_bit17_swizzling,
850
					partial_cacheline_write,
851
					needs_clflush_after);
852
		if (ret == 0)
853
			goto next_page;
854
 
855
		hit_slowpath = 1;
856
		mutex_unlock(&dev->struct_mutex);
3260 Serge 857
		dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
3031 serge 858
 
3260 Serge 859
//		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
860
//					user_data, page_do_bit17_swizzling,
861
//					partial_cacheline_write,
862
//					needs_clflush_after);
863
 
3031 serge 864
		mutex_lock(&dev->struct_mutex);
865
 
866
next_page:
2332 Serge 867
 
3031 serge 868
		if (ret)
869
			goto out;
870
 
2332 Serge 871
		remain -= page_length;
3031 serge 872
		user_data += page_length;
2332 Serge 873
		offset += page_length;
874
	}
875
 
876
out:
3031 serge 877
	i915_gem_object_unpin_pages(obj);
878
 
879
	if (hit_slowpath) {
3480 Serge 880
		/*
881
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
882
		 * cachelines in-line while writing and the object moved
883
		 * out of the cpu write domain while we've dropped the lock.
884
		 */
885
		if (!needs_clflush_after &&
886
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
4104 Serge 887
			if (i915_gem_clflush_object(obj, obj->pin_display))
3243 Serge 888
			i915_gem_chipset_flush(dev);
3031 serge 889
		}
2332 Serge 890
	}
891
 
3031 serge 892
	if (needs_clflush_after)
3243 Serge 893
		i915_gem_chipset_flush(dev);
3031 serge 894
 
2332 Serge 895
	return ret;
896
}
3031 serge 897
 
898
/**
899
 * Writes data to the object referenced by handle.
900
 *
901
 * On error, the contents of the buffer that were to be modified are undefined.
902
 */
903
int
904
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
905
		      struct drm_file *file)
906
{
907
	struct drm_i915_gem_pwrite *args = data;
908
	struct drm_i915_gem_object *obj;
909
	int ret;
910
 
4104 Serge 911
	if (args->size == 0)
912
		return 0;
913
 
3480 Serge 914
     if(args->handle == -2)
915
     {
916
        printf("%s handle %d\n", __FUNCTION__, args->handle);
917
        return 0;
918
     }
919
 
3031 serge 920
	ret = i915_mutex_lock_interruptible(dev);
921
	if (ret)
922
		return ret;
923
 
924
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
925
	if (&obj->base == NULL) {
926
		ret = -ENOENT;
927
		goto unlock;
928
	}
929
 
930
	/* Bounds check destination. */
931
	if (args->offset > obj->base.size ||
932
	    args->size > obj->base.size - args->offset) {
933
		ret = -EINVAL;
934
		goto out;
935
	}
936
 
937
	/* prime objects have no backing filp to GEM pread/pwrite
938
	 * pages from.
939
	 */
940
	if (!obj->base.filp) {
941
		ret = -EINVAL;
942
		goto out;
943
	}
944
 
945
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
946
 
947
	ret = -EFAULT;
948
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
949
	 * it would end up going through the fenced access, and we'll get
950
	 * different detiling behavior between reading and writing.
951
	 * pread/pwrite currently are reading and writing from the CPU
952
	 * perspective, requiring manual detiling by the client.
953
	 */
3260 Serge 954
//   if (obj->phys_obj) {
955
//       ret = i915_gem_phys_pwrite(dev, obj, args, file);
956
//       goto out;
957
//   }
3031 serge 958
 
4104 Serge 959
	if (obj->tiling_mode == I915_TILING_NONE &&
960
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
961
	    cpu_write_needs_clflush(obj)) {
3031 serge 962
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
963
		/* Note that the gtt paths might fail with non-page-backed user
964
		 * pointers (e.g. gtt mappings when moving data between
965
		 * textures). Fallback to the shmem path in that case. */
966
	}
967
 
968
	if (ret == -EFAULT || ret == -ENOSPC)
3260 Serge 969
       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
3031 serge 970
 
971
out:
972
	drm_gem_object_unreference(&obj->base);
973
unlock:
974
	mutex_unlock(&dev->struct_mutex);
975
	return ret;
976
}
977
 
978
int
3480 Serge 979
i915_gem_check_wedge(struct i915_gpu_error *error,
3031 serge 980
		     bool interruptible)
981
{
3480 Serge 982
	if (i915_reset_in_progress(error)) {
3031 serge 983
		/* Non-interruptible callers can't handle -EAGAIN, hence return
984
		 * -EIO unconditionally for these. */
985
		if (!interruptible)
986
			return -EIO;
2332 Serge 987
 
3480 Serge 988
		/* Recovery complete, but the reset failed ... */
989
		if (i915_terminally_wedged(error))
3031 serge 990
			return -EIO;
2332 Serge 991
 
3031 serge 992
		return -EAGAIN;
993
	}
2332 Serge 994
 
3031 serge 995
	return 0;
996
}
2332 Serge 997
 
3031 serge 998
/*
999
 * Compare seqno against outstanding lazy request. Emit a request if they are
1000
 * equal.
1001
 */
1002
static int
1003
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1004
{
1005
	int ret;
2332 Serge 1006
 
3031 serge 1007
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
2332 Serge 1008
 
3031 serge 1009
	ret = 0;
1010
	if (seqno == ring->outstanding_lazy_request)
4104 Serge 1011
		ret = i915_add_request(ring, NULL);
2332 Serge 1012
 
3031 serge 1013
	return ret;
1014
}
2332 Serge 1015
 
3031 serge 1016
/**
1017
 * __wait_seqno - wait until execution of seqno has finished
1018
 * @ring: the ring expected to report seqno
1019
 * @seqno: duh!
3480 Serge 1020
 * @reset_counter: reset sequence associated with the given seqno
3031 serge 1021
 * @interruptible: do an interruptible wait (normally yes)
1022
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1023
 *
3480 Serge 1024
 * Note: It is of utmost importance that the passed in seqno and reset_counter
1025
 * values have been read by the caller in an smp safe manner. Where read-side
1026
 * locks are involved, it is sufficient to read the reset_counter before
1027
 * unlocking the lock that protects the seqno. For lockless tricks, the
1028
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1029
 * inserted.
1030
 *
3031 serge 1031
 * Returns 0 if the seqno was found within the allotted time. Else returns the
1032
 * errno with remaining time filled in timeout argument.
1033
 */
1034
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
3480 Serge 1035
			unsigned reset_counter,
3031 serge 1036
			bool interruptible, struct timespec *timeout)
1037
{
1038
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1039
	struct timespec before, now, wait_time={1,0};
1040
	unsigned long timeout_jiffies;
1041
	long end;
1042
	bool wait_forever = true;
1043
	int ret;
2332 Serge 1044
 
4104 Serge 1045
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1046
 
3031 serge 1047
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1048
		return 0;
2332 Serge 1049
 
3031 serge 1050
	trace_i915_gem_request_wait_begin(ring, seqno);
2332 Serge 1051
 
3031 serge 1052
	if (timeout != NULL) {
1053
		wait_time = *timeout;
1054
		wait_forever = false;
1055
	}
2332 Serge 1056
 
4104 Serge 1057
	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
2332 Serge 1058
 
3031 serge 1059
	if (WARN_ON(!ring->irq_get(ring)))
1060
		return -ENODEV;
2332 Serge 1061
 
3031 serge 1062
    /* Record current time in case interrupted by signal, or wedged */
1063
	getrawmonotonic(&before);
2332 Serge 1064
 
3031 serge 1065
#define EXIT_COND \
1066
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
3480 Serge 1067
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
1068
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3031 serge 1069
	do {
3266 Serge 1070
		if (interruptible)
1071
			end = wait_event_interruptible_timeout(ring->irq_queue,
1072
							       EXIT_COND,
1073
							       timeout_jiffies);
1074
		else
3031 serge 1075
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1076
						 timeout_jiffies);
2332 Serge 1077
 
3480 Serge 1078
		/* We need to check whether any gpu reset happened in between
1079
		 * the caller grabbing the seqno and now ... */
1080
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1081
			end = -EAGAIN;
1082
 
1083
		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1084
		 * gone. */
1085
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
3031 serge 1086
		if (ret)
1087
			end = ret;
1088
	} while (end == 0 && wait_forever);
2332 Serge 1089
 
3031 serge 1090
	getrawmonotonic(&now);
2332 Serge 1091
 
3031 serge 1092
	ring->irq_put(ring);
1093
	trace_i915_gem_request_wait_end(ring, seqno);
1094
#undef EXIT_COND
2332 Serge 1095
 
3031 serge 1096
	if (timeout) {
4104 Serge 1097
//		struct timespec sleep_time = timespec_sub(now, before);
1098
//		*timeout = timespec_sub(*timeout, sleep_time);
3031 serge 1099
	}
2332 Serge 1100
 
3031 serge 1101
	switch (end) {
1102
	case -EIO:
1103
	case -EAGAIN: /* Wedged */
1104
	case -ERESTARTSYS: /* Signal */
1105
		return (int)end;
1106
	case 0: /* Timeout */
1107
		return -ETIME;
1108
	default: /* Completed */
1109
		WARN_ON(end < 0); /* We're not aware of other errors */
1110
		return 0;
1111
	}
1112
}
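/* The wait loop above re-evaluates EXIT_COND on every wakeup: it completes
 * when the seqno has passed, but also when a GPU reset is in progress or
 * the reset counter has changed, in which case the caller sees -EAGAIN
 * (or -EIO once the device is terminally wedged). */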
2332 Serge 1113
 
3031 serge 1114
/**
1115
 * Waits for a sequence number to be signaled, and cleans up the
1116
 * request and object lists appropriately for that event.
1117
 */
1118
int
1119
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1120
{
1121
	struct drm_device *dev = ring->dev;
1122
	struct drm_i915_private *dev_priv = dev->dev_private;
1123
	bool interruptible = dev_priv->mm.interruptible;
1124
	int ret;
2332 Serge 1125
 
3031 serge 1126
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1127
	BUG_ON(seqno == 0);
2332 Serge 1128
 
3480 Serge 1129
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
3031 serge 1130
	if (ret)
1131
		return ret;
2332 Serge 1132
 
3031 serge 1133
	ret = i915_gem_check_olr(ring, seqno);
1134
	if (ret)
1135
		return ret;
2332 Serge 1136
 
3480 Serge 1137
	return __wait_seqno(ring, seqno,
1138
			    atomic_read(&dev_priv->gpu_error.reset_counter),
1139
			    interruptible, NULL);
3031 serge 1140
}
2332 Serge 1141
 
4104 Serge 1142
static int
1143
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1144
				     struct intel_ring_buffer *ring)
1145
{
1146
	i915_gem_retire_requests_ring(ring);
1147
 
1148
	/* Manually manage the write flush as we may have not yet
1149
	 * retired the buffer.
1150
	 *
1151
	 * Note that the last_write_seqno is always the earlier of
1152
	 * the two (read/write) seqno, so if we have successfully waited,
1153
	 * we know we have passed the last write.
1154
	 */
1155
	obj->last_write_seqno = 0;
1156
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1157
 
1158
	return 0;
1159
}
1160
 
3031 serge 1161
/**
1162
 * Ensures that all rendering to the object has completed and the object is
1163
 * safe to unbind from the GTT or access from the CPU.
1164
 */
1165
static __must_check int
1166
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1167
			       bool readonly)
1168
{
1169
	struct intel_ring_buffer *ring = obj->ring;
1170
	u32 seqno;
1171
	int ret;
2332 Serge 1172
 
3031 serge 1173
	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1174
	if (seqno == 0)
1175
		return 0;
2332 Serge 1176
 
3031 serge 1177
	ret = i915_wait_seqno(ring, seqno);
4104 Serge 1178
    if (ret)
1179
        return ret;
2332 Serge 1180
 
4104 Serge 1181
	return i915_gem_object_wait_rendering__tail(obj, ring);
3031 serge 1182
}
2332 Serge 1183
 
3260 Serge 1184
/* A nonblocking variant of the above wait. This is a highly dangerous routine
1185
 * as the object state may change during this call.
1186
 */
1187
static __must_check int
1188
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1189
					    bool readonly)
1190
{
1191
	struct drm_device *dev = obj->base.dev;
1192
	struct drm_i915_private *dev_priv = dev->dev_private;
1193
	struct intel_ring_buffer *ring = obj->ring;
3480 Serge 1194
	unsigned reset_counter;
3260 Serge 1195
	u32 seqno;
1196
	int ret;
2332 Serge 1197
 
3260 Serge 1198
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1199
	BUG_ON(!dev_priv->mm.interruptible);
2332 Serge 1200
 
3260 Serge 1201
	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1202
	if (seqno == 0)
1203
		return 0;
2332 Serge 1204
 
3480 Serge 1205
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
3260 Serge 1206
	if (ret)
1207
		return ret;
2332 Serge 1208
 
3260 Serge 1209
	ret = i915_gem_check_olr(ring, seqno);
1210
	if (ret)
1211
		return ret;
2332 Serge 1212
 
3480 Serge 1213
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3260 Serge 1214
	mutex_unlock(&dev->struct_mutex);
3480 Serge 1215
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3260 Serge 1216
	mutex_lock(&dev->struct_mutex);
4104 Serge 1217
	if (ret)
1218
		return ret;
2332 Serge 1219
 
4104 Serge 1220
	return i915_gem_object_wait_rendering__tail(obj, ring);
3260 Serge 1221
}
2332 Serge 1222
 
3260 Serge 1223
/**
1224
 * Called when user space prepares to use an object with the CPU, either
1225
 * through the mmap ioctl's mapping or a GTT mapping.
1226
 */
1227
int
1228
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1229
			  struct drm_file *file)
1230
{
1231
	struct drm_i915_gem_set_domain *args = data;
1232
	struct drm_i915_gem_object *obj;
1233
	uint32_t read_domains = args->read_domains;
1234
	uint32_t write_domain = args->write_domain;
1235
	int ret;
2332 Serge 1236
 
3480 Serge 1237
 
1238
     if(args->handle == -2)
1239
     {
1240
        printf("%s handle %d\n", __FUNCTION__, args->handle);
1241
        return 0;
1242
     }
1243
 
3260 Serge 1244
	/* Only handle setting domains to types used by the CPU. */
1245
	if (write_domain & I915_GEM_GPU_DOMAINS)
1246
		return -EINVAL;
2332 Serge 1247
 
3260 Serge 1248
	if (read_domains & I915_GEM_GPU_DOMAINS)
1249
		return -EINVAL;
2332 Serge 1250
 
3260 Serge 1251
	/* Having something in the write domain implies it's in the read
1252
	 * domain, and only that read domain.  Enforce that in the request.
1253
	 */
1254
	if (write_domain != 0 && read_domains != write_domain)
1255
		return -EINVAL;
2332 Serge 1256
 
3260 Serge 1257
	ret = i915_mutex_lock_interruptible(dev);
1258
	if (ret)
1259
		return ret;
2332 Serge 1260
 
3260 Serge 1261
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1262
	if (&obj->base == NULL) {
1263
		ret = -ENOENT;
1264
		goto unlock;
1265
	}
2332 Serge 1266
 
3260 Serge 1267
	/* Try to flush the object off the GPU without holding the lock.
1268
	 * We will repeat the flush holding the lock in the normal manner
1269
	 * to catch cases where we are gazumped.
1270
	 */
1271
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1272
	if (ret)
1273
		goto unref;
2332 Serge 1274
 
3260 Serge 1275
	if (read_domains & I915_GEM_DOMAIN_GTT) {
1276
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
2332 Serge 1277
 
3260 Serge 1278
		/* Silently promote "you're not bound, there was nothing to do"
1279
		 * to success, since the client was just asking us to
1280
		 * make sure everything was done.
1281
		 */
1282
		if (ret == -EINVAL)
1283
			ret = 0;
1284
	} else {
1285
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1286
	}
2332 Serge 1287
 
3260 Serge 1288
unref:
1289
	drm_gem_object_unreference(&obj->base);
1290
unlock:
1291
	mutex_unlock(&dev->struct_mutex);
1292
	return ret;
1293
}
2332 Serge 1294
 
4293 Serge 1295
/**
1296
 * Called when user space has done writes to this buffer
1297
 */
1298
int
1299
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1300
			 struct drm_file *file)
1301
{
1302
	struct drm_i915_gem_sw_finish *args = data;
1303
	struct drm_i915_gem_object *obj;
1304
	int ret = 0;
2332 Serge 1305
 
4293 Serge 1306
    if(args->handle == -2)
1307
    {
1308
       printf("%s handle %d\n", __FUNCTION__, args->handle);
1309
       return 0;
1310
    }
2332 Serge 1311
 
4293 Serge 1312
	ret = i915_mutex_lock_interruptible(dev);
1313
	if (ret)
1314
		return ret;
2332 Serge 1315
 
4293 Serge 1316
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1317
	if (&obj->base == NULL) {
1318
		ret = -ENOENT;
1319
		goto unlock;
1320
	}
2332 Serge 1321
 
4293 Serge 1322
	/* Pinned buffers may be scanout, so flush the cache */
1323
	if (obj->pin_display)
1324
		i915_gem_object_flush_cpu_write_domain(obj, true);
2332 Serge 1325
 
4293 Serge 1326
	drm_gem_object_unreference(&obj->base);
1327
unlock:
1328
	mutex_unlock(&dev->struct_mutex);
1329
	return ret;
1330
}
1331
 
3260 Serge 1332
/**
1333
 * Maps the contents of an object, returning the address it is mapped
1334
 * into.
1335
 *
1336
 * While the mapping holds a reference on the contents of the object, it doesn't
1337
 * imply a ref on the object itself.
1338
 */
1339
int
1340
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1341
		    struct drm_file *file)
1342
{
1343
	struct drm_i915_gem_mmap *args = data;
1344
	struct drm_gem_object *obj;
1345
	unsigned long addr = 0;
2332 Serge 1346
 
3480 Serge 1347
     if(args->handle == -2)
1348
     {
1349
        printf("%s handle %d\n", __FUNCTION__, args->handle);
1350
        return 0;
1351
     }
1352
 
3260 Serge 1353
	obj = drm_gem_object_lookup(dev, file, args->handle);
1354
	if (obj == NULL)
1355
		return -ENOENT;
4104 Serge 1356
 
3260 Serge 1357
	/* prime objects have no backing filp to GEM mmap
1358
	 * pages from.
1359
	 */
1360
	if (!obj->filp) {
1361
		drm_gem_object_unreference_unlocked(obj);
1362
		return -EINVAL;
1363
	}
2332 Serge 1364
 
3263 Serge 1365
    addr = vm_mmap(obj->filp, 0, args->size,
1366
              PROT_READ | PROT_WRITE, MAP_SHARED,
1367
              args->offset);
3260 Serge 1368
	drm_gem_object_unreference_unlocked(obj);
3263 Serge 1369
    if (IS_ERR((void *)addr))
1370
        return addr;
2332 Serge 1371
 
3260 Serge 1372
	args->addr_ptr = (uint64_t) addr;
2332 Serge 1373
 
3263 Serge 1374
    return 0;
3260 Serge 1375
}
2332 Serge 1376
 
1377
 
1378
 
1379
 
1380
 
1381
 
1382
 
1383
 
3031 serge 1384
 
1385
 
1386
 
1387
 
1388
 
1389
/**
1390
 * i915_gem_release_mmap - remove physical page mappings
1391
 * @obj: obj in question
1392
 *
1393
 * Preserve the reservation of the mmapping with the DRM core code, but
1394
 * relinquish ownership of the pages back to the system.
1395
 *
1396
 * It is vital that we remove the page mapping if we have mapped a tiled
1397
 * object through the GTT and then lose the fence register due to
1398
 * resource pressure. Similarly if the object has been moved out of the
1399
 * aperture, then pages mapped into userspace must be revoked. Removing the
1400
 * mapping will then trigger a page fault on the next user access, allowing
1401
 * fixup by i915_gem_fault().
1402
 */
1403
void
1404
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1405
{
1406
	if (!obj->fault_mappable)
1407
		return;
1408
 
4104 Serge 1409
//	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
3031 serge 1410
	obj->fault_mappable = false;
1411
}
1412
 
3480 Serge 1413
uint32_t
2332 Serge 1414
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1415
{
1416
	uint32_t gtt_size;
1417
 
1418
	if (INTEL_INFO(dev)->gen >= 4 ||
1419
	    tiling_mode == I915_TILING_NONE)
1420
		return size;
1421
 
1422
	/* Previous chips need a power-of-two fence region when tiling */
1423
	if (INTEL_INFO(dev)->gen == 3)
1424
		gtt_size = 1024*1024;
1425
	else
1426
		gtt_size = 512*1024;
1427
 
1428
	while (gtt_size < size)
1429
		gtt_size <<= 1;
1430
 
1431
	return gtt_size;
1432
}
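/* Example: a 600 KiB tiled object needs a 1 MiB fence region on gen3
 * (1 MiB minimum) and also 1 MiB on gen2 (512 KiB doubled once), because
 * pre-gen4 fence regions must be power-of-two sized. */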
1433
 
1434
/**
1435
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1436
 * @obj: object to check
1437
 *
1438
 * Return the required GTT alignment for an object, taking into account
1439
 * potential fence register mapping.
1440
 */
3480 Serge 1441
uint32_t
1442
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1443
			   int tiling_mode, bool fenced)
2332 Serge 1444
{
1445
	/*
1446
	 * Minimum alignment is 4k (GTT page size), but might be greater
1447
	 * if a fence register is needed for the object.
1448
	 */
3480 Serge 1449
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2332 Serge 1450
	    tiling_mode == I915_TILING_NONE)
1451
		return 4096;
1452
 
1453
	/*
1454
	 * Previous chips need to be aligned to the size of the smallest
1455
	 * fence register that can contain the object.
1456
	 */
1457
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
1458
}
1459
 
1460
/**
1461
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1462
 *					 unfenced object
1463
 * @dev: the device
1464
 * @size: size of the object
1465
 * @tiling_mode: tiling mode of the object
1466
 *
1467
 * Return the required GTT alignment for an object, only taking into account
1468
 * unfenced tiled surface requirements.
1469
 */
1470
uint32_t
1471
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1472
				    uint32_t size,
1473
				    int tiling_mode)
1474
{
1475
	/*
1476
	 * Minimum alignment is 4k (GTT page size) for sane hw.
1477
	 */
1478
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1479
	    tiling_mode == I915_TILING_NONE)
1480
		return 4096;
1481
 
1482
	/* Previous hardware however needs to be aligned to a power-of-two
1483
	 * tile height. The simplest method for determining this is to reuse
1484
	 * the power-of-tile object size.
1485
	 */
1486
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
1487
}
1488
 
3480 Serge 1489
int
1490
i915_gem_mmap_gtt(struct drm_file *file,
1491
          struct drm_device *dev,
1492
          uint32_t handle,
1493
          uint64_t *offset)
1494
{
1495
    struct drm_i915_private *dev_priv = dev->dev_private;
1496
    struct drm_i915_gem_object *obj;
1497
    unsigned long pfn;
1498
    char *mem, *ptr;
1499
    int ret;
1500
 
1501
    ret = i915_mutex_lock_interruptible(dev);
1502
    if (ret)
1503
        return ret;
1504
 
1505
    obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1506
    if (&obj->base == NULL) {
1507
        ret = -ENOENT;
1508
        goto unlock;
1509
    }
1510
 
1511
    if (obj->base.size > dev_priv->gtt.mappable_end) {
1512
        ret = -E2BIG;
1513
        goto out;
1514
    }
1515
 
1516
    if (obj->madv != I915_MADV_WILLNEED) {
1517
        DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1518
        ret = -EINVAL;
1519
        goto out;
1520
    }
1521
    /* Now bind it into the GTT if needed */
4104 Serge 1522
    ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
3480 Serge 1523
    if (ret)
1524
        goto out;
1525
 
1526
    ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1527
    if (ret)
1528
        goto unpin;
1529
 
1530
    ret = i915_gem_object_get_fence(obj);
1531
    if (ret)
1532
        goto unpin;
1533
 
1534
    obj->fault_mappable = true;
1535
 
4104 Serge 1536
    pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
3480 Serge 1537
 
1538
    /* Finally, remap it using the new GTT offset */
1539
 
1540
    mem = UserAlloc(obj->base.size);
1541
    if(unlikely(mem == NULL))
1542
    {
1543
        ret = -ENOMEM;
1544
        goto unpin;
1545
    }
1546
 
1547
    for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
1548
        MapPage(ptr, pfn, PG_SHARED|PG_UW);
1549
 
1550
unpin:
1551
    i915_gem_object_unpin(obj);
1552
 
1553
 
4104 Serge 1554
    *offset = mem;
3480 Serge 1555
 
1556
out:
1557
    drm_gem_object_unreference(&obj->base);
1558
unlock:
1559
    mutex_unlock(&dev->struct_mutex);
1560
    return ret;
1561
}
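/* In this port the "GTT mmap" is resolved immediately: the object is pinned
 * into the global GTT, a user-space window is reserved with UserAlloc(), and
 * each page of the aperture range is mapped into it with MapPage(), so the
 * returned value is a ready-to-use pointer rather than a fake mmap offset
 * resolved later by a fault handler. */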
1562
 
1563
/**
1564
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1565
 * @dev: DRM device
1566
 * @data: GTT mapping ioctl data
1567
 * @file: GEM object info
1568
 *
1569
 * Simply returns the fake offset to userspace so it can mmap it.
1570
 * The mmap call will end up in drm_gem_mmap(), which will set things
1571
 * up so we can get faults in the handler above.
1572
 *
1573
 * The fault handler will take care of binding the object into the GTT
1574
 * (since it may have been evicted to make room for something), allocating
1575
 * a fence register, and mapping the appropriate aperture address into
1576
 * userspace.
1577
 */
1578
int
1579
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1580
            struct drm_file *file)
1581
{
1582
    struct drm_i915_gem_mmap_gtt *args = data;
1583
 
1584
    return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1585
}
1586
 
3031 serge 1587
/* Immediately discard the backing storage */
1588
static void
1589
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1590
{
1591
//	struct inode *inode;
2332 Serge 1592
 
3031 serge 1593
//	i915_gem_object_free_mmap_offset(obj);
2332 Serge 1594
 
3263 Serge 1595
	if (obj->base.filp == NULL)
1596
		return;
2332 Serge 1597
 
3031 serge 1598
	/* Our goal here is to return as much of the memory as
1599
	 * is possible back to the system as we are called from OOM.
1600
	 * To do this we must instruct the shmfs to drop all of its
1601
	 * backing pages, *now*.
1602
	 */
1603
//	inode = obj->base.filp->f_path.dentry->d_inode;
1604
//	shmem_truncate_range(inode, 0, (loff_t)-1);
2332 Serge 1605
 
3031 serge 1606
	obj->madv = __I915_MADV_PURGED;
1607
}
2332 Serge 1608
 
3031 serge 1609
static inline int
1610
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1611
{
1612
	return obj->madv == I915_MADV_DONTNEED;
1613
}
2332 Serge 1614
 
3031 serge 1615
static void
1616
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1617
{
3746 Serge 1618
	struct sg_page_iter sg_iter;
1619
	int ret;
2332 Serge 1620
 
3031 serge 1621
	BUG_ON(obj->madv == __I915_MADV_PURGED);
2332 Serge 1622
 
3031 serge 1623
	ret = i915_gem_object_set_to_cpu_domain(obj, true);
1624
	if (ret) {
1625
		/* In the event of a disaster, abandon all caches and
1626
		 * hope for the best.
1627
		 */
1628
		WARN_ON(ret != -EIO);
4104 Serge 1629
		i915_gem_clflush_object(obj, true);
3031 serge 1630
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1631
	}
2332 Serge 1632
 
3031 serge 1633
	if (obj->madv == I915_MADV_DONTNEED)
1634
		obj->dirty = 0;
2332 Serge 1635
 
3746 Serge 1636
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1637
		struct page *page = sg_page_iter_page(&sg_iter);
2332 Serge 1638
 
3290 Serge 1639
        page_cache_release(page);
3243 Serge 1640
	}
1641
    //DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
3290 Serge 1642
 
4104 Serge 1643
    obj->dirty = 0;
3243 Serge 1644
 
1645
	sg_free_table(obj->pages);
1646
	kfree(obj->pages);
3031 serge 1647
}
2332 Serge 1648
 
3480 Serge 1649
int
3031 serge 1650
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1651
{
1652
	const struct drm_i915_gem_object_ops *ops = obj->ops;
2332 Serge 1653
 
3243 Serge 1654
	if (obj->pages == NULL)
3031 serge 1655
		return 0;
2332 Serge 1656
 
3031 serge 1657
	if (obj->pages_pin_count)
1658
		return -EBUSY;
1659
 
4104 Serge 1660
	BUG_ON(i915_gem_obj_bound_any(obj));
1661
 
3243 Serge 1662
	/* ->put_pages might need to allocate memory for the bit17 swizzle
1663
	 * array, hence protect them from being reaped by removing them from gtt
1664
	 * lists early. */
4104 Serge 1665
	list_del(&obj->global_list);
3243 Serge 1666
 
3031 serge 1667
	ops->put_pages(obj);
3243 Serge 1668
	obj->pages = NULL;
3031 serge 1669
 
1670
	if (i915_gem_object_is_purgeable(obj))
1671
		i915_gem_object_truncate(obj);
1672
 
1673
	return 0;
1674
}
1675
 
1676
 
1677
 
1678
 
1679
 
1680
 
1681
 
1682
 
2332 Serge 1683
static int
3031 serge 1684
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2332 Serge 1685
{
3260 Serge 1686
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3243 Serge 1687
    int page_count, i;
4104 Serge 1688
    struct sg_table *st;
3243 Serge 1689
	struct scatterlist *sg;
3746 Serge 1690
	struct sg_page_iter sg_iter;
3243 Serge 1691
	struct page *page;
3746 Serge 1692
	unsigned long last_pfn = 0;	/* suppress gcc warning */
3243 Serge 1693
	gfp_t gfp;
2332 Serge 1694
 
3243 Serge 1695
	/* Assert that the object is not currently in any GPU domain. As it
1696
	 * wasn't in the GTT, there shouldn't be any way it could have been in
1697
	 * a GPU cache
2332 Serge 1698
	 */
3243 Serge 1699
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1700
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1701
 
1702
	st = kmalloc(sizeof(*st), GFP_KERNEL);
1703
	if (st == NULL)
1704
		return -ENOMEM;
1705
 
2332 Serge 1706
	page_count = obj->base.size / PAGE_SIZE;
3243 Serge 1707
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1708
		kfree(st);
3746 Serge 1709
        FAIL();
2332 Serge 1710
		return -ENOMEM;
3243 Serge 1711
	}
2332 Serge 1712
 
3243 Serge 1713
	/* Get the list of pages out of our struct file.  They'll be pinned
1714
	 * at this point until we release them.
1715
	 *
1716
	 * Fail silently without starting the shrinker
1717
	 */
3746 Serge 1718
	sg = st->sgl;
1719
	st->nents = 0;
1720
	for (i = 0; i < page_count; i++) {
4104 Serge 1721
        page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
3260 Serge 1722
		if (IS_ERR(page)) {
1723
            dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
2332 Serge 1724
			goto err_pages;
1725
 
3260 Serge 1726
		}
3746 Serge 1727
 
1728
		if (!i || page_to_pfn(page) != last_pfn + 1) {
1729
			if (i)
1730
				sg = sg_next(sg);
1731
			st->nents++;
3243 Serge 1732
		sg_set_page(sg, page, PAGE_SIZE, 0);
3746 Serge 1733
		} else {
1734
			sg->length += PAGE_SIZE;
1735
		}
1736
		last_pfn = page_to_pfn(page);
3243 Serge 1737
	}
3031 serge 1738
 
3746 Serge 1739
		sg_mark_end(sg);
3243 Serge 1740
	obj->pages = st;
3031 serge 1741
 
2332 Serge 1742
	return 0;
1743
 
1744
err_pages:
3746 Serge 1745
	sg_mark_end(sg);
1746
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1747
		page_cache_release(sg_page_iter_page(&sg_iter));
3243 Serge 1748
	sg_free_table(st);
1749
	kfree(st);
3746 Serge 1750
    FAIL();
3243 Serge 1751
	return PTR_ERR(page);
2332 Serge 1752
}
1753
 
3031 serge 1754
/* Ensure that the associated pages are gathered from the backing storage
1755
 * and pinned into our object. i915_gem_object_get_pages() may be called
1756
 * multiple times before they are released by a single call to
1757
 * i915_gem_object_put_pages() - once the pages are no longer referenced
1758
 * either as a result of memory pressure (reaping pages under the shrinker)
1759
 * or as the object is itself released.
1760
 */
1761
int
1762
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2332 Serge 1763
{
3031 serge 1764
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1765
	const struct drm_i915_gem_object_ops *ops = obj->ops;
1766
	int ret;
2332 Serge 1767
 
3243 Serge 1768
	if (obj->pages)
3031 serge 1769
		return 0;
2332 Serge 1770
 
3031 serge 1771
	BUG_ON(obj->pages_pin_count);
2332 Serge 1772
 
3031 serge 1773
	ret = ops->get_pages(obj);
1774
	if (ret)
1775
		return ret;
2344 Serge 1776
 
4104 Serge 1777
	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3243 Serge 1778
    return 0;
2332 Serge 1779
}
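/*
 * Illustrative sketch, not part of the driver: how callers pair the helpers
 * above. It mirrors the pattern used by i915_gem_object_bind_to_vm() later in
 * this file; the wrapper name is hypothetical and struct_mutex is assumed to
 * be held. Kept under #if 0 so it does not affect the build.
 */
#if 0
static int example_access_backing_pages(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);	/* gather pages from the backing store */
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);		/* bump pages_pin_count while in use */
	/* ... walk the obj->pages sg_table here ... */
	i915_gem_object_unpin_pages(obj);	/* let i915_gem_object_put_pages() reap again */

	return 0;
}
#endif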
1780
 
1781
void
1782
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
3243 Serge 1783
			       struct intel_ring_buffer *ring)
2332 Serge 1784
{
1785
	struct drm_device *dev = obj->base.dev;
1786
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 1787
	u32 seqno = intel_ring_get_seqno(ring);
2332 Serge 1788
 
1789
	BUG_ON(ring == NULL);
4104 Serge 1790
	if (obj->ring != ring && obj->last_write_seqno) {
1791
		/* Keep the seqno relative to the current ring */
1792
		obj->last_write_seqno = seqno;
1793
	}
2332 Serge 1794
	obj->ring = ring;
1795
 
1796
	/* Add a reference if we're newly entering the active list. */
1797
	if (!obj->active) {
2344 Serge 1798
		drm_gem_object_reference(&obj->base);
2332 Serge 1799
		obj->active = 1;
1800
	}
1801
 
1802
	list_move_tail(&obj->ring_list, &ring->active_list);
1803
 
3031 serge 1804
	obj->last_read_seqno = seqno;
1805
 
2332 Serge 1806
	if (obj->fenced_gpu_access) {
3031 serge 1807
		obj->last_fenced_seqno = seqno;
1808
 
1809
		/* Bump MRU to take account of the delayed flush */
1810
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
2332 Serge 1811
		struct drm_i915_fence_reg *reg;
1812
 
1813
		reg = &dev_priv->fence_regs[obj->fence_reg];
3031 serge 1814
			list_move_tail(&reg->lru_list,
1815
				       &dev_priv->mm.fence_list);
1816
		}
2332 Serge 1817
	}
1818
}
1819
 
2344 Serge 1820
static void
3031 serge 1821
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2344 Serge 1822
{
4104 Serge 1823
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1824
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1825
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2332 Serge 1826
 
3031 serge 1827
	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2344 Serge 1828
	BUG_ON(!obj->active);
2332 Serge 1829
 
4104 Serge 1830
	list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2344 Serge 1831
 
3031 serge 1832
	list_del_init(&obj->ring_list);
2352 Serge 1833
	obj->ring = NULL;
2344 Serge 1834
 
3031 serge 1835
	obj->last_read_seqno = 0;
1836
	obj->last_write_seqno = 0;
1837
	obj->base.write_domain = 0;
1838
 
1839
	obj->last_fenced_seqno = 0;
2352 Serge 1840
	obj->fenced_gpu_access = false;
2344 Serge 1841
 
2352 Serge 1842
	obj->active = 0;
1843
	drm_gem_object_unreference(&obj->base);
1844
 
1845
	WARN_ON(i915_verify_lists(dev));
1846
}
1847
 
3243 Serge 1848
static int
3480 Serge 1849
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2344 Serge 1850
{
3243 Serge 1851
	struct drm_i915_private *dev_priv = dev->dev_private;
1852
	struct intel_ring_buffer *ring;
1853
	int ret, i, j;
2344 Serge 1854
 
3480 Serge 1855
	/* Carefully retire all requests without writing to the rings */
3243 Serge 1856
	for_each_ring(ring, dev_priv, i) {
3480 Serge 1857
		ret = intel_ring_idle(ring);
3243 Serge 1858
		if (ret)
1859
			return ret;
3480 Serge 1860
	}
1861
	i915_gem_retire_requests(dev);
3243 Serge 1862
 
3480 Serge 1863
	/* Finally reset hw state */
3243 Serge 1864
	for_each_ring(ring, dev_priv, i) {
3480 Serge 1865
		intel_ring_init_seqno(ring, seqno);
1866
 
3243 Serge 1867
		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1868
			ring->sync_seqno[j] = 0;
1869
	}
1870
 
1871
	return 0;
2344 Serge 1872
}
1873
 
3480 Serge 1874
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1875
{
1876
	struct drm_i915_private *dev_priv = dev->dev_private;
1877
	int ret;
1878
 
1879
	if (seqno == 0)
1880
		return -EINVAL;
1881
 
1882
	/* HWS page needs to be set less than what we
1883
	 * will inject to ring
1884
	 */
1885
	ret = i915_gem_init_seqno(dev, seqno - 1);
1886
	if (ret)
1887
		return ret;
1888
 
1889
	/* Carefully set the last_seqno value so that wrap
1890
	 * detection still works
1891
	 */
1892
	dev_priv->next_seqno = seqno;
1893
	dev_priv->last_seqno = seqno - 1;
1894
	if (dev_priv->last_seqno == 0)
1895
		dev_priv->last_seqno--;
1896
 
1897
	return 0;
1898
}
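/*
 * Worked example, assuming the usual u32 seqno arithmetic: a call like
 * i915_gem_set_seqno(dev, 0xfffffffd) idles the rings, primes their status
 * pages with 0xfffffffc via i915_gem_init_seqno(), and leaves
 * next_seqno = 0xfffffffd and last_seqno = 0xfffffffc, so the next request
 * emitted uses 0xfffffffd and wrap detection keeps working across
 * 0xffffffff -> 1. For seqno == 1, last_seqno would become 0 and is bumped
 * back to 0xffffffff by the last_seqno-- path above.
 */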
1899
 
3243 Serge 1900
int
1901
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2344 Serge 1902
{
3243 Serge 1903
	struct drm_i915_private *dev_priv = dev->dev_private;
2344 Serge 1904
 
3243 Serge 1905
	/* reserve 0 for non-seqno */
1906
	if (dev_priv->next_seqno == 0) {
3480 Serge 1907
		int ret = i915_gem_init_seqno(dev, 0);
3243 Serge 1908
		if (ret)
1909
			return ret;
1910
 
1911
		dev_priv->next_seqno = 1;
1912
	}
1913
 
3480 Serge 1914
	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
3243 Serge 1915
	return 0;
2332 Serge 1916
}
1917
 
4104 Serge 1918
int __i915_add_request(struct intel_ring_buffer *ring,
2352 Serge 1919
		 struct drm_file *file,
4104 Serge 1920
		       struct drm_i915_gem_object *obj,
3031 serge 1921
		 u32 *out_seqno)
2352 Serge 1922
{
1923
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
3031 serge 1924
	struct drm_i915_gem_request *request;
4104 Serge 1925
	u32 request_ring_position, request_start;
2352 Serge 1926
	int was_empty;
1927
	int ret;
2332 Serge 1928
 
4104 Serge 1929
	request_start = intel_ring_get_tail(ring);
3031 serge 1930
	/*
1931
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
1932
	 * after having emitted the batchbuffer command. Hence we need to fix
1933
	 * things up similar to emitting the lazy request. The difference here
1934
	 * is that the flush _must_ happen before the next request, no matter
1935
	 * what.
1936
	 */
4104 Serge 1937
   ret = intel_ring_flush_all_caches(ring);
1938
   if (ret)
1939
       return ret;
2332 Serge 1940
 
3031 serge 1941
	request = kmalloc(sizeof(*request), GFP_KERNEL);
1942
	if (request == NULL)
1943
		return -ENOMEM;
1944
 
1945
 
1946
	/* Record the position of the start of the request so that
1947
	 * should we detect the updated seqno part-way through the
4104 Serge 1948
    * GPU processing the request, we never over-estimate the
3031 serge 1949
	 * position of the head.
1950
	 */
4104 Serge 1951
   request_ring_position = intel_ring_get_tail(ring);
3031 serge 1952
 
3243 Serge 1953
	ret = ring->add_request(ring);
3031 serge 1954
	if (ret) {
1955
		kfree(request);
4104 Serge 1956
		return ret;
3031 serge 1957
	}
2332 Serge 1958
 
3243 Serge 1959
	request->seqno = intel_ring_get_seqno(ring);
2352 Serge 1960
	request->ring = ring;
4104 Serge 1961
	request->head = request_start;
3031 serge 1962
	request->tail = request_ring_position;
4104 Serge 1963
	request->ctx = ring->last_context;
1964
	request->batch_obj = obj;
1965
 
1966
	/* Whilst this request exists, batch_obj will be on the
1967
	 * active_list, and so will hold the active reference. Only when this
1968
	 * request is retired will the batch_obj be moved onto the
1969
	 * inactive_list and lose its active reference. Hence we do not need
1970
	 * to explicitly hold another reference here.
1971
	 */
1972
 
1973
	if (request->ctx)
1974
		i915_gem_context_reference(request->ctx);
1975
 
3031 serge 1976
    request->emitted_jiffies = GetTimerTicks();
2352 Serge 1977
	was_empty = list_empty(&ring->request_list);
1978
	list_add_tail(&request->list, &ring->request_list);
3031 serge 1979
	request->file_priv = NULL;
2332 Serge 1980
 
3263 Serge 1981
	if (file) {
1982
		struct drm_i915_file_private *file_priv = file->driver_priv;
2332 Serge 1983
 
3263 Serge 1984
		spin_lock(&file_priv->mm.lock);
1985
		request->file_priv = file_priv;
1986
		list_add_tail(&request->client_list,
1987
			      &file_priv->mm.request_list);
1988
		spin_unlock(&file_priv->mm.lock);
1989
	}
1990
 
1991
	trace_i915_gem_request_add(ring, request->seqno);
3031 serge 1992
	ring->outstanding_lazy_request = 0;
2332 Serge 1993
 
4104 Serge 1994
	if (!dev_priv->ums.mm_suspended) {
1995
//		i915_queue_hangcheck(ring->dev);
1996
 
1997
       if (was_empty) {
2360 Serge 1998
           queue_delayed_work(dev_priv->wq,
3482 Serge 1999
					   &dev_priv->mm.retire_work,
2000
					   round_jiffies_up_relative(HZ));
4104 Serge 2001
           intel_mark_busy(dev_priv->dev);
2002
       }
2003
   }
3031 serge 2004
 
2005
	if (out_seqno)
3243 Serge 2006
		*out_seqno = request->seqno;
2352 Serge 2007
	return 0;
2008
}
2332 Serge 2009
 
3263 Serge 2010
static inline void
2011
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2012
{
2013
	struct drm_i915_file_private *file_priv = request->file_priv;
2332 Serge 2014
 
3263 Serge 2015
	if (!file_priv)
2016
		return;
2332 Serge 2017
 
3263 Serge 2018
	spin_lock(&file_priv->mm.lock);
2019
	if (request->file_priv) {
2020
		list_del(&request->client_list);
2021
		request->file_priv = NULL;
2022
	}
2023
	spin_unlock(&file_priv->mm.lock);
2024
}
2332 Serge 2025
 
4104 Serge 2026
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2027
				    struct i915_address_space *vm)
2028
{
2029
	if (acthd >= i915_gem_obj_offset(obj, vm) &&
2030
	    acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2031
		return true;
2032
 
2033
	return false;
2034
}
2035
 
2036
static bool i915_head_inside_request(const u32 acthd_unmasked,
2037
				     const u32 request_start,
2038
				     const u32 request_end)
2039
{
2040
	const u32 acthd = acthd_unmasked & HEAD_ADDR;
2041
 
2042
	if (request_start < request_end) {
2043
		if (acthd >= request_start && acthd < request_end)
2044
			return true;
2045
	} else if (request_start > request_end) {
2046
		if (acthd >= request_start || acthd < request_end)
2047
			return true;
2048
	}
2049
 
2050
	return false;
2051
}
2052
 
2053
static struct i915_address_space *
2054
request_to_vm(struct drm_i915_gem_request *request)
2055
{
2056
	struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2057
	struct i915_address_space *vm;
2058
 
2059
	vm = &dev_priv->gtt.base;
2060
 
2061
	return vm;
2062
}
2063
 
2064
static bool i915_request_guilty(struct drm_i915_gem_request *request,
2065
				const u32 acthd, bool *inside)
2066
{
2067
	/* There is a possibility that unmasked head address
2068
	 * pointing inside the ring, matches the batch_obj address range.
2069
	 * However this is extremely unlikely.
2070
	 */
2071
	if (request->batch_obj) {
2072
		if (i915_head_inside_object(acthd, request->batch_obj,
2073
					    request_to_vm(request))) {
2074
			*inside = true;
2075
			return true;
2076
		}
2077
	}
2078
 
2079
	if (i915_head_inside_request(acthd, request->head, request->tail)) {
2080
		*inside = false;
2081
		return true;
2082
	}
2083
 
2084
	return false;
2085
}
2086
 
2087
static void i915_set_reset_status(struct intel_ring_buffer *ring,
2088
				  struct drm_i915_gem_request *request,
2089
				  u32 acthd)
2090
{
2091
	struct i915_ctx_hang_stats *hs = NULL;
2092
	bool inside, guilty;
2093
	unsigned long offset = 0;
2094
 
2095
	/* Innocent until proven guilty */
2096
	guilty = false;
2097
 
2098
	if (request->batch_obj)
2099
		offset = i915_gem_obj_offset(request->batch_obj,
2100
					     request_to_vm(request));
2101
 
2102
	if (ring->hangcheck.action != HANGCHECK_WAIT &&
2103
	    i915_request_guilty(request, acthd, &inside)) {
2104
		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2105
			  ring->name,
2106
			  inside ? "inside" : "flushing",
2107
			  offset,
2108
			  request->ctx ? request->ctx->id : 0,
2109
			  acthd);
2110
 
2111
		guilty = true;
2112
	}
2113
 
2114
	/* If contexts are disabled or this is the default context, use
2115
	 * file_priv->reset_state
2116
	 */
2117
	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2118
		hs = &request->ctx->hang_stats;
2119
	else if (request->file_priv)
2120
		hs = &request->file_priv->hang_stats;
2121
 
2122
	if (hs) {
2123
		if (guilty)
2124
			hs->batch_active++;
2125
		else
2126
			hs->batch_pending++;
2127
	}
2128
}
2129
 
2130
static void i915_gem_free_request(struct drm_i915_gem_request *request)
2131
{
2132
	list_del(&request->list);
2133
	i915_gem_request_remove_from_client(request);
2134
 
2135
	if (request->ctx)
2136
		i915_gem_context_unreference(request->ctx);
2137
 
2138
	kfree(request);
2139
}
2140
 
3031 serge 2141
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2142
				      struct intel_ring_buffer *ring)
2143
{
4104 Serge 2144
	u32 completed_seqno;
2145
	u32 acthd;
2146
 
2147
	acthd = intel_ring_get_active_head(ring);
2148
	completed_seqno = ring->get_seqno(ring, false);
2149
 
3031 serge 2150
	while (!list_empty(&ring->request_list)) {
2151
		struct drm_i915_gem_request *request;
2332 Serge 2152
 
3031 serge 2153
		request = list_first_entry(&ring->request_list,
2154
					   struct drm_i915_gem_request,
2155
					   list);
2332 Serge 2156
 
4104 Serge 2157
		if (request->seqno > completed_seqno)
2158
			i915_set_reset_status(ring, request, acthd);
2159
 
2160
		i915_gem_free_request(request);
3031 serge 2161
	}
2332 Serge 2162
 
3031 serge 2163
	while (!list_empty(&ring->active_list)) {
2164
		struct drm_i915_gem_object *obj;
2332 Serge 2165
 
3031 serge 2166
		obj = list_first_entry(&ring->active_list,
2167
				       struct drm_i915_gem_object,
2168
				       ring_list);
2332 Serge 2169
 
3031 serge 2170
		i915_gem_object_move_to_inactive(obj);
2171
	}
2172
}
2332 Serge 2173
 
3746 Serge 2174
void i915_gem_restore_fences(struct drm_device *dev)
3031 serge 2175
{
2176
	struct drm_i915_private *dev_priv = dev->dev_private;
2177
	int i;
2332 Serge 2178
 
3031 serge 2179
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
2180
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
4104 Serge 2181
 
2182
		/*
2183
		 * Commit delayed tiling changes if we have an object still
2184
		 * attached to the fence, otherwise just clear the fence.
2185
		 */
2186
		if (reg->obj) {
2187
			i915_gem_object_update_fence(reg->obj, reg,
2188
						     reg->obj->tiling_mode);
2189
		} else {
2190
			i915_gem_write_fence(dev, i, NULL);
2191
		}
3031 serge 2192
	}
2193
}
2360 Serge 2194
 
3031 serge 2195
void i915_gem_reset(struct drm_device *dev)
2196
{
2197
	struct drm_i915_private *dev_priv = dev->dev_private;
2198
	struct intel_ring_buffer *ring;
2199
	int i;
2360 Serge 2200
 
3031 serge 2201
	for_each_ring(ring, dev_priv, i)
2202
		i915_gem_reset_ring_lists(dev_priv, ring);
2360 Serge 2203
 
3746 Serge 2204
	i915_gem_restore_fences(dev);
3031 serge 2205
}
2360 Serge 2206
 
2352 Serge 2207
/**
2208
 * This function clears the request list as sequence numbers are passed.
2209
 */
3031 serge 2210
void
2352 Serge 2211
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2212
{
2213
	uint32_t seqno;
2332 Serge 2214
 
2352 Serge 2215
	if (list_empty(&ring->request_list))
2216
		return;
2332 Serge 2217
 
2352 Serge 2218
	WARN_ON(i915_verify_lists(ring->dev));
2332 Serge 2219
 
3031 serge 2220
	seqno = ring->get_seqno(ring, true);
2332 Serge 2221
 
2352 Serge 2222
	while (!list_empty(&ring->request_list)) {
2223
		struct drm_i915_gem_request *request;
2332 Serge 2224
 
2352 Serge 2225
		request = list_first_entry(&ring->request_list,
2226
					   struct drm_i915_gem_request,
2227
					   list);
2332 Serge 2228
 
2352 Serge 2229
		if (!i915_seqno_passed(seqno, request->seqno))
2230
			break;
2332 Serge 2231
 
2352 Serge 2232
		trace_i915_gem_request_retire(ring, request->seqno);
3031 serge 2233
		/* We know the GPU must have read the request to have
2234
		 * sent us the seqno + interrupt, so use the position
2235
		 * of tail of the request to update the last known position
2236
		 * of the GPU head.
2237
		 */
2238
		ring->last_retired_head = request->tail;
2332 Serge 2239
 
4104 Serge 2240
		i915_gem_free_request(request);
2352 Serge 2241
	}
2332 Serge 2242
 
2352 Serge 2243
	/* Move any buffers on the active list that are no longer referenced
2244
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
2245
	 */
2246
	while (!list_empty(&ring->active_list)) {
2247
		struct drm_i915_gem_object *obj;
2332 Serge 2248
 
2352 Serge 2249
		obj = list_first_entry(&ring->active_list,
2250
				      struct drm_i915_gem_object,
2251
				      ring_list);
2332 Serge 2252
 
3031 serge 2253
		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2352 Serge 2254
			break;
2332 Serge 2255
 
2352 Serge 2256
			i915_gem_object_move_to_inactive(obj);
2257
	}
2332 Serge 2258
 
2352 Serge 2259
	if (unlikely(ring->trace_irq_seqno &&
2260
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2261
		ring->irq_put(ring);
2262
		ring->trace_irq_seqno = 0;
2263
	}
2332 Serge 2264
 
2352 Serge 2265
	WARN_ON(i915_verify_lists(ring->dev));
2266
}
2332 Serge 2267
 
2352 Serge 2268
void
2269
i915_gem_retire_requests(struct drm_device *dev)
2270
{
2271
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 2272
	struct intel_ring_buffer *ring;
2352 Serge 2273
	int i;
2332 Serge 2274
 
3031 serge 2275
	for_each_ring(ring, dev_priv, i)
2276
		i915_gem_retire_requests_ring(ring);
2352 Serge 2277
}
2278
 
2360 Serge 2279
static void
2280
i915_gem_retire_work_handler(struct work_struct *work)
2281
{
2282
	drm_i915_private_t *dev_priv;
2283
	struct drm_device *dev;
3031 serge 2284
	struct intel_ring_buffer *ring;
2360 Serge 2285
	bool idle;
2286
	int i;
2352 Serge 2287
 
2360 Serge 2288
	dev_priv = container_of(work, drm_i915_private_t,
2289
				mm.retire_work.work);
2290
	dev = dev_priv->dev;
2352 Serge 2291
 
2360 Serge 2292
	/* Come back later if the device is busy... */
2293
	if (!mutex_trylock(&dev->struct_mutex)) {
3482 Serge 2294
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2295
				   round_jiffies_up_relative(HZ));
3243 Serge 2296
        return;
2360 Serge 2297
	}
2352 Serge 2298
 
2360 Serge 2299
	i915_gem_retire_requests(dev);
2352 Serge 2300
 
2360 Serge 2301
	/* Send a periodic flush down the ring so we don't hold onto GEM
2302
	 * objects indefinitely.
2303
	 */
2304
	idle = true;
3031 serge 2305
	for_each_ring(ring, dev_priv, i) {
2306
		if (ring->gpu_caches_dirty)
4104 Serge 2307
			i915_add_request(ring, NULL);
2352 Serge 2308
 
2360 Serge 2309
		idle &= list_empty(&ring->request_list);
2310
	}
2352 Serge 2311
 
4104 Serge 2312
	if (!dev_priv->ums.mm_suspended && !idle)
3482 Serge 2313
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2314
				   round_jiffies_up_relative(HZ));
3031 serge 2315
	if (idle)
2316
		intel_mark_idle(dev);
2360 Serge 2317
 
2318
	mutex_unlock(&dev->struct_mutex);
2319
}
2320
 
2344 Serge 2321
/**
3031 serge 2322
 * Ensures that an object will eventually get non-busy by flushing any required
2323
 * write domains, emitting any outstanding lazy request and retiring and
2324
 * completed requests.
2352 Serge 2325
 */
3031 serge 2326
static int
2327
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2352 Serge 2328
{
3031 serge 2329
	int ret;
2352 Serge 2330
 
3031 serge 2331
	if (obj->active) {
2332
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2333
		if (ret)
2334
			return ret;
2352 Serge 2335
 
3031 serge 2336
		i915_gem_retire_requests_ring(obj->ring);
2337
	}
2352 Serge 2338
 
3031 serge 2339
	return 0;
2340
}
2352 Serge 2341
 
3243 Serge 2342
/**
2343
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2344
 * @DRM_IOCTL_ARGS: standard ioctl arguments
2345
 *
2346
 * Returns 0 if successful, else an error is returned with the remaining time in
2347
 * the timeout parameter.
2348
 *  -ETIME: object is still busy after timeout
2349
 *  -ERESTARTSYS: signal interrupted the wait
2350
 *  -ENOENT: object doesn't exist
2351
 * Also possible, but rare:
2352
 *  -EAGAIN: GPU wedged
2353
 *  -ENOMEM: damn
2354
 *  -ENODEV: Internal IRQ fail
2355
 *  -E?: The add request failed
2356
 *
2357
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2358
 * non-zero timeout parameter the wait ioctl will wait for the given number of
2359
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2360
 * without holding struct_mutex the object may become re-busied before this
2361
 * function completes. A similar but shorter race condition exists in the busy
2362
 * ioctl
2363
 */
4246 Serge 2364
int
2365
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2366
{
2367
	drm_i915_private_t *dev_priv = dev->dev_private;
2368
	struct drm_i915_gem_wait *args = data;
2369
	struct drm_i915_gem_object *obj;
2370
	struct intel_ring_buffer *ring = NULL;
2371
	struct timespec timeout_stack, *timeout = NULL;
2372
	unsigned reset_counter;
2373
	u32 seqno = 0;
2374
	int ret = 0;
2352 Serge 2375
 
4246 Serge 2376
	if (args->timeout_ns >= 0) {
2377
		timeout_stack = ns_to_timespec(args->timeout_ns);
2378
		timeout = &timeout_stack;
2379
	}
2352 Serge 2380
 
4246 Serge 2381
	ret = i915_mutex_lock_interruptible(dev);
2382
	if (ret)
2383
		return ret;
2352 Serge 2384
 
4246 Serge 2385
    if(args->bo_handle == -2)
2386
    {
2387
        obj = get_fb_obj();
2388
        drm_gem_object_reference(&obj->base);
2389
    }
2390
    else
2391
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2392
	if (&obj->base == NULL) {
2393
		mutex_unlock(&dev->struct_mutex);
2394
		return -ENOENT;
2395
	}
2352 Serge 2396
 
4246 Serge 2397
	/* Need to make sure the object gets inactive eventually. */
2398
	ret = i915_gem_object_flush_active(obj);
2399
	if (ret)
2400
		goto out;
2352 Serge 2401
 
4246 Serge 2402
	if (obj->active) {
2403
		seqno = obj->last_read_seqno;
2404
		ring = obj->ring;
2405
	}
2352 Serge 2406
 
4246 Serge 2407
	if (seqno == 0)
2408
		 goto out;
2352 Serge 2409
 
4246 Serge 2410
	/* Do this after OLR check to make sure we make forward progress polling
2411
	 * on this IOCTL with a 0 timeout (like busy ioctl)
2412
	 */
2413
	if (!args->timeout_ns) {
2414
		ret = -ETIME;
2415
		goto out;
2416
	}
2352 Serge 2417
 
4246 Serge 2418
	drm_gem_object_unreference(&obj->base);
2419
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2420
	mutex_unlock(&dev->struct_mutex);
2352 Serge 2421
 
4246 Serge 2422
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2423
	if (timeout)
2424
		args->timeout_ns = timespec_to_ns(timeout);
2425
	return ret;
3243 Serge 2426
 
4246 Serge 2427
out:
2428
	drm_gem_object_unreference(&obj->base);
2429
	mutex_unlock(&dev->struct_mutex);
2430
	return ret;
2431
}
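/*
 * Illustrative userspace sketch, not part of the driver: exercising this
 * ioctl through a libdrm-style interface. It assumes <xf86drm.h> and
 * <i915_drm.h> are available to the client; KolibriOS userland may wrap the
 * ioctl differently, so treat this purely as a sketch of the ABI. As the
 * comment above notes, timeout_ns == 0 turns the call into a busy query.
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int example_wait_on_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = timeout_ns,	/* updated with the remaining time on return */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}
#endif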
3243 Serge 2432
 
2352 Serge 2433
/**
3031 serge 2434
 * i915_gem_object_sync - sync an object to a ring.
2435
 *
2436
 * @obj: object which may be in use on another ring.
2437
 * @to: ring we wish to use the object on. May be NULL.
2438
 *
2439
 * This code is meant to abstract object synchronization with the GPU.
2440
 * Calling with NULL implies synchronizing the object with the CPU
2441
 * rather than a particular GPU ring.
2442
 *
2443
 * Returns 0 if successful, else propagates up the lower layer error.
2344 Serge 2444
 */
2445
int
3031 serge 2446
i915_gem_object_sync(struct drm_i915_gem_object *obj,
2447
		     struct intel_ring_buffer *to)
2344 Serge 2448
{
3031 serge 2449
	struct intel_ring_buffer *from = obj->ring;
2450
	u32 seqno;
2451
	int ret, idx;
2332 Serge 2452
 
3031 serge 2453
	if (from == NULL || to == from)
2454
		return 0;
2332 Serge 2455
 
3031 serge 2456
	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2457
		return i915_gem_object_wait_rendering(obj, false);
2332 Serge 2458
 
3031 serge 2459
	idx = intel_ring_sync_index(from, to);
2460
 
2461
	seqno = obj->last_read_seqno;
2462
	if (seqno <= from->sync_seqno[idx])
2463
		return 0;
2464
 
2465
	ret = i915_gem_check_olr(obj->ring, seqno);
2466
	if (ret)
2467
		return ret;
2468
 
2469
	ret = to->sync_to(to, from, seqno);
2470
	if (!ret)
3243 Serge 2471
		/* We use last_read_seqno because sync_to()
2472
		 * might have just caused seqno wrap under
2473
		 * the radar.
2474
		 */
2475
		from->sync_seqno[idx] = obj->last_read_seqno;
3031 serge 2476
 
2477
	return ret;
2344 Serge 2478
}
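/*
 * Illustrative sketch, not part of the driver: the typical caller walks the
 * objects a batch is about to use and synchronises each one to the target
 * ring, which is essentially what the execbuffer path does before emitting
 * the batch. The wrapper name is hypothetical; obj->exec_list is the list
 * linkage execbuffer uses for this.
 */
#if 0
static int example_sync_objects_to_ring(struct list_head *objects,
					struct intel_ring_buffer *to)
{
	struct drm_i915_gem_object *obj;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, to);	/* CPU wait or ring-to-ring semaphore */
		if (ret)
			return ret;
	}

	return 0;
}
#endif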
2332 Serge 2479
 
2344 Serge 2480
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2481
{
2482
	u32 old_write_domain, old_read_domains;
2332 Serge 2483
 
2344 Serge 2484
	/* Force a pagefault for domain tracking on next user access */
2485
//	i915_gem_release_mmap(obj);
2332 Serge 2486
 
2344 Serge 2487
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2488
		return;
2332 Serge 2489
 
3480 Serge 2490
	/* Wait for any direct GTT access to complete */
2491
	mb();
2492
 
2344 Serge 2493
	old_read_domains = obj->base.read_domains;
2494
	old_write_domain = obj->base.write_domain;
2351 Serge 2495
 
2344 Serge 2496
	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2497
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2332 Serge 2498
 
2351 Serge 2499
	trace_i915_gem_object_change_domain(obj,
2500
					    old_read_domains,
2501
					    old_write_domain);
2344 Serge 2502
}
2332 Serge 2503
 
4104 Serge 2504
int i915_vma_unbind(struct i915_vma *vma)
2344 Serge 2505
{
4104 Serge 2506
	struct drm_i915_gem_object *obj = vma->obj;
3031 serge 2507
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3480 Serge 2508
	int ret;
2332 Serge 2509
 
3263 Serge 2510
    if(obj == get_fb_obj())
2511
        return 0;
2512
 
4104 Serge 2513
	if (list_empty(&vma->vma_link))
2344 Serge 2514
		return 0;
2332 Serge 2515
 
4104 Serge 2516
	if (!drm_mm_node_allocated(&vma->node))
2517
		goto destroy;
2518
 
3031 serge 2519
	if (obj->pin_count)
2520
		return -EBUSY;
2332 Serge 2521
 
3243 Serge 2522
	BUG_ON(obj->pages == NULL);
3031 serge 2523
 
2344 Serge 2524
	ret = i915_gem_object_finish_gpu(obj);
3031 serge 2525
	if (ret)
2344 Serge 2526
		return ret;
2527
	/* Continue on if we fail due to EIO, the GPU is hung so we
2528
	 * should be safe and we need to cleanup or else we might
2529
	 * cause memory corruption through use-after-free.
2530
	 */
2332 Serge 2531
 
2344 Serge 2532
	i915_gem_object_finish_gtt(obj);
2332 Serge 2533
 
2344 Serge 2534
	/* release the fence reg _after_ flushing */
2535
	ret = i915_gem_object_put_fence(obj);
3031 serge 2536
	if (ret)
2344 Serge 2537
		return ret;
2332 Serge 2538
 
4104 Serge 2539
	trace_i915_vma_unbind(vma);
2332 Serge 2540
 
3031 serge 2541
	if (obj->has_global_gtt_mapping)
3243 Serge 2542
        i915_gem_gtt_unbind_object(obj);
3031 serge 2543
	if (obj->has_aliasing_ppgtt_mapping) {
2544
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2545
		obj->has_aliasing_ppgtt_mapping = 0;
2546
	}
2547
	i915_gem_gtt_finish_object(obj);
4104 Serge 2548
	i915_gem_object_unpin_pages(obj);
2332 Serge 2549
 
4104 Serge 2550
	list_del(&vma->mm_list);
2344 Serge 2551
	/* Avoid an unnecessary call to unbind on rebind. */
4104 Serge 2552
	if (i915_is_ggtt(vma->vm))
2344 Serge 2553
		obj->map_and_fenceable = true;
2332 Serge 2554
 
4104 Serge 2555
	drm_mm_remove_node(&vma->node);
2332 Serge 2556
 
4104 Serge 2557
destroy:
2558
	i915_gem_vma_destroy(vma);
2559
 
2560
	/* Since the unbound list is global, only move to that list if
2561
	 * no more VMAs exist.
2562
	 * NB: Until we have real VMAs there will only ever be one */
2563
	WARN_ON(!list_empty(&obj->vma_list));
2564
	if (list_empty(&obj->vma_list))
2565
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2566
 
2344 Serge 2567
	return 0;
2568
}
2332 Serge 2569
 
4104 Serge 2570
/**
2571
 * Unbinds an object from the global GTT aperture.
2572
 */
2573
int
2574
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2575
{
2576
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2577
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
2578
 
2579
	if (!i915_gem_obj_ggtt_bound(obj))
2580
		return 0;
2581
 
2582
	if (obj->pin_count)
2583
		return -EBUSY;
2584
 
2585
	BUG_ON(obj->pages == NULL);
2586
 
2587
	return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2588
}
2589
 
3031 serge 2590
int i915_gpu_idle(struct drm_device *dev)
2344 Serge 2591
{
2592
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 2593
	struct intel_ring_buffer *ring;
2344 Serge 2594
	int ret, i;
2332 Serge 2595
 
2344 Serge 2596
	/* Flush everything onto the inactive list. */
3031 serge 2597
	for_each_ring(ring, dev_priv, i) {
2598
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2344 Serge 2599
		if (ret)
2600
			return ret;
3031 serge 2601
 
3243 Serge 2602
		ret = intel_ring_idle(ring);
3031 serge 2603
		if (ret)
2604
			return ret;
2344 Serge 2605
	}
2332 Serge 2606
 
2344 Serge 2607
	return 0;
2608
}
2332 Serge 2609
 
3480 Serge 2610
static void i965_write_fence_reg(struct drm_device *dev, int reg,
3031 serge 2611
					struct drm_i915_gem_object *obj)
2612
{
2613
	drm_i915_private_t *dev_priv = dev->dev_private;
3480 Serge 2614
	int fence_reg;
2615
	int fence_pitch_shift;
2332 Serge 2616
 
3480 Serge 2617
	if (INTEL_INFO(dev)->gen >= 6) {
2618
		fence_reg = FENCE_REG_SANDYBRIDGE_0;
2619
		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2620
	} else {
2621
		fence_reg = FENCE_REG_965_0;
2622
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2623
	}
2332 Serge 2624
 
4104 Serge 2625
	fence_reg += reg * 8;
2626
 
2627
	/* To w/a incoherency with non-atomic 64-bit register updates,
2628
	 * we split the 64-bit update into two 32-bit writes. In order
2629
	 * for a partial fence not to be evaluated between writes, we
2630
	 * precede the update with write to turn off the fence register,
2631
	 * and only enable the fence as the last step.
2632
	 *
2633
	 * For extra levels of paranoia, we make sure each step lands
2634
	 * before applying the next step.
2635
	 */
2636
	I915_WRITE(fence_reg, 0);
2637
	POSTING_READ(fence_reg);
2638
 
3031 serge 2639
	if (obj) {
4104 Serge 2640
		u32 size = i915_gem_obj_ggtt_size(obj);
2641
		uint64_t val;
2332 Serge 2642
 
4104 Serge 2643
		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3031 serge 2644
				 0xfffff000) << 32;
4104 Serge 2645
		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3480 Serge 2646
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3031 serge 2647
		if (obj->tiling_mode == I915_TILING_Y)
2648
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2649
		val |= I965_FENCE_REG_VALID;
2332 Serge 2650
 
4104 Serge 2651
		I915_WRITE(fence_reg + 4, val >> 32);
2652
		POSTING_READ(fence_reg + 4);
2653
 
2654
		I915_WRITE(fence_reg + 0, val);
3480 Serge 2655
	POSTING_READ(fence_reg);
4104 Serge 2656
	} else {
2657
		I915_WRITE(fence_reg + 4, 0);
2658
		POSTING_READ(fence_reg + 4);
2659
	}
3031 serge 2660
}
2332 Serge 2661
 
3031 serge 2662
static void i915_write_fence_reg(struct drm_device *dev, int reg,
2663
				 struct drm_i915_gem_object *obj)
2664
{
2665
	drm_i915_private_t *dev_priv = dev->dev_private;
2666
	u32 val;
2332 Serge 2667
 
3031 serge 2668
	if (obj) {
4104 Serge 2669
		u32 size = i915_gem_obj_ggtt_size(obj);
3031 serge 2670
		int pitch_val;
2671
		int tile_width;
2332 Serge 2672
 
4104 Serge 2673
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3031 serge 2674
		     (size & -size) != size ||
4104 Serge 2675
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2676
		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2677
		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2332 Serge 2678
 
3031 serge 2679
		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2680
			tile_width = 128;
2681
		else
2682
			tile_width = 512;
2332 Serge 2683
 
3031 serge 2684
		/* Note: pitch better be a power of two tile widths */
2685
		pitch_val = obj->stride / tile_width;
2686
		pitch_val = ffs(pitch_val) - 1;
2332 Serge 2687
 
4104 Serge 2688
		val = i915_gem_obj_ggtt_offset(obj);
3031 serge 2689
		if (obj->tiling_mode == I915_TILING_Y)
2690
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2691
		val |= I915_FENCE_SIZE_BITS(size);
2692
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2693
		val |= I830_FENCE_REG_VALID;
2694
	} else
2695
		val = 0;
2332 Serge 2696
 
3031 serge 2697
	if (reg < 8)
2698
		reg = FENCE_REG_830_0 + reg * 4;
2699
	else
2700
		reg = FENCE_REG_945_8 + (reg - 8) * 4;
2332 Serge 2701
 
3031 serge 2702
	I915_WRITE(reg, val);
2703
	POSTING_READ(reg);
2704
}
2332 Serge 2705
 
3031 serge 2706
static void i830_write_fence_reg(struct drm_device *dev, int reg,
2707
				struct drm_i915_gem_object *obj)
2708
{
2709
	drm_i915_private_t *dev_priv = dev->dev_private;
2710
	uint32_t val;
2344 Serge 2711
 
3031 serge 2712
	if (obj) {
4104 Serge 2713
		u32 size = i915_gem_obj_ggtt_size(obj);
3031 serge 2714
		uint32_t pitch_val;
2344 Serge 2715
 
4104 Serge 2716
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3031 serge 2717
		     (size & -size) != size ||
4104 Serge 2718
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2719
		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2720
		     i915_gem_obj_ggtt_offset(obj), size);
2344 Serge 2721
 
3031 serge 2722
		pitch_val = obj->stride / 128;
2723
		pitch_val = ffs(pitch_val) - 1;
2344 Serge 2724
 
4104 Serge 2725
		val = i915_gem_obj_ggtt_offset(obj);
3031 serge 2726
		if (obj->tiling_mode == I915_TILING_Y)
2727
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2728
		val |= I830_FENCE_SIZE_BITS(size);
2729
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2730
		val |= I830_FENCE_REG_VALID;
2731
	} else
2732
		val = 0;
2733
 
2734
	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2735
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
2736
}
2737
 
3480 Serge 2738
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2739
{
2740
	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2741
}
2742
 
3031 serge 2743
static void i915_gem_write_fence(struct drm_device *dev, int reg,
2744
				 struct drm_i915_gem_object *obj)
2332 Serge 2745
{
3480 Serge 2746
	struct drm_i915_private *dev_priv = dev->dev_private;
2747
 
2748
	/* Ensure that all CPU reads are completed before installing a fence
2749
	 * and all writes before removing the fence.
2750
	 */
2751
	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2752
		mb();
2753
 
4104 Serge 2754
	WARN(obj && (!obj->stride || !obj->tiling_mode),
2755
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2756
	     obj->stride, obj->tiling_mode);
2757
 
3031 serge 2758
	switch (INTEL_INFO(dev)->gen) {
2759
	case 7:
3480 Serge 2760
	case 6:
3031 serge 2761
	case 5:
2762
	case 4: i965_write_fence_reg(dev, reg, obj); break;
2763
	case 3: i915_write_fence_reg(dev, reg, obj); break;
2764
	case 2: i830_write_fence_reg(dev, reg, obj); break;
3480 Serge 2765
	default: BUG();
3031 serge 2766
	}
3480 Serge 2767
 
2768
	/* And similarly be paranoid that no direct access to this region
2769
	 * is reordered to before the fence is installed.
2770
	 */
2771
	if (i915_gem_object_needs_mb(obj))
2772
		mb();
2344 Serge 2773
}
2774
 
3031 serge 2775
static inline int fence_number(struct drm_i915_private *dev_priv,
2776
			       struct drm_i915_fence_reg *fence)
2344 Serge 2777
{
3031 serge 2778
	return fence - dev_priv->fence_regs;
2779
}
2332 Serge 2780
 
3031 serge 2781
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2782
					 struct drm_i915_fence_reg *fence,
2783
					 bool enable)
2784
{
4104 Serge 2785
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2786
	int reg = fence_number(dev_priv, fence);
2332 Serge 2787
 
4104 Serge 2788
	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3031 serge 2789
 
2790
	if (enable) {
4104 Serge 2791
		obj->fence_reg = reg;
3031 serge 2792
		fence->obj = obj;
2793
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2794
	} else {
2795
		obj->fence_reg = I915_FENCE_REG_NONE;
2796
		fence->obj = NULL;
2797
		list_del_init(&fence->lru_list);
2344 Serge 2798
	}
4104 Serge 2799
	obj->fence_dirty = false;
3031 serge 2800
}
2344 Serge 2801
 
3031 serge 2802
static int
3480 Serge 2803
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3031 serge 2804
{
2805
	if (obj->last_fenced_seqno) {
2806
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2352 Serge 2807
			if (ret)
2808
				return ret;
2344 Serge 2809
 
2810
		obj->last_fenced_seqno = 0;
2811
	}
2812
 
3031 serge 2813
	obj->fenced_gpu_access = false;
2332 Serge 2814
	return 0;
2815
}
2816
 
2817
int
2344 Serge 2818
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2332 Serge 2819
{
3031 serge 2820
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3746 Serge 2821
	struct drm_i915_fence_reg *fence;
2332 Serge 2822
	int ret;
2823
 
3480 Serge 2824
	ret = i915_gem_object_wait_fence(obj);
2332 Serge 2825
	if (ret)
2826
		return ret;
2827
 
3031 serge 2828
	if (obj->fence_reg == I915_FENCE_REG_NONE)
2829
		return 0;
2332 Serge 2830
 
3746 Serge 2831
	fence = &dev_priv->fence_regs[obj->fence_reg];
2832
 
3031 serge 2833
	i915_gem_object_fence_lost(obj);
3746 Serge 2834
	i915_gem_object_update_fence(obj, fence, false);
2344 Serge 2835
 
2332 Serge 2836
	return 0;
2837
}
2838
 
3031 serge 2839
static struct drm_i915_fence_reg *
2840
i915_find_fence_reg(struct drm_device *dev)
2841
{
2842
	struct drm_i915_private *dev_priv = dev->dev_private;
2843
	struct drm_i915_fence_reg *reg, *avail;
2844
	int i;
2332 Serge 2845
 
3031 serge 2846
	/* First try to find a free reg */
2847
	avail = NULL;
2848
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2849
		reg = &dev_priv->fence_regs[i];
2850
		if (!reg->obj)
2851
			return reg;
2332 Serge 2852
 
3031 serge 2853
		if (!reg->pin_count)
2854
			avail = reg;
2855
	}
2332 Serge 2856
 
3031 serge 2857
	if (avail == NULL)
2858
		return NULL;
2332 Serge 2859
 
3031 serge 2860
	/* None available, try to steal one or wait for a user to finish */
2861
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2862
		if (reg->pin_count)
2863
			continue;
2332 Serge 2864
 
3031 serge 2865
		return reg;
2866
	}
2332 Serge 2867
 
3031 serge 2868
	return NULL;
2869
}
2332 Serge 2870
 
3031 serge 2871
/**
2872
 * i915_gem_object_get_fence - set up fencing for an object
2873
 * @obj: object to map through a fence reg
2874
 *
2875
 * When mapping objects through the GTT, userspace wants to be able to write
2876
 * to them without having to worry about swizzling if the object is tiled.
2877
 * This function walks the fence regs looking for a free one for @obj,
2878
 * stealing one if it can't find any.
2879
 *
2880
 * It then sets up the reg based on the object's properties: address, pitch
2881
 * and tiling format.
2882
 *
2883
 * For an untiled surface, this removes any existing fence.
2884
 */
2885
int
2886
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2887
{
2888
	struct drm_device *dev = obj->base.dev;
2889
	struct drm_i915_private *dev_priv = dev->dev_private;
2890
	bool enable = obj->tiling_mode != I915_TILING_NONE;
2891
	struct drm_i915_fence_reg *reg;
2892
	int ret;
2332 Serge 2893
 
3031 serge 2894
	/* Have we updated the tiling parameters upon the object and so
2895
	 * will need to serialise the write to the associated fence register?
2896
	 */
2897
	if (obj->fence_dirty) {
3480 Serge 2898
		ret = i915_gem_object_wait_fence(obj);
3031 serge 2899
		if (ret)
2900
			return ret;
2901
	}
2332 Serge 2902
 
3031 serge 2903
	/* Just update our place in the LRU if our fence is getting reused. */
2904
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
2905
		reg = &dev_priv->fence_regs[obj->fence_reg];
2906
		if (!obj->fence_dirty) {
2907
			list_move_tail(&reg->lru_list,
2908
				       &dev_priv->mm.fence_list);
2909
			return 0;
2910
		}
2911
	} else if (enable) {
2912
		reg = i915_find_fence_reg(dev);
2913
		if (reg == NULL)
2914
			return -EDEADLK;
2332 Serge 2915
 
3031 serge 2916
		if (reg->obj) {
2917
			struct drm_i915_gem_object *old = reg->obj;
2332 Serge 2918
 
3480 Serge 2919
			ret = i915_gem_object_wait_fence(old);
3031 serge 2920
			if (ret)
2921
				return ret;
2332 Serge 2922
 
3031 serge 2923
			i915_gem_object_fence_lost(old);
2924
		}
2925
	} else
2926
		return 0;
2332 Serge 2927
 
3031 serge 2928
	i915_gem_object_update_fence(obj, reg, enable);
2332 Serge 2929
 
3031 serge 2930
	return 0;
2931
}
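/*
 * Illustrative sketch, not part of the driver: a tiled object about to be
 * written through the GTT aperture claims a fence first. The wrapper is
 * hypothetical and assumes the object is already bound and struct_mutex is
 * held; i915_gem_object_get_fence()/put_fence() are the helpers above.
 */
#if 0
static int example_fenced_gtt_write(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_fence(obj);	/* reuse, allocate or steal a fence reg */
	if (ret)
		return ret;

	/* ... CPU writes through the mappable aperture are detiled here ... */

	/* The fence normally stays attached and is reclaimed lazily through the
	 * LRU in i915_find_fence_reg(); it is only dropped explicitly with
	 * i915_gem_object_put_fence(), e.g. from i915_vma_unbind() above.
	 */
	return 0;
}
#endif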
2332 Serge 2932
 
3031 serge 2933
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2934
				     struct drm_mm_node *gtt_space,
2935
				     unsigned long cache_level)
2936
{
2937
	struct drm_mm_node *other;
2332 Serge 2938
 
3031 serge 2939
	/* On non-LLC machines we have to be careful when putting differing
2940
	 * types of snoopable memory together to avoid the prefetcher
3480 Serge 2941
	 * crossing memory domains and dying.
3031 serge 2942
	 */
2943
	if (HAS_LLC(dev))
2944
		return true;
2332 Serge 2945
 
4104 Serge 2946
	if (!drm_mm_node_allocated(gtt_space))
3031 serge 2947
		return true;
2332 Serge 2948
 
3031 serge 2949
	if (list_empty(&gtt_space->node_list))
2950
		return true;
2332 Serge 2951
 
3031 serge 2952
	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2953
	if (other->allocated && !other->hole_follows && other->color != cache_level)
2954
		return false;
2344 Serge 2955
 
3031 serge 2956
	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2957
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2958
		return false;
2344 Serge 2959
 
3031 serge 2960
	return true;
2961
}
2344 Serge 2962
 
3031 serge 2963
static void i915_gem_verify_gtt(struct drm_device *dev)
2964
{
2965
#if WATCH_GTT
2966
	struct drm_i915_private *dev_priv = dev->dev_private;
2967
	struct drm_i915_gem_object *obj;
2968
	int err = 0;
2344 Serge 2969
 
4104 Serge 2970
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3031 serge 2971
		if (obj->gtt_space == NULL) {
2972
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
2973
			err++;
2974
			continue;
2975
		}
2344 Serge 2976
 
3031 serge 2977
		if (obj->cache_level != obj->gtt_space->color) {
2978
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
4104 Serge 2979
			       i915_gem_obj_ggtt_offset(obj),
2980
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3031 serge 2981
			       obj->cache_level,
2982
			       obj->gtt_space->color);
2983
			err++;
2984
			continue;
2985
		}
2344 Serge 2986
 
3031 serge 2987
		if (!i915_gem_valid_gtt_space(dev,
2988
					      obj->gtt_space,
2989
					      obj->cache_level)) {
2990
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
4104 Serge 2991
			       i915_gem_obj_ggtt_offset(obj),
2992
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3031 serge 2993
			       obj->cache_level);
2994
			err++;
2995
			continue;
2996
		}
2997
	}
2344 Serge 2998
 
3031 serge 2999
	WARN_ON(err);
3000
#endif
2326 Serge 3001
}
3002
 
2332 Serge 3003
/**
3004
 * Finds free space in the GTT aperture and binds the object there.
3005
 */
3006
static int
4104 Serge 3007
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3008
			   struct i915_address_space *vm,
2332 Serge 3009
			    unsigned alignment,
3031 serge 3010
			    bool map_and_fenceable,
3011
			    bool nonblocking)
2332 Serge 3012
{
3013
	struct drm_device *dev = obj->base.dev;
3014
	drm_i915_private_t *dev_priv = dev->dev_private;
3015
	u32 size, fence_size, fence_alignment, unfenced_alignment;
4104 Serge 3016
	size_t gtt_max =
3017
		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3018
	struct i915_vma *vma;
2332 Serge 3019
	int ret;
2326 Serge 3020
 
2332 Serge 3021
	fence_size = i915_gem_get_gtt_size(dev,
3022
					   obj->base.size,
3023
					   obj->tiling_mode);
3024
	fence_alignment = i915_gem_get_gtt_alignment(dev,
3025
						     obj->base.size,
3480 Serge 3026
						     obj->tiling_mode, true);
2332 Serge 3027
	unfenced_alignment =
3480 Serge 3028
		i915_gem_get_gtt_alignment(dev,
2332 Serge 3029
						    obj->base.size,
3480 Serge 3030
						    obj->tiling_mode, false);
2332 Serge 3031
 
3032
	if (alignment == 0)
3033
		alignment = map_and_fenceable ? fence_alignment :
3034
						unfenced_alignment;
3035
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3036
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3037
		return -EINVAL;
3038
	}
3039
 
3040
	size = map_and_fenceable ? fence_size : obj->base.size;
3041
 
3042
	/* If the object is bigger than the entire aperture, reject it early
3043
	 * before evicting everything in a vain attempt to find space.
3044
	 */
4104 Serge 3045
	if (obj->base.size > gtt_max) {
3046
		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3047
			  obj->base.size,
3048
			  map_and_fenceable ? "mappable" : "total",
3049
			  gtt_max);
2332 Serge 3050
		return -E2BIG;
3051
	}
3052
 
3031 serge 3053
	ret = i915_gem_object_get_pages(obj);
3054
	if (ret)
3055
		return ret;
3056
 
3243 Serge 3057
	i915_gem_object_pin_pages(obj);
3058
 
4104 Serge 3059
	BUG_ON(!i915_is_ggtt(vm));
3060
 
3061
	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3062
	if (IS_ERR(vma)) {
3063
		ret = PTR_ERR(vma);
3064
		goto err_unpin;
3243 Serge 3065
	}
3066
 
4104 Serge 3067
	/* For now we only ever use 1 vma per object */
3068
	WARN_ON(!list_is_singular(&obj->vma_list));
3069
 
3070
search_free:
3071
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3072
						  size, alignment,
3073
						  obj->cache_level, 0, gtt_max,
3074
						  DRM_MM_SEARCH_DEFAULT);
3243 Serge 3075
	if (ret) {
2332 Serge 3076
 
4104 Serge 3077
		goto err_free_vma;
2332 Serge 3078
	}
4104 Serge 3079
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3080
					      obj->cache_level))) {
3081
		ret = -EINVAL;
3082
		goto err_remove_node;
3031 serge 3083
	}
2332 Serge 3084
 
3031 serge 3085
	ret = i915_gem_gtt_prepare_object(obj);
4104 Serge 3086
	if (ret)
3087
		goto err_remove_node;
2332 Serge 3088
 
4104 Serge 3089
	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3090
	list_add_tail(&vma->mm_list, &vm->inactive_list);
2332 Serge 3091
 
4104 Serge 3092
	if (i915_is_ggtt(vm)) {
3093
		bool mappable, fenceable;
2332 Serge 3094
 
4104 Serge 3095
		fenceable = (vma->node.size == fence_size &&
3096
			     (vma->node.start & (fence_alignment - 1)) == 0);
2332 Serge 3097
 
4104 Serge 3098
		mappable = (vma->node.start + obj->base.size <=
3099
			    dev_priv->gtt.mappable_end);
2332 Serge 3100
 
3101
	obj->map_and_fenceable = mappable && fenceable;
4104 Serge 3102
	}
2332 Serge 3103
 
4104 Serge 3104
	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3105
 
3106
	trace_i915_vma_bind(vma, map_and_fenceable);
3031 serge 3107
	i915_gem_verify_gtt(dev);
2332 Serge 3108
	return 0;
4104 Serge 3109
 
3110
err_remove_node:
3111
	drm_mm_remove_node(&vma->node);
3112
err_free_vma:
3113
	i915_gem_vma_destroy(vma);
3114
err_unpin:
3115
	i915_gem_object_unpin_pages(obj);
3116
	return ret;
2332 Serge 3117
}
3118
 
4104 Serge 3119
bool
3120
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3121
			bool force)
2332 Serge 3122
{
3123
	/* If we don't have a page list set up, then we're not pinned
3124
	 * to GPU, and we can ignore the cache flush because it'll happen
3125
	 * again at bind time.
3126
	 */
3243 Serge 3127
	if (obj->pages == NULL)
4104 Serge 3128
		return false;
2332 Serge 3129
 
3480 Serge 3130
	/*
3131
	 * Stolen memory is always coherent with the GPU as it is explicitly
3132
	 * marked as wc by the system, or the system is cache-coherent.
3133
	 */
3134
	if (obj->stolen)
4104 Serge 3135
		return false;
3480 Serge 3136
 
2332 Serge 3137
	/* If the GPU is snooping the contents of the CPU cache,
3138
	 * we do not need to manually clear the CPU cache lines.  However,
3139
	 * the caches are only snooped when the render cache is
3140
	 * flushed/invalidated.  As we always have to emit invalidations
3141
	 * and flushes when moving into and out of the RENDER domain, correct
3142
	 * snooping behaviour occurs naturally as the result of our domain
3143
	 * tracking.
3144
	 */
4104 Serge 3145
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3146
		return false;
2332 Serge 3147
 
4293 Serge 3148
	trace_i915_gem_object_clflush(obj);
3149
	drm_clflush_sg(obj->pages);
2344 Serge 3150
 
4104 Serge 3151
	return true;
2332 Serge 3152
}
3153
 
2344 Serge 3154
/** Flushes the GTT write domain for the object if it's dirty. */
3155
static void
3156
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3157
{
3158
	uint32_t old_write_domain;
2332 Serge 3159
 
2344 Serge 3160
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3161
		return;
2332 Serge 3162
 
2344 Serge 3163
	/* No actual flushing is required for the GTT write domain.  Writes
3164
	 * to it immediately go to main memory as far as we know, so there's
3165
	 * no chipset flush.  It also doesn't land in render cache.
3166
	 *
3167
	 * However, we do have to enforce the order so that all writes through
3168
	 * the GTT land before any writes to the device, such as updates to
3169
	 * the GATT itself.
3170
	 */
3171
	wmb();
2332 Serge 3172
 
2344 Serge 3173
	old_write_domain = obj->base.write_domain;
3174
	obj->base.write_domain = 0;
2332 Serge 3175
 
2351 Serge 3176
	trace_i915_gem_object_change_domain(obj,
3177
					    obj->base.read_domains,
3178
					    old_write_domain);
2344 Serge 3179
}
2332 Serge 3180
 
3181
/** Flushes the CPU write domain for the object if it's dirty. */
2326 Serge 3182
static void
4104 Serge 3183
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3184
				       bool force)
2332 Serge 3185
{
3186
	uint32_t old_write_domain;
3187
 
3188
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3189
		return;
3190
 
4104 Serge 3191
	if (i915_gem_clflush_object(obj, force))
3243 Serge 3192
		i915_gem_chipset_flush(obj->base.dev);
4104 Serge 3193
 
2332 Serge 3194
	old_write_domain = obj->base.write_domain;
3195
	obj->base.write_domain = 0;
3196
 
2351 Serge 3197
	trace_i915_gem_object_change_domain(obj,
3198
					    obj->base.read_domains,
3199
					    old_write_domain);
2332 Serge 3200
}
3201
 
3202
/**
3203
 * Moves a single object to the GTT read, and possibly write domain.
3204
 *
3205
 * This function returns when the move is complete, including waiting on
3206
 * flushes to occur.
3207
 */
3208
int
3209
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3210
{
3031 serge 3211
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2332 Serge 3212
	uint32_t old_write_domain, old_read_domains;
3213
	int ret;
3214
 
3215
	/* Not valid to be called on unbound objects. */
4104 Serge 3216
	if (!i915_gem_obj_bound_any(obj))
2332 Serge 3217
		return -EINVAL;
3218
 
3219
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3220
		return 0;
3221
 
3031 serge 3222
	ret = i915_gem_object_wait_rendering(obj, !write);
2332 Serge 3223
		if (ret)
3224
			return ret;
3225
 
4104 Serge 3226
	i915_gem_object_flush_cpu_write_domain(obj, false);
2332 Serge 3227
 
3480 Serge 3228
	/* Serialise direct access to this object with the barriers for
3229
	 * coherent writes from the GPU, by effectively invalidating the
3230
	 * GTT domain upon first access.
3231
	 */
3232
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3233
		mb();
3234
 
2332 Serge 3235
	old_write_domain = obj->base.write_domain;
3236
	old_read_domains = obj->base.read_domains;
3237
 
3238
	/* It should now be out of any other write domains, and we can update
3239
	 * the domain values for our changes.
3240
	 */
3241
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3242
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3243
	if (write) {
3244
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3245
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3246
		obj->dirty = 1;
3247
	}
3248
 
2351 Serge 3249
	trace_i915_gem_object_change_domain(obj,
3250
					    old_read_domains,
3251
					    old_write_domain);
3252
 
3031 serge 3253
	/* And bump the LRU for this access */
4104 Serge 3254
	if (i915_gem_object_is_inactive(obj)) {
3255
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3256
							   &dev_priv->gtt.base);
3257
		if (vma)
3258
			list_move_tail(&vma->mm_list,
3259
				       &dev_priv->gtt.base.inactive_list);
3031 serge 3260
 
4104 Serge 3261
	}
3262
 
2332 Serge 3263
	return 0;
3264
}
3265
 
2335 Serge 3266
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3267
				    enum i915_cache_level cache_level)
3268
{
3031 serge 3269
	struct drm_device *dev = obj->base.dev;
3270
	drm_i915_private_t *dev_priv = dev->dev_private;
4104 Serge 3271
	struct i915_vma *vma;
2335 Serge 3272
	int ret;
2332 Serge 3273
 
2335 Serge 3274
	if (obj->cache_level == cache_level)
3275
		return 0;
2332 Serge 3276
 
2335 Serge 3277
	if (obj->pin_count) {
3278
		DRM_DEBUG("can not change the cache level of pinned objects\n");
3279
		return -EBUSY;
3280
	}
2332 Serge 3281
 
4104 Serge 3282
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
3283
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3284
			ret = i915_vma_unbind(vma);
3031 serge 3285
		if (ret)
3286
			return ret;
4104 Serge 3287
 
3288
			break;
3289
		}
3031 serge 3290
	}
3291
 
4104 Serge 3292
	if (i915_gem_obj_bound_any(obj)) {
2335 Serge 3293
		ret = i915_gem_object_finish_gpu(obj);
3294
		if (ret)
3295
			return ret;
2332 Serge 3296
 
2335 Serge 3297
		i915_gem_object_finish_gtt(obj);
2332 Serge 3298
 
2335 Serge 3299
		/* Before SandyBridge, you could not use tiling or fence
3300
		 * registers with snooped memory, so relinquish any fences
3301
		 * currently pointing to our region in the aperture.
3302
		 */
3031 serge 3303
		if (INTEL_INFO(dev)->gen < 6) {
2335 Serge 3304
			ret = i915_gem_object_put_fence(obj);
3305
			if (ret)
3306
				return ret;
3307
		}
2332 Serge 3308
 
3031 serge 3309
		if (obj->has_global_gtt_mapping)
3310
			i915_gem_gtt_bind_object(obj, cache_level);
3311
		if (obj->has_aliasing_ppgtt_mapping)
3312
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3313
					       obj, cache_level);
2335 Serge 3314
	}
2332 Serge 3315
 
4104 Serge 3316
	list_for_each_entry(vma, &obj->vma_list, vma_link)
3317
		vma->node.color = cache_level;
3318
	obj->cache_level = cache_level;
3319
 
3320
	if (cpu_write_needs_clflush(obj)) {
2335 Serge 3321
		u32 old_read_domains, old_write_domain;
2332 Serge 3322
 
2335 Serge 3323
		/* If we're coming from LLC cached, then we haven't
3324
		 * actually been tracking whether the data is in the
3325
		 * CPU cache or not, since we only allow one bit set
3326
		 * in obj->write_domain and have been skipping the clflushes.
3327
		 * Just set it to the CPU cache for now.
3328
		 */
3329
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2332 Serge 3330
 
2335 Serge 3331
		old_read_domains = obj->base.read_domains;
3332
		old_write_domain = obj->base.write_domain;
2332 Serge 3333
 
2335 Serge 3334
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3335
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2332 Serge 3336
 
2351 Serge 3337
		trace_i915_gem_object_change_domain(obj,
3338
						    old_read_domains,
3339
						    old_write_domain);
2344 Serge 3340
    }
2332 Serge 3341
 
3031 serge 3342
	i915_gem_verify_gtt(dev);
2335 Serge 3343
	return 0;
3344
}
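
/* Illustrative caller (sketch, not taken from this file): a scanout buffer
 * is moved out of the LLC before being pinned for display, e.g.
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		return ret;
 *
 * The level can only be changed while the object is unpinned; pinned
 * objects fail with -EBUSY above.
 */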
2332 Serge 3345
 
3260 Serge 3346
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3347
			       struct drm_file *file)
3348
{
3349
	struct drm_i915_gem_caching *args = data;
3350
	struct drm_i915_gem_object *obj;
3351
	int ret;
3352
 
3480 Serge 3353
     if(args->handle == -2)
3354
     {
3355
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3356
        return 0;
3357
     }
3358
 
3260 Serge 3359
	ret = i915_mutex_lock_interruptible(dev);
3360
	if (ret)
3361
		return ret;
3362
 
3363
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3364
	if (&obj->base == NULL) {
3365
		ret = -ENOENT;
3366
		goto unlock;
3367
	}
3368
 
4104 Serge 3369
	switch (obj->cache_level) {
3370
	case I915_CACHE_LLC:
3371
	case I915_CACHE_L3_LLC:
3372
		args->caching = I915_CACHING_CACHED;
3373
		break;
3260 Serge 3374
 
4104 Serge 3375
	case I915_CACHE_WT:
3376
		args->caching = I915_CACHING_DISPLAY;
3377
		break;
3378
 
3379
	default:
3380
		args->caching = I915_CACHING_NONE;
3381
		break;
3382
	}
3383
 
3260 Serge 3384
	drm_gem_object_unreference(&obj->base);
3385
unlock:
3386
	mutex_unlock(&dev->struct_mutex);
3387
	return ret;
3388
}
3389
 
3390
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3391
			       struct drm_file *file)
3392
{
3393
	struct drm_i915_gem_caching *args = data;
3394
	struct drm_i915_gem_object *obj;
3395
	enum i915_cache_level level;
3396
	int ret;
3397
 
3480 Serge 3398
     if(args->handle == -2)
3399
     {
3400
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3401
        return 0;
3402
     }
3403
 
3260 Serge 3404
	switch (args->caching) {
3405
	case I915_CACHING_NONE:
3406
		level = I915_CACHE_NONE;
3407
		break;
3408
	case I915_CACHING_CACHED:
3409
		level = I915_CACHE_LLC;
3410
		break;
4104 Serge 3411
	case I915_CACHING_DISPLAY:
3412
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3413
		break;
3260 Serge 3414
	default:
3415
		return -EINVAL;
3416
	}
3417
 
3418
	ret = i915_mutex_lock_interruptible(dev);
3419
	if (ret)
3420
		return ret;
3421
 
3422
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3423
	if (&obj->base == NULL) {
3424
		ret = -ENOENT;
3425
		goto unlock;
3426
	}
3427
 
3428
	ret = i915_gem_object_set_cache_level(obj, level);
3429
 
3430
	drm_gem_object_unreference(&obj->base);
3431
unlock:
3432
	mutex_unlock(&dev->struct_mutex);
3433
	return ret;
3434
}
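
/* Userspace view (illustrative sketch; the exact libdrm wrapper used here is
 * an assumption of this note, not something defined in this file):
 *
 *	struct drm_i915_gem_caching arg = { .handle = handle,
 *					    .caching = I915_CACHING_CACHED };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * I915_CACHING_CACHED maps to I915_CACHE_LLC, I915_CACHING_NONE to
 * I915_CACHE_NONE, and I915_CACHING_DISPLAY to write-through on parts with
 * HAS_WT(), uncached otherwise.
 */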
3435
 
4104 Serge 3436
static bool is_pin_display(struct drm_i915_gem_object *obj)
3437
{
3438
	/* There are 3 sources that pin objects:
3439
	 *   1. The display engine (scanouts, sprites, cursors);
3440
	 *   2. Reservations for execbuffer;
3441
	 *   3. The user.
3442
	 *
3443
	 * We can ignore reservations as we hold the struct_mutex and
3444
	 * are only called outside of the reservation path.  The user
3445
	 * can only increment pin_count once, and so if after
3446
	 * subtracting the potential reference by the user, any pin_count
3447
	 * remains, it must be due to another use by the display engine.
3448
	 */
3449
	return obj->pin_count - !!obj->user_pin_count;
3450
}
3451
 
2335 Serge 3452
/*
3453
 * Prepare buffer for display plane (scanout, cursors, etc).
3454
 * Can be called from an uninterruptible phase (modesetting) and allows
3455
 * any flushes to be pipelined (for pageflips).
3456
 */
3457
int
3458
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3459
				     u32 alignment,
3460
				     struct intel_ring_buffer *pipelined)
3461
{
3462
	u32 old_read_domains, old_write_domain;
3463
	int ret;
2332 Serge 3464
 
3031 serge 3465
	if (pipelined != obj->ring) {
3466
		ret = i915_gem_object_sync(obj, pipelined);
2335 Serge 3467
		if (ret)
3468
			return ret;
3469
	}
2332 Serge 3470
 
4104 Serge 3471
	/* Mark the pin_display early so that we account for the
3472
	 * display coherency whilst setting up the cache domains.
3473
	 */
3474
	obj->pin_display = true;
3475
 
2335 Serge 3476
	/* The display engine is not coherent with the LLC cache on gen6.  As
3477
	 * a result, we make sure that the pinning that is about to occur is
3478
	 * done with uncached PTEs. This is lowest common denominator for all
3479
	 * chipsets.
3480
	 *
3481
	 * However for gen6+, we could do better by using the GFDT bit instead
3482
	 * of uncaching, which would allow us to flush all the LLC-cached data
3483
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3484
	 */
4104 Serge 3485
	ret = i915_gem_object_set_cache_level(obj,
3486
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
2360 Serge 3487
	if (ret)
4104 Serge 3488
		goto err_unpin_display;
2332 Serge 3489
 
2335 Serge 3490
	/* As the user may map the buffer once pinned in the display plane
3491
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3492
	 * always use map_and_fenceable for all scanout buffers.
3493
	 */
4104 Serge 3494
	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
2335 Serge 3495
	if (ret)
4104 Serge 3496
		goto err_unpin_display;
2332 Serge 3497
 
4104 Serge 3498
	i915_gem_object_flush_cpu_write_domain(obj, true);
2332 Serge 3499
 
2335 Serge 3500
	old_write_domain = obj->base.write_domain;
3501
	old_read_domains = obj->base.read_domains;
2332 Serge 3502
 
2335 Serge 3503
	/* It should now be out of any other write domains, and we can update
3504
	 * the domain values for our changes.
3505
	 */
3031 serge 3506
	obj->base.write_domain = 0;
2335 Serge 3507
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2332 Serge 3508
 
2351 Serge 3509
	trace_i915_gem_object_change_domain(obj,
3510
					    old_read_domains,
3511
					    old_write_domain);
2332 Serge 3512
 
2335 Serge 3513
	return 0;
4104 Serge 3514
 
3515
err_unpin_display:
3516
	obj->pin_display = is_pin_display(obj);
3517
	return ret;
2335 Serge 3518
}
2332 Serge 3519
 
4104 Serge 3520
void
3521
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3522
{
3523
	i915_gem_object_unpin(obj);
3524
	obj->pin_display = is_pin_display(obj);
3525
}
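
/* Pairing sketch: a modeset path pins and later unpins the scanout buffer,
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	...
 *	i915_gem_object_unpin_from_display_plane(obj);
 *
 * so that obj->pin_display is re-derived from the remaining pin_count via
 * is_pin_display() once the display engine drops its reference.
 */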
3526
 
2344 Serge 3527
int
3528
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3529
{
3530
	int ret;
2332 Serge 3531
 
2344 Serge 3532
	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3533
		return 0;
2332 Serge 3534
 
3031 serge 3535
	ret = i915_gem_object_wait_rendering(obj, false);
3243 Serge 3536
	if (ret)
3537
		return ret;
2332 Serge 3538
 
2344 Serge 3539
	/* Ensure that we invalidate the GPU's caches and TLBs. */
3540
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3031 serge 3541
	return 0;
2344 Serge 3542
}
2332 Serge 3543
 
2344 Serge 3544
/**
3545
 * Moves a single object to the CPU read, and possibly write domain.
3546
 *
3547
 * This function returns when the move is complete, including waiting on
3548
 * flushes to occur.
3549
 */
3031 serge 3550
int
2344 Serge 3551
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3552
{
3553
	uint32_t old_write_domain, old_read_domains;
3554
	int ret;
2332 Serge 3555
 
2344 Serge 3556
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3557
		return 0;
2332 Serge 3558
 
3031 serge 3559
	ret = i915_gem_object_wait_rendering(obj, !write);
2344 Serge 3560
	if (ret)
3561
		return ret;
2332 Serge 3562
 
2344 Serge 3563
	i915_gem_object_flush_gtt_write_domain(obj);
2332 Serge 3564
 
2344 Serge 3565
	old_write_domain = obj->base.write_domain;
3566
	old_read_domains = obj->base.read_domains;
2332 Serge 3567
 
2344 Serge 3568
	/* Flush the CPU cache if it's still invalid. */
3569
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4104 Serge 3570
		i915_gem_clflush_object(obj, false);
2332 Serge 3571
 
2344 Serge 3572
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3573
	}
2332 Serge 3574
 
2344 Serge 3575
	/* It should now be out of any other write domains, and we can update
3576
	 * the domain values for our changes.
3577
	 */
3578
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2332 Serge 3579
 
2344 Serge 3580
	/* If we're writing through the CPU, then the GPU read domains will
3581
	 * need to be invalidated at next use.
3582
	 */
3583
	if (write) {
3584
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3585
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3586
	}
2332 Serge 3587
 
2351 Serge 3588
	trace_i915_gem_object_change_domain(obj,
3589
					    old_read_domains,
3590
					    old_write_domain);
2332 Serge 3591
 
2344 Serge 3592
	return 0;
3593
}
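
/* Domain bookkeeping sketch for the function above: after a successful
 * write transition the object is CPU-only,
 *
 *	obj->base.read_domains == I915_GEM_DOMAIN_CPU
 *	obj->base.write_domain == I915_GEM_DOMAIN_CPU
 *
 * while a read-only transition merely ORs I915_GEM_DOMAIN_CPU into the read
 * domains and leaves the write domain untouched.
 */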
2332 Serge 3594
 
3031 serge 3595
/* Throttle our rendering by waiting until the ring has completed our requests
3596
 * emitted over 20 msec ago.
2344 Serge 3597
 *
3031 serge 3598
 * Note that if we were to use the current jiffies each time around the loop,
3599
 * we wouldn't escape the function with any frames outstanding if the time to
3600
 * render a frame was over 20ms.
3601
 *
3602
 * This should get us reasonable parallelism between CPU and GPU but also
3603
 * relatively low latency when blocking on a particular request to finish.
2344 Serge 3604
 */
3031 serge 3605
static int
3606
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2344 Serge 3607
{
3031 serge 3608
	struct drm_i915_private *dev_priv = dev->dev_private;
3609
	struct drm_i915_file_private *file_priv = file->driver_priv;
3263 Serge 3610
	unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
3031 serge 3611
	struct drm_i915_gem_request *request;
3612
	struct intel_ring_buffer *ring = NULL;
3480 Serge 3613
	unsigned reset_counter;
3031 serge 3614
	u32 seqno = 0;
3615
	int ret;
2332 Serge 3616
 
3480 Serge 3617
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3618
	if (ret)
3619
		return ret;
2332 Serge 3620
 
3480 Serge 3621
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3622
	if (ret)
3623
		return ret;
3624
 
3031 serge 3625
	spin_lock(&file_priv->mm.lock);
3626
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3627
		if (time_after_eq(request->emitted_jiffies, recent_enough))
3628
			break;
2332 Serge 3629
 
3031 serge 3630
		ring = request->ring;
3631
		seqno = request->seqno;
3632
	}
3480 Serge 3633
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3031 serge 3634
	spin_unlock(&file_priv->mm.lock);
2332 Serge 3635
 
3031 serge 3636
	if (seqno == 0)
3637
		return 0;
2332 Serge 3638
 
3480 Serge 3639
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3031 serge 3640
	if (ret == 0)
3641
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
2332 Serge 3642
 
3031 serge 3643
	return ret;
2352 Serge 3644
}
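
/* Port note (assumption based on the code above): this KolibriOS build uses
 * GetTimerTicks() in place of jiffies, so the 20 msec throttle window is
 *
 *	recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
 *
 * i.e. expressed in system timer ticks rather than kernel jiffies.
 */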
2332 Serge 3645
 
3646
int
3647
i915_gem_object_pin(struct drm_i915_gem_object *obj,
4104 Serge 3648
		    struct i915_address_space *vm,
2332 Serge 3649
		    uint32_t alignment,
3031 serge 3650
		    bool map_and_fenceable,
3651
		    bool nonblocking)
2332 Serge 3652
{
4104 Serge 3653
	struct i915_vma *vma;
2332 Serge 3654
	int ret;
3655
 
3031 serge 3656
	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3657
		return -EBUSY;
2332 Serge 3658
 
4104 Serge 3659
	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3660
 
3661
	vma = i915_gem_obj_to_vma(obj, vm);
3662
 
3663
	if (vma) {
3664
		if ((alignment &&
3665
		     vma->node.start & (alignment - 1)) ||
2332 Serge 3666
		    (map_and_fenceable && !obj->map_and_fenceable)) {
3667
			WARN(obj->pin_count,
3668
			     "bo is already pinned with incorrect alignment:"
4104 Serge 3669
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
2332 Serge 3670
			     " obj->map_and_fenceable=%d\n",
4104 Serge 3671
			     i915_gem_obj_offset(obj, vm), alignment,
2332 Serge 3672
			     map_and_fenceable,
3673
			     obj->map_and_fenceable);
4104 Serge 3674
			ret = i915_vma_unbind(vma);
2332 Serge 3675
			if (ret)
3676
				return ret;
3677
		}
3678
	}
3679
 
4104 Serge 3680
	if (!i915_gem_obj_bound(obj, vm)) {
3243 Serge 3681
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3682
 
4104 Serge 3683
		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3031 serge 3684
						  map_and_fenceable,
3685
						  nonblocking);
2332 Serge 3686
		if (ret)
3687
			return ret;
3243 Serge 3688
 
3689
		if (!dev_priv->mm.aliasing_ppgtt)
3690
			i915_gem_gtt_bind_object(obj, obj->cache_level);
2332 Serge 3691
	}
3692
 
3031 serge 3693
	if (!obj->has_global_gtt_mapping && map_and_fenceable)
3694
		i915_gem_gtt_bind_object(obj, obj->cache_level);
3695
 
3696
	obj->pin_count++;
2332 Serge 3697
	obj->pin_mappable |= map_and_fenceable;
3698
 
3699
	return 0;
3700
}
3701
 
2344 Serge 3702
void
3703
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3704
{
3705
	BUG_ON(obj->pin_count == 0);
4104 Serge 3706
	BUG_ON(!i915_gem_obj_bound_any(obj));
2332 Serge 3707
 
3031 serge 3708
	if (--obj->pin_count == 0)
2344 Serge 3709
		obj->pin_mappable = false;
3710
}
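
/* Usage sketch: most callers pin through the GGTT helper and balance every
 * pin with an unpin,
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 *	if (ret)
 *		return ret;
 *	...
 *	i915_gem_object_unpin(obj);
 *
 * binding to the address space happens lazily on the first pin.
 */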
2332 Serge 3711
 
3031 serge 3712
int
3713
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3714
		   struct drm_file *file)
3715
{
3716
	struct drm_i915_gem_pin *args = data;
3717
	struct drm_i915_gem_object *obj;
3718
	int ret;
2332 Serge 3719
 
3480 Serge 3720
     if(args->handle == -2)
3721
     {
3722
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3723
        return 0;
3724
     }
3725
 
3031 serge 3726
	ret = i915_mutex_lock_interruptible(dev);
3727
	if (ret)
3728
		return ret;
2332 Serge 3729
 
3031 serge 3730
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3731
	if (&obj->base == NULL) {
3732
		ret = -ENOENT;
3733
		goto unlock;
3734
	}
2332 Serge 3735
 
3031 serge 3736
	if (obj->madv != I915_MADV_WILLNEED) {
3737
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
3738
		ret = -EINVAL;
3739
		goto out;
3740
	}
2332 Serge 3741
 
3031 serge 3742
	if (obj->pin_filp != NULL && obj->pin_filp != file) {
3743
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3744
			  args->handle);
3745
		ret = -EINVAL;
3746
		goto out;
3747
	}
2332 Serge 3748
 
3243 Serge 3749
	if (obj->user_pin_count == 0) {
4104 Serge 3750
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3031 serge 3751
		if (ret)
3752
			goto out;
3753
	}
2332 Serge 3754
 
3243 Serge 3755
	obj->user_pin_count++;
3756
	obj->pin_filp = file;
3757
 
4104 Serge 3758
	args->offset = i915_gem_obj_ggtt_offset(obj);
3031 serge 3759
out:
3760
	drm_gem_object_unreference(&obj->base);
3761
unlock:
3762
	mutex_unlock(&dev->struct_mutex);
3763
	return ret;
3764
}
2332 Serge 3765
 
3031 serge 3766
int
3767
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3768
		     struct drm_file *file)
3769
{
3770
	struct drm_i915_gem_pin *args = data;
3771
	struct drm_i915_gem_object *obj;
3772
	int ret;
2332 Serge 3773
 
3031 serge 3774
	ret = i915_mutex_lock_interruptible(dev);
3775
	if (ret)
3776
		return ret;
2332 Serge 3777
 
4246 Serge 3778
	if (args->handle == -2) {
3779
		obj = get_fb_obj();
3780
		drm_gem_object_reference(&obj->base);
3781
	} else {
3782
		obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3783
	}
3031 serge 3784

3785
	if (&obj->base == NULL) {
3786
		ret = -ENOENT;
3787
		goto unlock;
3788
	}
2332 Serge 3789
 
3031 serge 3790
	if (obj->pin_filp != file) {
3791
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3792
			  args->handle);
3793
		ret = -EINVAL;
3794
		goto out;
3795
	}
3796
	obj->user_pin_count--;
3797
	if (obj->user_pin_count == 0) {
3798
		obj->pin_filp = NULL;
3799
		i915_gem_object_unpin(obj);
3800
	}
2332 Serge 3801
 
3031 serge 3802
out:
3803
	drm_gem_object_unreference(&obj->base);
3804
unlock:
3805
	mutex_unlock(&dev->struct_mutex);
3806
	return ret;
3807
}
2332 Serge 3808
 
3031 serge 3809
int
3810
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3811
		    struct drm_file *file)
3812
{
3813
	struct drm_i915_gem_busy *args = data;
3814
	struct drm_i915_gem_object *obj;
3815
	int ret;
2332 Serge 3816
 
3031 serge 3817
	ret = i915_mutex_lock_interruptible(dev);
3818
	if (ret)
3819
		return ret;
2332 Serge 3820
 
3480 Serge 3821
	if (args->handle == -2) {
3822
		obj = get_fb_obj();
3823
		drm_gem_object_reference(&obj->base);
3824
	} else {
3825
		obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3826
	}
4104 Serge 3827

3031 serge 3828
	if (&obj->base == NULL) {
3829
		ret = -ENOENT;
3830
		goto unlock;
3831
	}
2332 Serge 3832
 
3031 serge 3833
	/* Count all active objects as busy, even if they are currently not used
3834
	 * by the gpu. Users of this interface expect objects to eventually
3835
	 * become non-busy without any further actions, therefore emit any
3836
	 * necessary flushes here.
3837
	 */
3838
	ret = i915_gem_object_flush_active(obj);
2332 Serge 3839
 
3031 serge 3840
	args->busy = obj->active;
3841
	if (obj->ring) {
3842
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
3843
		args->busy |= intel_ring_flag(obj->ring) << 16;
3844
	}
2332 Serge 3845
 
3031 serge 3846
	drm_gem_object_unreference(&obj->base);
3847
unlock:
3848
	mutex_unlock(&dev->struct_mutex);
3849
	return ret;
3850
}
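
/* Result encoding sketch for the ioctl above: bit 0 of args->busy reports
 * whether the object is still active, and, when obj->ring is non-NULL,
 * intel_ring_flag() of the last ring to use it lands in bits 16 and up:
 *
 *	args->busy = obj->active | (intel_ring_flag(obj->ring) << 16);
 */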
2332 Serge 3851
 
3031 serge 3852
int
3853
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3854
			struct drm_file *file_priv)
3855
{
3856
	return i915_gem_ring_throttle(dev, file_priv);
3857
}
2332 Serge 3858
 
3263 Serge 3859
#if 0
3860
 
3031 serge 3861
int
3862
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3863
		       struct drm_file *file_priv)
3864
{
3865
	struct drm_i915_gem_madvise *args = data;
3866
	struct drm_i915_gem_object *obj;
3867
	int ret;
2332 Serge 3868
 
3031 serge 3869
	switch (args->madv) {
3870
	case I915_MADV_DONTNEED:
3871
	case I915_MADV_WILLNEED:
3872
	    break;
3873
	default:
3874
	    return -EINVAL;
3875
	}
2332 Serge 3876
 
3031 serge 3877
	ret = i915_mutex_lock_interruptible(dev);
3878
	if (ret)
3879
		return ret;
2332 Serge 3880
 
3031 serge 3881
	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3882
	if (&obj->base == NULL) {
3883
		ret = -ENOENT;
3884
		goto unlock;
3885
	}
2332 Serge 3886
 
3031 serge 3887
	if (obj->pin_count) {
3888
		ret = -EINVAL;
3889
		goto out;
3890
	}
2332 Serge 3891
 
3031 serge 3892
	if (obj->madv != __I915_MADV_PURGED)
3893
		obj->madv = args->madv;
2332 Serge 3894
 
3031 serge 3895
	/* if the object is no longer attached, discard its backing storage */
3896
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3897
		i915_gem_object_truncate(obj);
2332 Serge 3898
 
3031 serge 3899
	args->retained = obj->madv != __I915_MADV_PURGED;
2332 Serge 3900
 
3031 serge 3901
out:
3902
	drm_gem_object_unreference(&obj->base);
3903
unlock:
3904
	mutex_unlock(&dev->struct_mutex);
3905
	return ret;
3906
}
3907
#endif
2332 Serge 3908
 
3031 serge 3909
void i915_gem_object_init(struct drm_i915_gem_object *obj,
3910
			  const struct drm_i915_gem_object_ops *ops)
3911
{
4104 Serge 3912
	INIT_LIST_HEAD(&obj->global_list);
3031 serge 3913
	INIT_LIST_HEAD(&obj->ring_list);
3914
	INIT_LIST_HEAD(&obj->exec_list);
4104 Serge 3915
	INIT_LIST_HEAD(&obj->obj_exec_link);
3916
	INIT_LIST_HEAD(&obj->vma_list);
2332 Serge 3917
 
3031 serge 3918
	obj->ops = ops;
3919
 
3920
	obj->fence_reg = I915_FENCE_REG_NONE;
3921
	obj->madv = I915_MADV_WILLNEED;
3922
	/* Avoid an unnecessary call to unbind on the first bind. */
3923
	obj->map_and_fenceable = true;
3924
 
3925
	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3926
}
3927
 
3928
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3929
	.get_pages = i915_gem_object_get_pages_gtt,
3930
	.put_pages = i915_gem_object_put_pages_gtt,
3931
};
3932
 
2332 Serge 3933
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3934
						  size_t size)
3935
{
3936
	struct drm_i915_gem_object *obj;
3031 serge 3937
	struct address_space *mapping;
3480 Serge 3938
	gfp_t mask;
2340 Serge 3939
 
3746 Serge 3940
	obj = i915_gem_object_alloc(dev);
2332 Serge 3941
	if (obj == NULL)
3942
		return NULL;
3943
 
3944
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4104 Serge 3945
		i915_gem_object_free(obj);
2332 Serge 3946
		return NULL;
3947
	}
3948
 
3949
 
3031 serge 3950
	i915_gem_object_init(obj, &i915_gem_object_ops);
2332 Serge 3951
 
3952
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3953
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3954
 
3031 serge 3955
	if (HAS_LLC(dev)) {
3956
		/* On some devices, we can have the GPU use the LLC (the CPU
2332 Serge 3957
		 * cache) for about a 10% performance improvement
3958
		 * compared to uncached.  Graphics requests other than
3959
		 * display scanout are coherent with the CPU in
3960
		 * accessing this cache.  This means in this mode we
3961
		 * don't need to clflush on the CPU side, and on the
3962
		 * GPU side we only need to flush internal caches to
3963
		 * get data visible to the CPU.
3964
		 *
3965
		 * However, we maintain the display planes as UC, and so
3966
		 * need to rebind when first used as such.
3967
		 */
3968
		obj->cache_level = I915_CACHE_LLC;
3969
	} else
3970
		obj->cache_level = I915_CACHE_NONE;
3971
 
3972
	return obj;
3973
}
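
/* Allocation sketch (illustrative): callers round the size up to a page,
 *
 *	obj = i915_gem_alloc_object(dev, ALIGN(size, PAGE_SIZE));
 *	if (obj == NULL)
 *		return -ENOMEM;
 *
 * New objects start in the CPU read/write domain and default to
 * I915_CACHE_LLC on HAS_LLC() parts, I915_CACHE_NONE otherwise.
 */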
3974
 
2344 Serge 3975
int i915_gem_init_object(struct drm_gem_object *obj)
3976
{
3977
	BUG();
2332 Serge 3978
 
2344 Serge 3979
	return 0;
3980
}
2332 Serge 3981
 
3031 serge 3982
void i915_gem_free_object(struct drm_gem_object *gem_obj)
2344 Serge 3983
{
3031 serge 3984
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
2344 Serge 3985
	struct drm_device *dev = obj->base.dev;
3986
	drm_i915_private_t *dev_priv = dev->dev_private;
4104 Serge 3987
	struct i915_vma *vma, *next;
2332 Serge 3988
 
3031 serge 3989
	trace_i915_gem_object_destroy(obj);
3990
 
3991
 
3992
	obj->pin_count = 0;
4104 Serge 3993
	/* NB: 0 or 1 elements */
3994
	WARN_ON(!list_empty(&obj->vma_list) &&
3995
		!list_is_singular(&obj->vma_list));
3996
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3997
		int ret = i915_vma_unbind(vma);
3998
		if (WARN_ON(ret == -ERESTARTSYS)) {
3031 serge 3999
			bool was_interruptible;
4000

4001
			was_interruptible = dev_priv->mm.interruptible;
4002
			dev_priv->mm.interruptible = false;
4003

4104 Serge 4004
			WARN_ON(i915_vma_unbind(vma));
3031 serge 4005

4006
			dev_priv->mm.interruptible = was_interruptible;
2344 Serge 4007
		}
4104 Serge 4008
	}
2332 Serge 4009
 
4104 Serge 4010
	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4011
	 * before progressing. */
4012
	if (obj->stolen)
4013
		i915_gem_object_unpin_pages(obj);
4014
 
4015
	if (WARN_ON(obj->pages_pin_count))
3031 serge 4016
		obj->pages_pin_count = 0;
4017
	i915_gem_object_put_pages(obj);
4018
//   i915_gem_object_free_mmap_offset(obj);
4104 Serge 4019
	i915_gem_object_release_stolen(obj);
2332 Serge 4020
 
3243 Serge 4021
	BUG_ON(obj->pages);
2332 Serge 4022
 
3031 serge 4023
 
3290 Serge 4024
    if(obj->base.filp != NULL)
4025
    {
3298 Serge 4026
//        printf("filp %p\n", obj->base.filp);
3290 Serge 4027
        shmem_file_delete(obj->base.filp);
4028
    }
4029
 
2344 Serge 4030
	drm_gem_object_release(&obj->base);
4031
	i915_gem_info_remove_obj(dev_priv, obj->base.size);
2332 Serge 4032
 
2344 Serge 4033
	kfree(obj->bit_17);
4104 Serge 4034
	i915_gem_object_free(obj);
2344 Serge 4035
}
2332 Serge 4036
 
4104 Serge 4037
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4038
				     struct i915_address_space *vm)
4039
{
4040
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4041
	if (vma == NULL)
4042
		return ERR_PTR(-ENOMEM);
4043
 
4044
	INIT_LIST_HEAD(&vma->vma_link);
4045
	INIT_LIST_HEAD(&vma->mm_list);
4046
	INIT_LIST_HEAD(&vma->exec_list);
4047
	vma->vm = vm;
4048
	vma->obj = obj;
4049
 
4050
	/* Keep GGTT vmas first to make debug easier */
4051
	if (i915_is_ggtt(vm))
4052
		list_add(&vma->vma_link, &obj->vma_list);
4053
	else
4054
		list_add_tail(&vma->vma_link, &obj->vma_list);
4055
 
4056
	return vma;
4057
}
4058
 
4059
void i915_gem_vma_destroy(struct i915_vma *vma)
4060
{
4061
	WARN_ON(vma->node.allocated);
4062
	list_del(&vma->vma_link);
4063
	kfree(vma);
4064
}
4065
 
3031 serge 4066
#if 0
4067
int
4068
i915_gem_idle(struct drm_device *dev)
2344 Serge 4069
{
3031 serge 4070
	drm_i915_private_t *dev_priv = dev->dev_private;
4071
	int ret;
2332 Serge 4072
 
4104 Serge 4073
	if (dev_priv->ums.mm_suspended) {
3031 serge 4074
		mutex_unlock(&dev->struct_mutex);
4075
		return 0;
4076
	}
2332 Serge 4077
 
3031 serge 4078
	ret = i915_gpu_idle(dev);
4079
	if (ret) {
4080
		mutex_unlock(&dev->struct_mutex);
4081
		return ret;
4082
	}
4083
	i915_gem_retire_requests(dev);
4084
 
3480 Serge 4085
	/* Under UMS, be paranoid and evict. */
4086
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
4087
		i915_gem_evict_everything(dev);
4088
 
4089
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3031 serge 4090
 
4091
	i915_kernel_lost_context(dev);
4092
	i915_gem_cleanup_ringbuffer(dev);
4093
 
4094
	/* Cancel the retire work handler, which should be idle now. */
3263 Serge 4095
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3031 serge 4096
 
4097
	return 0;
2344 Serge 4098
}
3031 serge 4099
#endif
2332 Serge 4100
 
3031 serge 4101
void i915_gem_l3_remap(struct drm_device *dev)
4102
{
4103
	drm_i915_private_t *dev_priv = dev->dev_private;
4104
	u32 misccpctl;
4105
	int i;
2332 Serge 4106
 
3480 Serge 4107
	if (!HAS_L3_GPU_CACHE(dev))
3031 serge 4108
		return;
2332 Serge 4109
 
3243 Serge 4110
	if (!dev_priv->l3_parity.remap_info)
3031 serge 4111
		return;
2332 Serge 4112
 
3031 serge 4113
	misccpctl = I915_READ(GEN7_MISCCPCTL);
4114
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4115
	POSTING_READ(GEN7_MISCCPCTL);
2332 Serge 4116
 
3031 serge 4117
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4118
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3243 Serge 4119
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3031 serge 4120
			DRM_DEBUG("0x%x was already programmed to %x\n",
4121
				  GEN7_L3LOG_BASE + i, remap);
3243 Serge 4122
		if (remap && !dev_priv->l3_parity.remap_info[i/4])
3031 serge 4123
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
3243 Serge 4124
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3031 serge 4125
	}
2332 Serge 4126
 
3031 serge 4127
	/* Make sure all the writes land before disabling dop clock gating */
4128
	POSTING_READ(GEN7_L3LOG_BASE);
2332 Serge 4129
 
3031 serge 4130
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4131
}
2332 Serge 4132
 
3031 serge 4133
void i915_gem_init_swizzling(struct drm_device *dev)
4134
{
4135
	drm_i915_private_t *dev_priv = dev->dev_private;
2332 Serge 4136
 
3031 serge 4137
	if (INTEL_INFO(dev)->gen < 5 ||
4138
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4139
		return;
2332 Serge 4140
 
3031 serge 4141
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4142
				 DISP_TILE_SURFACE_SWIZZLING);
2332 Serge 4143
 
3031 serge 4144
	if (IS_GEN5(dev))
4145
		return;
2344 Serge 4146
 
3031 serge 4147
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4148
	if (IS_GEN6(dev))
4149
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3480 Serge 4150
	else if (IS_GEN7(dev))
4151
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3031 serge 4152
	else
3480 Serge 4153
		BUG();
3031 serge 4154
}
4155
 
4156
static bool
4157
intel_enable_blt(struct drm_device *dev)
4158
{
4159
	if (!HAS_BLT(dev))
4160
		return false;
4161
 
4162
	/* The blitter was dysfunctional on early prototypes */
4163
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4164
		DRM_INFO("BLT not supported on this pre-production hardware;"
4165
			 " graphics performance will be degraded.\n");
4166
		return false;
4167
	}
4168
 
4169
	return true;
4170
}
4171
 
3480 Serge 4172
static int i915_gem_init_rings(struct drm_device *dev)
2332 Serge 4173
{
3480 Serge 4174
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 4175
	int ret;
2351 Serge 4176
 
2332 Serge 4177
	ret = intel_init_render_ring_buffer(dev);
4178
	if (ret)
4179
		return ret;
4180
 
4181
    if (HAS_BSD(dev)) {
4182
		ret = intel_init_bsd_ring_buffer(dev);
4183
		if (ret)
4184
			goto cleanup_render_ring;
4185
	}
4186
 
3031 serge 4187
	if (intel_enable_blt(dev)) {
2332 Serge 4188
		ret = intel_init_blt_ring_buffer(dev);
4189
		if (ret)
4190
			goto cleanup_bsd_ring;
4191
	}
4192
 
4104 Serge 4193
	if (HAS_VEBOX(dev)) {
4194
		ret = intel_init_vebox_ring_buffer(dev);
4195
		if (ret)
4196
			goto cleanup_blt_ring;
4197
	}
4198
 
4199
 
3480 Serge 4200
	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4201
	if (ret)
4104 Serge 4202
		goto cleanup_vebox_ring;
2351 Serge 4203
 
2332 Serge 4204
	return 0;
4205
 
4104 Serge 4206
cleanup_vebox_ring:
4207
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
3480 Serge 4208
cleanup_blt_ring:
4209
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
2332 Serge 4210
cleanup_bsd_ring:
4211
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4212
cleanup_render_ring:
4213
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3480 Serge 4214
 
2332 Serge 4215
	return ret;
4216
}
4217
 
3480 Serge 4218
int
4219
i915_gem_init_hw(struct drm_device *dev)
3031 serge 4220
{
3480 Serge 4221
	drm_i915_private_t *dev_priv = dev->dev_private;
4222
	int ret;
3031 serge 4223
 
3480 Serge 4224
	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4225
		return -EIO;
3031 serge 4226
 
4104 Serge 4227
	if (dev_priv->ellc_size)
4228
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
3480 Serge 4229
 
3746 Serge 4230
	if (HAS_PCH_NOP(dev)) {
4231
		u32 temp = I915_READ(GEN7_MSG_CTL);
4232
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4233
		I915_WRITE(GEN7_MSG_CTL, temp);
4234
	}
4235
 
3480 Serge 4236
	i915_gem_l3_remap(dev);
4237
 
4238
	i915_gem_init_swizzling(dev);
4239
 
4240
	ret = i915_gem_init_rings(dev);
4241
	if (ret)
4242
		return ret;
4243
 
4244
	/*
4245
	 * XXX: There was some w/a described somewhere suggesting loading
4246
	 * contexts before PPGTT.
4247
	 */
4248
	i915_gem_context_init(dev);
3746 Serge 4249
	if (dev_priv->mm.aliasing_ppgtt) {
4250
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4251
		if (ret) {
4252
			i915_gem_cleanup_aliasing_ppgtt(dev);
4253
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4254
		}
4255
	}
3480 Serge 4256
 
4257
	return 0;
3031 serge 4258
}
4259
 
4260
int i915_gem_init(struct drm_device *dev)
4261
{
4262
	struct drm_i915_private *dev_priv = dev->dev_private;
4263
	int ret;
4264
 
4265
	mutex_lock(&dev->struct_mutex);
3746 Serge 4266
 
4267
	if (IS_VALLEYVIEW(dev)) {
4268
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
4269
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4270
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4271
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4272
	}
4273
 
3480 Serge 4274
	i915_gem_init_global_gtt(dev);
3746 Serge 4275
 
3031 serge 4276
	ret = i915_gem_init_hw(dev);
4277
	mutex_unlock(&dev->struct_mutex);
4278
	if (ret) {
4279
		i915_gem_cleanup_aliasing_ppgtt(dev);
4280
		return ret;
4281
	}
4282
 
3746 Serge 4283
 
3031 serge 4284
    return 0;
4285
}
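
/* Bring-up order, as implemented above: i915_gem_init() takes struct_mutex,
 * sets up the global GTT, then i915_gem_init_hw() performs the L3 remap,
 * swizzling setup, ring initialisation, context init and, when an aliasing
 * PPGTT exists, its enable hook; failure tears the aliasing PPGTT back down.
 */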
4286
 
2332 Serge 4287
void
4288
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4289
{
4290
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 4291
	struct intel_ring_buffer *ring;
2332 Serge 4292
	int i;
4293
 
3031 serge 4294
	for_each_ring(ring, dev_priv, i)
4295
		intel_cleanup_ring_buffer(ring);
2332 Serge 4296
}
4297
 
3031 serge 4298
#if 0
4299
 
2332 Serge 4300
int
4301
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4302
		       struct drm_file *file_priv)
4303
{
4104 Serge 4304
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4305
	int ret;
2332 Serge 4306
 
4307
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4308
		return 0;
4309
 
3480 Serge 4310
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2332 Serge 4311
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
3480 Serge 4312
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
2332 Serge 4313
	}
4314
 
4315
	mutex_lock(&dev->struct_mutex);
4104 Serge 4316
	dev_priv->ums.mm_suspended = 0;
2332 Serge 4317
 
3031 serge 4318
	ret = i915_gem_init_hw(dev);
2332 Serge 4319
	if (ret != 0) {
4320
		mutex_unlock(&dev->struct_mutex);
4321
		return ret;
4322
	}
4323
 
4104 Serge 4324
	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
2332 Serge 4325
	mutex_unlock(&dev->struct_mutex);
4326
 
4327
	ret = drm_irq_install(dev);
4328
	if (ret)
4329
		goto cleanup_ringbuffer;
4330
 
4331
	return 0;
4332
 
4333
cleanup_ringbuffer:
4334
	mutex_lock(&dev->struct_mutex);
4335
	i915_gem_cleanup_ringbuffer(dev);
4104 Serge 4336
	dev_priv->ums.mm_suspended = 1;
2332 Serge 4337
	mutex_unlock(&dev->struct_mutex);
4338
 
4339
	return ret;
4340
}
4341
 
4342
int
4343
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4344
		       struct drm_file *file_priv)
4345
{
4104 Serge 4346
	struct drm_i915_private *dev_priv = dev->dev_private;
4347
	int ret;
4348
 
2332 Serge 4349
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4350
		return 0;
4351
 
4352
	drm_irq_uninstall(dev);
4104 Serge 4353
 
4354
	mutex_lock(&dev->struct_mutex);
4355
	ret =  i915_gem_idle(dev);
4356
 
4357
	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
4358
	 * We need to replace this with a semaphore, or something.
4359
	 * And not confound ums.mm_suspended!
4360
	 */
4361
	if (ret != 0)
4362
		dev_priv->ums.mm_suspended = 1;
4363
	mutex_unlock(&dev->struct_mutex);
4364
 
4365
	return ret;
2332 Serge 4366
}
4367
 
4368
void
4369
i915_gem_lastclose(struct drm_device *dev)
4370
{
4371
	int ret;
4372
 
4373
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4374
		return;
4375
 
4104 Serge 4376
	mutex_lock(&dev->struct_mutex);
2332 Serge 4377
	ret = i915_gem_idle(dev);
4378
	if (ret)
4379
		DRM_ERROR("failed to idle hardware: %d\n", ret);
4104 Serge 4380
	mutex_unlock(&dev->struct_mutex);
2332 Serge 4381
}
4382
#endif
4383
 
4384
static void
2326 Serge 4385
init_ring_lists(struct intel_ring_buffer *ring)
4386
{
4387
    INIT_LIST_HEAD(&ring->active_list);
4388
    INIT_LIST_HEAD(&ring->request_list);
4389
}
4390
 
4104 Serge 4391
static void i915_init_vm(struct drm_i915_private *dev_priv,
4392
			 struct i915_address_space *vm)
4393
{
4394
	vm->dev = dev_priv->dev;
4395
	INIT_LIST_HEAD(&vm->active_list);
4396
	INIT_LIST_HEAD(&vm->inactive_list);
4397
	INIT_LIST_HEAD(&vm->global_link);
4398
	list_add(&vm->global_link, &dev_priv->vm_list);
4399
}
4400
 
2326 Serge 4401
void
4402
i915_gem_load(struct drm_device *dev)
4403
{
3480 Serge 4404
	drm_i915_private_t *dev_priv = dev->dev_private;
2326 Serge 4405
    int i;
4406
 
4104 Serge 4407
	INIT_LIST_HEAD(&dev_priv->vm_list);
4408
	i915_init_vm(dev_priv, &dev_priv->gtt.base);
4409
 
3031 serge 4410
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4411
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
2326 Serge 4412
    INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4413
    for (i = 0; i < I915_NUM_RINGS; i++)
4414
        init_ring_lists(&dev_priv->ring[i]);
2342 Serge 4415
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
2326 Serge 4416
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
2360 Serge 4417
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4418
			  i915_gem_retire_work_handler);
3480 Serge 4419
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
2326 Serge 4420
 
4421
    /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4422
    if (IS_GEN3(dev)) {
3031 serge 4423
		I915_WRITE(MI_ARB_STATE,
4424
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
2326 Serge 4425
    }
4426
 
4427
    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4428
 
3746 Serge 4429
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4430
		dev_priv->num_fence_regs = 32;
4431
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2326 Serge 4432
        dev_priv->num_fence_regs = 16;
4433
    else
4434
        dev_priv->num_fence_regs = 8;
4435
 
4436
    /* Initialize fence registers to zero */
3746 Serge 4437
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4438
	i915_gem_restore_fences(dev);
2326 Serge 4439
 
4440
    i915_gem_detect_bit_6_swizzle(dev);
4441
 
4442
    dev_priv->mm.interruptible = true;
4443
 
4444
}
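
/* Fence register sizing above: gen7+ parts (except Valleyview) expose 32
 * fence registers, gen4+ and 945G/945GM/G33 expose 16, everything older
 * gets 8; they are initialised to zero via i915_gem_restore_fences().
 */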
4445
 
4104 Serge 4446
#if 0
4447
/*
4448
 * Create a physically contiguous memory object for this object
4449
 * e.g. for cursor + overlay regs
4450
 */
4451
static int i915_gem_init_phys_object(struct drm_device *dev,
4452
				     int id, int size, int align)
4453
{
4454
	drm_i915_private_t *dev_priv = dev->dev_private;
4455
	struct drm_i915_gem_phys_object *phys_obj;
4456
	int ret;
2326 Serge 4457
 
4104 Serge 4458
	if (dev_priv->mm.phys_objs[id - 1] || !size)
4459
		return 0;
4460
 
4461
	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4462
	if (!phys_obj)
4463
		return -ENOMEM;
4464
 
4465
	phys_obj->id = id;
4466
 
4467
	phys_obj->handle = drm_pci_alloc(dev, size, align);
4468
	if (!phys_obj->handle) {
4469
		ret = -ENOMEM;
4470
		goto kfree_obj;
4471
	}
4472
#ifdef CONFIG_X86
4473
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4474
#endif
4475
 
4476
	dev_priv->mm.phys_objs[id - 1] = phys_obj;
4477
 
4478
	return 0;
4479
kfree_obj:
4480
	kfree(phys_obj);
4481
	return ret;
4482
}
4483
 
4484
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4485
{
4486
	drm_i915_private_t *dev_priv = dev->dev_private;
4487
	struct drm_i915_gem_phys_object *phys_obj;
4488
 
4489
	if (!dev_priv->mm.phys_objs[id - 1])
4490
		return;
4491
 
4492
	phys_obj = dev_priv->mm.phys_objs[id - 1];
4493
	if (phys_obj->cur_obj) {
4494
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4495
	}
4496
 
4497
#ifdef CONFIG_X86
4498
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4499
#endif
4500
	drm_pci_free(dev, phys_obj->handle);
4501
	kfree(phys_obj);
4502
	dev_priv->mm.phys_objs[id - 1] = NULL;
4503
}
4504
 
4505
void i915_gem_free_all_phys_object(struct drm_device *dev)
4506
{
4507
	int i;
4508
 
4509
	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4510
		i915_gem_free_phys_object(dev, i);
4511
}
4512
 
4513
void i915_gem_detach_phys_object(struct drm_device *dev,
4514
				 struct drm_i915_gem_object *obj)
4515
{
4516
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4517
	char *vaddr;
4518
	int i;
4519
	int page_count;
4520
 
4521
	if (!obj->phys_obj)
4522
		return;
4523
	vaddr = obj->phys_obj->handle->vaddr;
4524
 
4525
	page_count = obj->base.size / PAGE_SIZE;
4526
	for (i = 0; i < page_count; i++) {
4527
		struct page *page = shmem_read_mapping_page(mapping, i);
4528
		if (!IS_ERR(page)) {
4529
			char *dst = kmap_atomic(page);
4530
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4531
			kunmap_atomic(dst);
4532
 
4533
			drm_clflush_pages(&page, 1);
4534
 
4535
			set_page_dirty(page);
4536
			mark_page_accessed(page);
4537
			page_cache_release(page);
4538
		}
4539
	}
4540
	i915_gem_chipset_flush(dev);
4541
 
4542
	obj->phys_obj->cur_obj = NULL;
4543
	obj->phys_obj = NULL;
4544
}
4545
 
4546
int
4547
i915_gem_attach_phys_object(struct drm_device *dev,
4548
			    struct drm_i915_gem_object *obj,
4549
			    int id,
4550
			    int align)
4551
{
4552
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4553
	drm_i915_private_t *dev_priv = dev->dev_private;
4554
	int ret = 0;
4555
	int page_count;
4556
	int i;
4557
 
4558
	if (id > I915_MAX_PHYS_OBJECT)
4559
		return -EINVAL;
4560
 
4561
	if (obj->phys_obj) {
4562
		if (obj->phys_obj->id == id)
4563
			return 0;
4564
		i915_gem_detach_phys_object(dev, obj);
4565
	}
4566
 
4567
	/* create a new object */
4568
	if (!dev_priv->mm.phys_objs[id - 1]) {
4569
		ret = i915_gem_init_phys_object(dev, id,
4570
						obj->base.size, align);
4571
		if (ret) {
4572
			DRM_ERROR("failed to init phys object %d size: %zu\n",
4573
				  id, obj->base.size);
4574
			return ret;
4575
		}
4576
	}
4577
 
4578
	/* bind to the object */
4579
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4580
	obj->phys_obj->cur_obj = obj;
4581
 
4582
	page_count = obj->base.size / PAGE_SIZE;
4583
 
4584
	for (i = 0; i < page_count; i++) {
4585
		struct page *page;
4586
		char *dst, *src;
4587
 
4588
		page = shmem_read_mapping_page(mapping, i);
4589
		if (IS_ERR(page))
4590
			return PTR_ERR(page);
4591
 
4592
		src = kmap_atomic(page);
4593
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4594
		memcpy(dst, src, PAGE_SIZE);
4595
		kunmap_atomic(src);
4596
 
4597
		mark_page_accessed(page);
4598
		page_cache_release(page);
4599
	}
4600
 
4601
	return 0;
4602
}
4603
 
4604
static int
4605
i915_gem_phys_pwrite(struct drm_device *dev,
4606
		     struct drm_i915_gem_object *obj,
4607
		     struct drm_i915_gem_pwrite *args,
4608
		     struct drm_file *file_priv)
4609
{
4610
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4611
	char __user *user_data = to_user_ptr(args->data_ptr);
4612
 
4613
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4614
		unsigned long unwritten;
4615
 
4616
		/* The physical object once assigned is fixed for the lifetime
4617
		 * of the obj, so we can safely drop the lock and continue
4618
		 * to access vaddr.
4619
		 */
4620
		mutex_unlock(&dev->struct_mutex);
4621
		unwritten = copy_from_user(vaddr, user_data, args->size);
4622
		mutex_lock(&dev->struct_mutex);
4623
		if (unwritten)
4624
			return -EFAULT;
4625
	}
4626
 
4627
	i915_gem_chipset_flush(dev);
4628
	return 0;
4629
}
4630
 
4631
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4632
{
4633
	struct drm_i915_file_private *file_priv = file->driver_priv;
4634
 
4635
	/* Clean up our request list when the client is going away, so that
4636
	 * later retire_requests won't dereference our soon-to-be-gone
4637
	 * file_priv.
4638
	 */
4639
	spin_lock(&file_priv->mm.lock);
4640
	while (!list_empty(&file_priv->mm.request_list)) {
4641
		struct drm_i915_gem_request *request;
4642
 
4643
		request = list_first_entry(&file_priv->mm.request_list,
4644
					   struct drm_i915_gem_request,
4645
					   client_list);
4646
		list_del(&request->client_list);
4647
		request->file_priv = NULL;
4648
	}
4649
	spin_unlock(&file_priv->mm.lock);
4650
}
4651
#endif
4652
 
4653
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4654
{
4655
	if (!mutex_is_locked(mutex))
4656
		return false;
4657
 
4658
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4659
	return mutex->owner == task;
4660
#else
4661
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
4662
	return false;
4663
#endif
4664
}
4665
 
4666
/* All the new VM stuff */
4667
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4668
				  struct i915_address_space *vm)
4669
{
4670
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4671
	struct i915_vma *vma;
4672
 
4673
	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4674
		vm = &dev_priv->gtt.base;
4675
 
4676
	BUG_ON(list_empty(&o->vma_list));
4677
	list_for_each_entry(vma, &o->vma_list, vma_link) {
4678
		if (vma->vm == vm)
4679
			return vma->node.start;
4680
 
4681
	}
4682
    return 0; //-1;
4683
}
4684
 
4685
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4686
			struct i915_address_space *vm)
4687
{
4688
	struct i915_vma *vma;
4689
 
4690
	list_for_each_entry(vma, &o->vma_list, vma_link)
4691
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4692
			return true;
4693
 
4694
	return false;
4695
}
4696
 
4697
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4698
{
4699
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4700
	struct i915_address_space *vm;
4701
 
4702
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4703
		if (i915_gem_obj_bound(o, vm))
4704
			return true;
4705
 
4706
	return false;
4707
}
4708
 
4709
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4710
				struct i915_address_space *vm)
4711
{
4712
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4713
	struct i915_vma *vma;
4714
 
4715
	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4716
		vm = &dev_priv->gtt.base;
4717
 
4718
	BUG_ON(list_empty(&o->vma_list));
4719
 
4720
	list_for_each_entry(vma, &o->vma_list, vma_link)
4721
		if (vma->vm == vm)
4722
			return vma->node.size;
4723
 
4724
	return 0;
4725
}
4726
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4727
				     struct i915_address_space *vm)
4728
{
4729
	struct i915_vma *vma;
4730
	list_for_each_entry(vma, &obj->vma_list, vma_link)
4731
		if (vma->vm == vm)
4732
			return vma;
4733
 
4734
	return NULL;
4735
}
4736
 
4737
struct i915_vma *
4738
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4739
				  struct i915_address_space *vm)
4740
{
4741
	struct i915_vma *vma;
4742
 
4743
	vma = i915_gem_obj_to_vma(obj, vm);
4744
	if (!vma)
4745
		vma = i915_gem_vma_create(obj, vm);
4746
 
4747
	return vma;
4748
}
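
/* Lookup-or-create sketch: callers that may touch an address space for the
 * first time use
 *
 *	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * since i915_gem_vma_create() hands back ERR_PTR(-ENOMEM) when allocation
 * fails.
 */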