Subversion Repositories Kolibri OS


Rev Author Line No. Line
2326 Serge 1
/*
2
 * Copyright © 2008 Intel Corporation
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21
 * IN THE SOFTWARE.
22
 *
23
 * Authors:
24
 *    Eric Anholt 
25
 *
26
 */
27
 
3031 serge 28
#include 
4280 Serge 29
#include 
3031 serge 30
#include 
2326 Serge 31
#include "i915_drv.h"
2351 Serge 32
#include "i915_trace.h"
2326 Serge 33
#include "intel_drv.h"
3260 Serge 34
#include 
2330 Serge 35
#include 
2326 Serge 36
//#include 
3746 Serge 37
#include 
2326 Serge 38
#include 
39
 
2344 Serge 40
extern int x86_clflush_size;
2332 Serge 41
 
3263 Serge 42
#define PROT_READ       0x1             /* page can be read */
43
#define PROT_WRITE      0x2             /* page can be written */
44
#define MAP_SHARED      0x01            /* Share changes */
45
 
2344 Serge 46
#undef mb
47
#undef rmb
48
#undef wmb
49
#define mb() asm volatile("mfence")
50
#define rmb() asm volatile ("lfence")
51
#define wmb() asm volatile ("sfence")
52
 
3266 Serge 53
struct drm_i915_gem_object *get_fb_obj();
54
 
3263 Serge 55
unsigned long vm_mmap(struct file *file, unsigned long addr,
56
         unsigned long len, unsigned long prot,
57
         unsigned long flag, unsigned long offset);
58
 
2344 Serge 59
static inline void clflush(volatile void *__p)
60
{
61
    asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
62
}
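/*
 * A minimal illustrative sketch of how the barriers and clflush helper above
 * combine to flush an arbitrary byte range, assuming x86_clflush_size holds
 * the CPU cache-line size declared earlier in this file.
 */
static inline void clflush_range_sketch(volatile void *addr, unsigned long len)
{
    unsigned long line = x86_clflush_size;
    char *p   = (char *)((unsigned long)addr & ~(line - 1));
    char *end = (char *)addr + len;

    mb();                            /* order earlier stores before the flush */
    for (; p < end; p += line)
        clflush(p);
    mb();                            /* make the flushed lines globally visible */
}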
63
 
2332 Serge 64
#define MAX_ERRNO       4095
65
 
66
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
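/*
 * A minimal usage sketch: IS_ERR_VALUE() treats the top MAX_ERRNO addresses
 * as encoded negative error codes, which is how the vm_mmap() result is
 * checked later in i915_gem_mmap_ioctl():
 *
 *     addr = vm_mmap(obj->filp, 0, args->size,
 *                    PROT_READ | PROT_WRITE, MAP_SHARED, args->offset);
 *     if (IS_ERR_VALUE(addr))
 *         return addr;    /* the value is a negative errno, e.g. -ENOMEM */
 */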
67
 
68
 
2326 Serge 69
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
70
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
71
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
72
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
73
 
2332 Serge 74
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
4104 Serge 75
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
76
						   bool force);
77
static __must_check int
78
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
79
			   struct i915_address_space *vm,
2332 Serge 80
						    unsigned alignment,
3031 serge 81
						    bool map_and_fenceable,
82
						    bool nonblocking);
2332 Serge 83
static int i915_gem_phys_pwrite(struct drm_device *dev,
84
				struct drm_i915_gem_object *obj,
85
				struct drm_i915_gem_pwrite *args,
86
				struct drm_file *file);
2326 Serge 87
 
3031 serge 88
static void i915_gem_write_fence(struct drm_device *dev, int reg,
89
				 struct drm_i915_gem_object *obj);
90
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
91
					 struct drm_i915_fence_reg *fence,
92
					 bool enable);
2332 Serge 93
 
3031 serge 94
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
4104 Serge 95
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
3031 serge 96
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
97
 
4104 Serge 98
static bool cpu_cache_is_coherent(struct drm_device *dev,
99
				  enum i915_cache_level level)
100
{
101
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
102
}
103
 
104
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
105
{
106
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
107
		return true;
108
 
109
	return obj->pin_display;
110
}
111
 
3031 serge 112
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
113
{
114
	if (obj->tiling_mode)
115
		i915_gem_release_mmap(obj);
116
 
117
	/* As we do not have an associated fence register, we will force
118
	 * a tiling change if we ever need to acquire one.
119
	 */
120
	obj->fence_dirty = false;
121
	obj->fence_reg = I915_FENCE_REG_NONE;
122
}
123
 
2332 Serge 124
/* some bookkeeping */
125
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
126
				  size_t size)
127
{
4104 Serge 128
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 129
	dev_priv->mm.object_count++;
130
	dev_priv->mm.object_memory += size;
4104 Serge 131
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 132
}
133
 
134
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
135
				     size_t size)
136
{
4104 Serge 137
	spin_lock(&dev_priv->mm.object_stat_lock);
2332 Serge 138
	dev_priv->mm.object_count--;
139
	dev_priv->mm.object_memory -= size;
4104 Serge 140
	spin_unlock(&dev_priv->mm.object_stat_lock);
2332 Serge 141
}
142
 
143
static int
3480 Serge 144
i915_gem_wait_for_error(struct i915_gpu_error *error)
2332 Serge 145
{
146
	int ret;
147
 
3480 Serge 148
#define EXIT_COND (!i915_reset_in_progress(error))
149
	if (EXIT_COND)
2332 Serge 150
		return 0;
3255 Serge 151
#if 0
3031 serge 152
	/*
153
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
154
	 * userspace. If it takes that long something really bad is going on and
155
	 * we should simply try to bail out and fail as gracefully as possible.
156
	 */
3480 Serge 157
	ret = wait_event_interruptible_timeout(error->reset_queue,
158
					       EXIT_COND,
159
					       10*HZ);
3031 serge 160
	if (ret == 0) {
161
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
162
		return -EIO;
163
	} else if (ret < 0) {
2332 Serge 164
		return ret;
3031 serge 165
	}
2332 Serge 166
 
3255 Serge 167
#endif
3480 Serge 168
#undef EXIT_COND
3255 Serge 169
 
2332 Serge 170
	return 0;
171
}
172
 
173
int i915_mutex_lock_interruptible(struct drm_device *dev)
174
{
3480 Serge 175
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 176
	int ret;
177
 
3480 Serge 178
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
2332 Serge 179
	if (ret)
180
		return ret;
181
 
3480 Serge 182
	ret = mutex_lock_interruptible(&dev->struct_mutex);
183
	if (ret)
184
		return ret;
2332 Serge 185
 
186
	WARN_ON(i915_verify_lists(dev));
187
	return 0;
188
}
189
 
190
static inline bool
191
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
192
{
4104 Serge 193
	return i915_gem_obj_bound_any(obj) && !obj->active;
2332 Serge 194
}
195
 
196
 
197
#if 0
198
 
199
int
200
i915_gem_init_ioctl(struct drm_device *dev, void *data,
201
		    struct drm_file *file)
202
{
3480 Serge 203
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 204
	struct drm_i915_gem_init *args = data;
205
 
3031 serge 206
	if (drm_core_check_feature(dev, DRIVER_MODESET))
207
		return -ENODEV;
208
 
2332 Serge 209
	if (args->gtt_start >= args->gtt_end ||
210
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
211
		return -EINVAL;
212
 
3031 serge 213
	/* GEM with user mode setting was never supported on ilk and later. */
214
	if (INTEL_INFO(dev)->gen >= 5)
215
		return -ENODEV;
216
 
2332 Serge 217
	mutex_lock(&dev->struct_mutex);
3480 Serge 218
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
219
				  args->gtt_end);
220
	dev_priv->gtt.mappable_end = args->gtt_end;
2332 Serge 221
	mutex_unlock(&dev->struct_mutex);
222
 
223
	return 0;
224
}
2351 Serge 225
#endif
2332 Serge 226
 
227
int
228
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
229
			    struct drm_file *file)
230
{
231
	struct drm_i915_private *dev_priv = dev->dev_private;
232
	struct drm_i915_gem_get_aperture *args = data;
233
	struct drm_i915_gem_object *obj;
234
	size_t pinned;
235
 
236
	pinned = 0;
237
	mutex_lock(&dev->struct_mutex);
4104 Serge 238
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
3031 serge 239
		if (obj->pin_count)
4104 Serge 240
			pinned += i915_gem_obj_ggtt_size(obj);
2332 Serge 241
	mutex_unlock(&dev->struct_mutex);
242
 
4104 Serge 243
	args->aper_size = dev_priv->gtt.base.total;
2342 Serge 244
	args->aper_available_size = args->aper_size - pinned;
2332 Serge 245
 
246
	return 0;
247
}
248
 
3480 Serge 249
void *i915_gem_object_alloc(struct drm_device *dev)
250
{
251
	struct drm_i915_private *dev_priv = dev->dev_private;
252
	return kmalloc(sizeof(struct drm_i915_gem_object), 0);
253
}
254
 
255
void i915_gem_object_free(struct drm_i915_gem_object *obj)
256
{
257
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
258
	kfree(obj);
259
}
260
 
3031 serge 261
static int
262
i915_gem_create(struct drm_file *file,
2332 Serge 263
		struct drm_device *dev,
264
		uint64_t size,
265
		uint32_t *handle_p)
266
{
267
	struct drm_i915_gem_object *obj;
268
	int ret;
269
	u32 handle;
270
 
271
	size = roundup(size, PAGE_SIZE);
2342 Serge 272
	if (size == 0)
273
		return -EINVAL;
2332 Serge 274
 
275
	/* Allocate the new object */
276
	obj = i915_gem_alloc_object(dev, size);
277
	if (obj == NULL)
278
		return -ENOMEM;
279
 
280
	ret = drm_gem_handle_create(file, &obj->base, &handle);
4104 Serge 281
	/* drop reference from allocate - handle holds it now */
282
	drm_gem_object_unreference_unlocked(&obj->base);
283
	if (ret)
2332 Serge 284
		return ret;
285
 
286
	*handle_p = handle;
287
	return 0;
288
}
289
 
290
int
291
i915_gem_dumb_create(struct drm_file *file,
292
		     struct drm_device *dev,
293
		     struct drm_mode_create_dumb *args)
294
{
295
	/* have to work out size/pitch and return them */
296
	args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
297
	args->size = args->pitch * args->height;
298
	return i915_gem_create(file, dev,
299
			       args->size, &args->handle);
300
}
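/*
 * Worked example of the dumb-buffer size calculation above: a 1024x768
 * buffer at 32 bpp gives pitch = ALIGN(1024 * 4, 64) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes (768 pages), which i915_gem_create()
 * then rounds up to PAGE_SIZE (already aligned in this case).
 */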
301
 
2326 Serge 302
/**
2332 Serge 303
 * Creates a new mm object and returns a handle to it.
304
 */
305
int
306
i915_gem_create_ioctl(struct drm_device *dev, void *data,
307
		      struct drm_file *file)
308
{
309
	struct drm_i915_gem_create *args = data;
3031 serge 310
 
2332 Serge 311
	return i915_gem_create(file, dev,
312
			       args->size, &args->handle);
313
}
314
 
315
 
3260 Serge 316
#if 0
2332 Serge 317
 
3031 serge 318
static inline int
319
__copy_to_user_swizzled(char __user *cpu_vaddr,
320
			const char *gpu_vaddr, int gpu_offset,
2332 Serge 321
		int length)
322
{
3031 serge 323
	int ret, cpu_offset = 0;
2332 Serge 324
 
3031 serge 325
	while (length > 0) {
326
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
327
		int this_length = min(cacheline_end - gpu_offset, length);
328
		int swizzled_gpu_offset = gpu_offset ^ 64;
2332 Serge 329
 
3031 serge 330
		ret = __copy_to_user(cpu_vaddr + cpu_offset,
331
				     gpu_vaddr + swizzled_gpu_offset,
332
				     this_length);
333
		if (ret)
334
			return ret + length;
2332 Serge 335
 
3031 serge 336
		cpu_offset += this_length;
337
		gpu_offset += this_length;
338
		length -= this_length;
339
	}
340
 
341
	return 0;
2332 Serge 342
}
343
 
3031 serge 344
static inline int
345
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
346
			  const char __user *cpu_vaddr,
347
			  int length)
2332 Serge 348
{
3031 serge 349
	int ret, cpu_offset = 0;
2332 Serge 350
 
351
	while (length > 0) {
352
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
353
		int this_length = min(cacheline_end - gpu_offset, length);
354
		int swizzled_gpu_offset = gpu_offset ^ 64;
355
 
3031 serge 356
		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
2332 Serge 357
			       cpu_vaddr + cpu_offset,
358
			       this_length);
3031 serge 359
		if (ret)
360
			return ret + length;
361
 
2332 Serge 362
		cpu_offset += this_length;
363
		gpu_offset += this_length;
364
		length -= this_length;
365
	}
366
 
3031 serge 367
	return 0;
2332 Serge 368
}
369
 
3031 serge 370
/* Per-page copy function for the shmem pread fastpath.
371
 * Flushes invalid cachelines before reading the target if
372
 * needs_clflush is set. */
2332 Serge 373
static int
3031 serge 374
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
375
		 char __user *user_data,
376
		 bool page_do_bit17_swizzling, bool needs_clflush)
377
{
378
		char *vaddr;
379
		int ret;
380
 
381
	if (unlikely(page_do_bit17_swizzling))
382
		return -EINVAL;
383
 
384
		vaddr = kmap_atomic(page);
385
	if (needs_clflush)
386
		drm_clflush_virt_range(vaddr + shmem_page_offset,
387
				       page_length);
388
		ret = __copy_to_user_inatomic(user_data,
389
				      vaddr + shmem_page_offset,
390
					      page_length);
391
		kunmap_atomic(vaddr);
392
 
393
	return ret ? -EFAULT : 0;
394
}
395
 
396
static void
397
shmem_clflush_swizzled_range(char *addr, unsigned long length,
398
			     bool swizzled)
399
{
400
	if (unlikely(swizzled)) {
401
		unsigned long start = (unsigned long) addr;
402
		unsigned long end = (unsigned long) addr + length;
403
 
404
		/* For swizzling simply ensure that we always flush both
405
		 * channels. Lame, but simple and it works. Swizzled
406
		 * pwrite/pread is far from a hotpath - current userspace
407
		 * doesn't use it at all. */
408
		start = round_down(start, 128);
409
		end = round_up(end, 128);
410
 
411
		drm_clflush_virt_range((void *)start, end - start);
412
	} else {
413
		drm_clflush_virt_range(addr, length);
414
	}
415
 
416
}
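/*
 * Worked example of the swizzled flush above: for addr = 0x1050 and
 * length = 0x20 the range ends at 0x1070, so start rounds down to 0x1000
 * and end rounds up to 0x1080. Both 64-byte halves of each 128-byte pair
 * are flushed, regardless of which channel the bit17 swizzle used.
 */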
417
 
418
/* Only difference to the fast-path function is that this can handle bit17
419
 * and uses non-atomic copy and kmap functions. */
420
static int
421
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
422
		 char __user *user_data,
423
		 bool page_do_bit17_swizzling, bool needs_clflush)
424
{
425
	char *vaddr;
426
	int ret;
427
 
428
	vaddr = kmap(page);
429
	if (needs_clflush)
430
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
431
					     page_length,
432
					     page_do_bit17_swizzling);
433
 
434
	if (page_do_bit17_swizzling)
435
		ret = __copy_to_user_swizzled(user_data,
436
					      vaddr, shmem_page_offset,
437
					      page_length);
438
	else
439
		ret = __copy_to_user(user_data,
440
				     vaddr + shmem_page_offset,
441
				     page_length);
442
	kunmap(page);
443
 
444
	return ret ? - EFAULT : 0;
445
}
446
 
447
static int
448
i915_gem_shmem_pread(struct drm_device *dev,
2332 Serge 449
			  struct drm_i915_gem_object *obj,
450
			  struct drm_i915_gem_pread *args,
451
			  struct drm_file *file)
452
{
3031 serge 453
	char __user *user_data;
2332 Serge 454
	ssize_t remain;
455
	loff_t offset;
3031 serge 456
	int shmem_page_offset, page_length, ret = 0;
457
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
458
	int prefaulted = 0;
459
	int needs_clflush = 0;
3746 Serge 460
	struct sg_page_iter sg_iter;
2332 Serge 461
 
3746 Serge 462
	user_data = to_user_ptr(args->data_ptr);
2332 Serge 463
	remain = args->size;
464
 
3031 serge 465
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
466
 
467
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
468
		/* If we're not in the cpu read domain, set ourself into the gtt
469
		 * read domain and manually flush cachelines (if required). This
470
		 * optimizes for the case when the gpu will dirty the data
471
		 * anyway again before the next pread happens. */
4104 Serge 472
		needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
473
		if (i915_gem_obj_bound_any(obj)) {
3031 serge 474
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
475
			if (ret)
476
				return ret;
477
		}
478
	}
479
 
480
	ret = i915_gem_object_get_pages(obj);
481
	if (ret)
482
		return ret;
483
 
484
	i915_gem_object_pin_pages(obj);
485
 
2332 Serge 486
	offset = args->offset;
487
 
3746 Serge 488
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
489
			 offset >> PAGE_SHIFT) {
490
		struct page *page = sg_page_iter_page(&sg_iter);
2332 Serge 491
 
3031 serge 492
		if (remain <= 0)
493
			break;
494
 
2332 Serge 495
		/* Operation in this page
496
		 *
3031 serge 497
		 * shmem_page_offset = offset within page in shmem file
2332 Serge 498
		 * page_length = bytes to copy for this page
499
		 */
3031 serge 500
		shmem_page_offset = offset_in_page(offset);
2332 Serge 501
		page_length = remain;
3031 serge 502
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
503
			page_length = PAGE_SIZE - shmem_page_offset;
2332 Serge 504
 
3031 serge 505
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
506
			(page_to_phys(page) & (1 << 17)) != 0;
2332 Serge 507
 
3031 serge 508
		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
509
				       user_data, page_do_bit17_swizzling,
510
				       needs_clflush);
511
		if (ret == 0)
512
			goto next_page;
2332 Serge 513
 
3031 serge 514
		mutex_unlock(&dev->struct_mutex);
515
 
4104 Serge 516
		if (likely(!i915_prefault_disable) && !prefaulted) {
3031 serge 517
			ret = fault_in_multipages_writeable(user_data, remain);
518
			/* Userspace is tricking us, but we've already clobbered
519
			 * its pages with the prefault and promised to write the
520
			 * data up to the first fault. Hence ignore any errors
521
			 * and just continue. */
522
			(void)ret;
523
			prefaulted = 1;
524
		}
525
 
526
		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
527
				       user_data, page_do_bit17_swizzling,
528
				       needs_clflush);
529
 
530
		mutex_lock(&dev->struct_mutex);
531
 
532
next_page:
2332 Serge 533
		mark_page_accessed(page);
3031 serge 534
 
2332 Serge 535
		if (ret)
3031 serge 536
			goto out;
2332 Serge 537
 
538
		remain -= page_length;
539
		user_data += page_length;
540
		offset += page_length;
541
	}
542
 
3031 serge 543
out:
544
	i915_gem_object_unpin_pages(obj);
545
 
546
	return ret;
2332 Serge 547
}
548
 
549
/**
3031 serge 550
 * Reads data from the object referenced by handle.
551
 *
552
 * On error, the contents of *data are undefined.
2332 Serge 553
 */
3031 serge 554
int
555
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
556
		     struct drm_file *file)
557
{
558
	struct drm_i915_gem_pread *args = data;
559
	struct drm_i915_gem_object *obj;
560
	int ret = 0;
561
 
562
	if (args->size == 0)
563
		return 0;
564
 
565
	if (!access_ok(VERIFY_WRITE,
3746 Serge 566
		       to_user_ptr(args->data_ptr),
3031 serge 567
		       args->size))
568
		return -EFAULT;
569
 
570
	ret = i915_mutex_lock_interruptible(dev);
571
	if (ret)
572
		return ret;
573
 
574
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
575
	if (&obj->base == NULL) {
576
		ret = -ENOENT;
577
		goto unlock;
578
	}
579
 
580
	/* Bounds check source.  */
581
	if (args->offset > obj->base.size ||
582
	    args->size > obj->base.size - args->offset) {
583
		ret = -EINVAL;
584
		goto out;
585
	}
586
 
587
	/* prime objects have no backing filp to GEM pread/pwrite
588
	 * pages from.
589
	 */
590
	if (!obj->base.filp) {
591
		ret = -EINVAL;
592
		goto out;
593
	}
594
 
595
	trace_i915_gem_object_pread(obj, args->offset, args->size);
596
 
597
	ret = i915_gem_shmem_pread(dev, obj, args, file);
598
 
599
out:
600
	drm_gem_object_unreference(&obj->base);
601
unlock:
602
	mutex_unlock(&dev->struct_mutex);
603
	return ret;
604
}
605
 
606
/* This is the fast write path which cannot handle
607
 * page faults in the source data
608
 */
609
 
610
static inline int
611
fast_user_write(struct io_mapping *mapping,
612
		loff_t page_base, int page_offset,
613
		char __user *user_data,
614
		int length)
615
{
616
	void __iomem *vaddr_atomic;
617
	void *vaddr;
618
	unsigned long unwritten;
619
 
620
	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
621
	/* We can use the cpu mem copy function because this is X86. */
622
	vaddr = (void __force*)vaddr_atomic + page_offset;
623
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
624
						      user_data, length);
625
	io_mapping_unmap_atomic(vaddr_atomic);
626
	return unwritten;
627
}
3260 Serge 628
#endif
3031 serge 629
 
3260 Serge 630
#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
3031 serge 631
/**
632
 * This is the fast pwrite path, where we copy the data directly from the
633
 * user into the GTT, uncached.
634
 */
2332 Serge 635
static int
3031 serge 636
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
637
			 struct drm_i915_gem_object *obj,
638
			 struct drm_i915_gem_pwrite *args,
639
			 struct drm_file *file)
2332 Serge 640
{
3031 serge 641
	drm_i915_private_t *dev_priv = dev->dev_private;
2332 Serge 642
	ssize_t remain;
3031 serge 643
	loff_t offset, page_base;
644
	char __user *user_data;
645
	int page_offset, page_length, ret;
3260 Serge 646
    char *vaddr;
2332 Serge 647
 
4104 Serge 648
	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
3031 serge 649
	if (ret)
650
		goto out;
651
 
652
	ret = i915_gem_object_set_to_gtt_domain(obj, true);
653
	if (ret)
654
		goto out_unpin;
655
 
656
	ret = i915_gem_object_put_fence(obj);
657
	if (ret)
658
		goto out_unpin;
659
 
3260 Serge 660
    vaddr = AllocKernelSpace(4096);
661
    if(vaddr == NULL)
662
    {
663
        ret = -ENOSPC;
664
        goto out_unpin;
665
    };
666
 
3031 serge 667
	user_data = (char __user *) (uintptr_t) args->data_ptr;
2332 Serge 668
	remain = args->size;
669
 
4104 Serge 670
	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
2332 Serge 671
 
3031 serge 672
	while (remain > 0) {
673
		/* Operation in this page
674
		 *
675
		 * page_base = page offset within aperture
676
		 * page_offset = offset within page
677
		 * page_length = bytes to copy for this page
678
		 */
679
		page_base = offset & PAGE_MASK;
680
		page_offset = offset_in_page(offset);
681
		page_length = remain;
682
		if ((page_offset + remain) > PAGE_SIZE)
683
			page_length = PAGE_SIZE - page_offset;
2332 Serge 684
 
3260 Serge 685
        MapPage(vaddr, page_base, PG_SW|PG_NOCACHE);
3031 serge 686
 
3260 Serge 687
        memcpy(vaddr+page_offset, user_data, page_length);
688
 
3031 serge 689
		remain -= page_length;
690
		user_data += page_length;
691
		offset += page_length;
2332 Serge 692
	}
693
 
3260 Serge 694
    FreeKernelSpace(vaddr);
695
 
3031 serge 696
out_unpin:
697
	i915_gem_object_unpin(obj);
698
out:
699
	return ret;
700
}
701
 
702
/* Per-page copy function for the shmem pwrite fastpath.
703
 * Flushes invalid cachelines before writing to the target if
704
 * needs_clflush_before is set and flushes out any written cachelines after
705
 * writing if needs_clflush is set. */
706
static int
707
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
708
		  char __user *user_data,
709
		  bool page_do_bit17_swizzling,
710
		  bool needs_clflush_before,
711
		  bool needs_clflush_after)
712
{
713
	char *vaddr;
3260 Serge 714
	int ret = 0;
3031 serge 715
 
716
	if (unlikely(page_do_bit17_swizzling))
717
		return -EINVAL;
718
 
3260 Serge 719
	vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
3031 serge 720
	if (needs_clflush_before)
721
		drm_clflush_virt_range(vaddr + shmem_page_offset,
722
				       page_length);
3260 Serge 723
	memcpy(vaddr + shmem_page_offset,
3031 serge 724
						user_data,
725
						page_length);
726
	if (needs_clflush_after)
727
		drm_clflush_virt_range(vaddr + shmem_page_offset,
728
				       page_length);
3260 Serge 729
	FreeKernelSpace(vaddr);
3031 serge 730
 
731
	return ret ? -EFAULT : 0;
732
}
3260 Serge 733
#if 0
3031 serge 734
 
735
/* Only difference to the fast-path function is that this can handle bit17
736
 * and uses non-atomic copy and kmap functions. */
737
static int
738
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
739
		  char __user *user_data,
740
		  bool page_do_bit17_swizzling,
741
		  bool needs_clflush_before,
742
		  bool needs_clflush_after)
743
{
744
	char *vaddr;
745
	int ret;
746
 
747
	vaddr = kmap(page);
748
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
749
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
750
					     page_length,
751
					     page_do_bit17_swizzling);
752
	if (page_do_bit17_swizzling)
753
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
754
						user_data,
755
						page_length);
756
	else
757
		ret = __copy_from_user(vaddr + shmem_page_offset,
758
				       user_data,
759
				       page_length);
760
	if (needs_clflush_after)
761
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
762
					     page_length,
763
					     page_do_bit17_swizzling);
764
	kunmap(page);
765
 
766
	return ret ? -EFAULT : 0;
767
}
3260 Serge 768
#endif
3031 serge 769
 
3260 Serge 770
 
3031 serge 771
static int
772
i915_gem_shmem_pwrite(struct drm_device *dev,
773
		      struct drm_i915_gem_object *obj,
774
		      struct drm_i915_gem_pwrite *args,
775
		      struct drm_file *file)
776
{
777
	ssize_t remain;
778
	loff_t offset;
779
	char __user *user_data;
780
	int shmem_page_offset, page_length, ret = 0;
781
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
782
	int hit_slowpath = 0;
783
	int needs_clflush_after = 0;
784
	int needs_clflush_before = 0;
3746 Serge 785
	struct sg_page_iter sg_iter;
3031 serge 786
 
3746 Serge 787
	user_data = to_user_ptr(args->data_ptr);
3031 serge 788
	remain = args->size;
789
 
790
	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
791
 
792
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
793
		/* If we're not in the cpu write domain, set ourself into the gtt
794
		 * write domain and manually flush cachelines (if required). This
795
		 * optimizes for the case when the gpu will use the data
796
		 * right away and we therefore have to clflush anyway. */
4104 Serge 797
		needs_clflush_after = cpu_write_needs_clflush(obj);
798
		if (i915_gem_obj_bound_any(obj)) {
3031 serge 799
			ret = i915_gem_object_set_to_gtt_domain(obj, true);
800
			if (ret)
801
				return ret;
802
		}
803
	}
4104 Serge 804
	/* Same trick applies to invalidate partially written cachelines read
805
	 * before writing. */
806
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
807
		needs_clflush_before =
808
			!cpu_cache_is_coherent(dev, obj->cache_level);
3031 serge 809
 
810
	ret = i915_gem_object_get_pages(obj);
2332 Serge 811
	if (ret)
3031 serge 812
		return ret;
2332 Serge 813
 
3031 serge 814
	i915_gem_object_pin_pages(obj);
2332 Serge 815
 
816
	offset = args->offset;
3031 serge 817
	obj->dirty = 1;
2332 Serge 818
 
3746 Serge 819
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
820
			 offset >> PAGE_SHIFT) {
821
		struct page *page = sg_page_iter_page(&sg_iter);
3031 serge 822
		int partial_cacheline_write;
2332 Serge 823
 
3031 serge 824
		if (remain <= 0)
825
			break;
826
 
2332 Serge 827
		/* Operation in this page
828
		 *
829
		 * shmem_page_offset = offset within page in shmem file
830
		 * page_length = bytes to copy for this page
831
		 */
832
		shmem_page_offset = offset_in_page(offset);
833
 
834
		page_length = remain;
835
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
836
			page_length = PAGE_SIZE - shmem_page_offset;
837
 
3031 serge 838
		/* If we don't overwrite a cacheline completely we need to be
839
		 * careful to have up-to-date data by first clflushing. Don't
840
		 * overcomplicate things and flush the entire patch. */
841
		partial_cacheline_write = needs_clflush_before &&
842
			((shmem_page_offset | page_length)
3260 Serge 843
				& (x86_clflush_size - 1));
2332 Serge 844
 
3031 serge 845
		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
846
			(page_to_phys(page) & (1 << 17)) != 0;
2332 Serge 847
 
3031 serge 848
		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
849
					user_data, page_do_bit17_swizzling,
850
					partial_cacheline_write,
851
					needs_clflush_after);
852
		if (ret == 0)
853
			goto next_page;
854
 
855
		hit_slowpath = 1;
856
		mutex_unlock(&dev->struct_mutex);
3260 Serge 857
		dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
3031 serge 858
 
3260 Serge 859
//		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
860
//					user_data, page_do_bit17_swizzling,
861
//					partial_cacheline_write,
862
//					needs_clflush_after);
863
 
3031 serge 864
		mutex_lock(&dev->struct_mutex);
865
 
866
next_page:
2332 Serge 867
 
3031 serge 868
		if (ret)
869
			goto out;
870
 
2332 Serge 871
		remain -= page_length;
3031 serge 872
		user_data += page_length;
2332 Serge 873
		offset += page_length;
874
	}
875
 
876
out:
3031 serge 877
	i915_gem_object_unpin_pages(obj);
878
 
879
	if (hit_slowpath) {
3480 Serge 880
		/*
881
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
882
		 * cachelines in-line while writing and the object moved
883
		 * out of the cpu write domain while we've dropped the lock.
884
		 */
885
		if (!needs_clflush_after &&
886
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
4104 Serge 887
			if (i915_gem_clflush_object(obj, obj->pin_display))
3243 Serge 888
			i915_gem_chipset_flush(dev);
3031 serge 889
		}
2332 Serge 890
	}
891
 
3031 serge 892
	if (needs_clflush_after)
3243 Serge 893
		i915_gem_chipset_flush(dev);
3031 serge 894
 
2332 Serge 895
	return ret;
896
}
3031 serge 897
 
898
/**
899
 * Writes data to the object referenced by handle.
900
 *
901
 * On error, the contents of the buffer that were to be modified are undefined.
902
 */
903
int
904
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
905
		      struct drm_file *file)
906
{
907
	struct drm_i915_gem_pwrite *args = data;
908
	struct drm_i915_gem_object *obj;
909
	int ret;
910
 
4104 Serge 911
	if (args->size == 0)
912
		return 0;
913
 
3480 Serge 914
     if(args->handle == -2)
915
     {
916
        printf("%s handle %d\n", __FUNCTION__, args->handle);
917
        return 0;
918
     }
919
 
3031 serge 920
	ret = i915_mutex_lock_interruptible(dev);
921
	if (ret)
922
		return ret;
923
 
924
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
925
	if (&obj->base == NULL) {
926
		ret = -ENOENT;
927
		goto unlock;
928
	}
929
 
930
	/* Bounds check destination. */
931
	if (args->offset > obj->base.size ||
932
	    args->size > obj->base.size - args->offset) {
933
		ret = -EINVAL;
934
		goto out;
935
	}
936
 
937
	/* prime objects have no backing filp to GEM pread/pwrite
938
	 * pages from.
939
	 */
940
	if (!obj->base.filp) {
941
		ret = -EINVAL;
942
		goto out;
943
	}
944
 
945
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);
946
 
947
	ret = -EFAULT;
948
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
949
	 * it would end up going through the fenced access, and we'll get
950
	 * different detiling behavior between reading and writing.
951
	 * pread/pwrite currently are reading and writing from the CPU
952
	 * perspective, requiring manual detiling by the client.
953
	 */
3260 Serge 954
//   if (obj->phys_obj) {
955
//       ret = i915_gem_phys_pwrite(dev, obj, args, file);
956
//       goto out;
957
//   }
3031 serge 958
 
4104 Serge 959
	if (obj->tiling_mode == I915_TILING_NONE &&
960
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
961
	    cpu_write_needs_clflush(obj)) {
3031 serge 962
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
963
		/* Note that the gtt paths might fail with non-page-backed user
964
		 * pointers (e.g. gtt mappings when moving data between
965
		 * textures). Fallback to the shmem path in that case. */
966
	}
967
 
968
	if (ret == -EFAULT || ret == -ENOSPC)
3260 Serge 969
       ret = i915_gem_shmem_pwrite(dev, obj, args, file);
3031 serge 970
 
971
out:
972
	drm_gem_object_unreference(&obj->base);
973
unlock:
974
	mutex_unlock(&dev->struct_mutex);
975
	return ret;
976
}
977
 
978
int
3480 Serge 979
i915_gem_check_wedge(struct i915_gpu_error *error,
3031 serge 980
		     bool interruptible)
981
{
3480 Serge 982
	if (i915_reset_in_progress(error)) {
3031 serge 983
		/* Non-interruptible callers can't handle -EAGAIN, hence return
984
		 * -EIO unconditionally for these. */
985
		if (!interruptible)
986
			return -EIO;
2332 Serge 987
 
3480 Serge 988
		/* Recovery complete, but the reset failed ... */
989
		if (i915_terminally_wedged(error))
3031 serge 990
			return -EIO;
2332 Serge 991
 
3031 serge 992
		return -EAGAIN;
993
	}
2332 Serge 994
 
3031 serge 995
	return 0;
996
}
2332 Serge 997
 
3031 serge 998
/*
999
 * Compare seqno against outstanding lazy request. Emit a request if they are
1000
 * equal.
1001
 */
1002
static int
1003
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
1004
{
1005
	int ret;
2332 Serge 1006
 
3031 serge 1007
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
2332 Serge 1008
 
3031 serge 1009
	ret = 0;
1010
	if (seqno == ring->outstanding_lazy_request)
4104 Serge 1011
		ret = i915_add_request(ring, NULL);
2332 Serge 1012
 
3031 serge 1013
	return ret;
1014
}
2332 Serge 1015
 
3031 serge 1016
/**
1017
 * __wait_seqno - wait until execution of seqno has finished
1018
 * @ring: the ring expected to report seqno
1019
 * @seqno: duh!
3480 Serge 1020
 * @reset_counter: reset sequence associated with the given seqno
3031 serge 1021
 * @interruptible: do an interruptible wait (normally yes)
1022
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1023
 *
3480 Serge 1024
 * Note: It is of utmost importance that the passed in seqno and reset_counter
1025
 * values have been read by the caller in an smp safe manner. Where read-side
1026
 * locks are involved, it is sufficient to read the reset_counter before
1027
 * unlocking the lock that protects the seqno. For lockless tricks, the
1028
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
1029
 * inserted.
1030
 *
3031 serge 1031
 * Returns 0 if the seqno was found within the allotted time. Else returns the
1032
 * errno with remaining time filled in timeout argument.
1033
 */
1034
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
3480 Serge 1035
			unsigned reset_counter,
3031 serge 1036
			bool interruptible, struct timespec *timeout)
1037
{
1038
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1039
	struct timespec before, now, wait_time={1,0};
1040
	unsigned long timeout_jiffies;
1041
	long end;
1042
	bool wait_forever = true;
1043
	int ret;
2332 Serge 1044
 
4104 Serge 1045
	WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
1046
 
3031 serge 1047
	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
1048
		return 0;
2332 Serge 1049
 
3031 serge 1050
	trace_i915_gem_request_wait_begin(ring, seqno);
2332 Serge 1051
 
3031 serge 1052
	if (timeout != NULL) {
1053
		wait_time = *timeout;
1054
		wait_forever = false;
1055
	}
2332 Serge 1056
 
4104 Serge 1057
	timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
2332 Serge 1058
 
3031 serge 1059
	if (WARN_ON(!ring->irq_get(ring)))
1060
		return -ENODEV;
2332 Serge 1061
 
3031 serge 1062
    /* Record current time in case interrupted by signal, or wedged */
1063
	getrawmonotonic(&before);
2332 Serge 1064
 
3031 serge 1065
#define EXIT_COND \
1066
	(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
3480 Serge 1067
	 i915_reset_in_progress(&dev_priv->gpu_error) || \
1068
	 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3031 serge 1069
	do {
3266 Serge 1070
		if (interruptible)
1071
			end = wait_event_interruptible_timeout(ring->irq_queue,
1072
							       EXIT_COND,
1073
							       timeout_jiffies);
1074
		else
3031 serge 1075
			end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1076
						 timeout_jiffies);
2332 Serge 1077
 
3480 Serge 1078
		/* We need to check whether any gpu reset happened in between
1079
		 * the caller grabbing the seqno and now ... */
1080
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1081
			end = -EAGAIN;
1082
 
1083
		/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
1084
		 * gone. */
1085
		ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
3031 serge 1086
		if (ret)
1087
			end = ret;
1088
	} while (end == 0 && wait_forever);
2332 Serge 1089
 
3031 serge 1090
	getrawmonotonic(&now);
2332 Serge 1091
 
3031 serge 1092
	ring->irq_put(ring);
1093
	trace_i915_gem_request_wait_end(ring, seqno);
1094
#undef EXIT_COND
2332 Serge 1095
 
3031 serge 1096
	if (timeout) {
4104 Serge 1097
//		struct timespec sleep_time = timespec_sub(now, before);
1098
//		*timeout = timespec_sub(*timeout, sleep_time);
3031 serge 1099
	}
2332 Serge 1100
 
3031 serge 1101
	switch (end) {
1102
	case -EIO:
1103
	case -EAGAIN: /* Wedged */
1104
	case -ERESTARTSYS: /* Signal */
1105
		return (int)end;
1106
	case 0: /* Timeout */
1107
		return -ETIME;
1108
	default: /* Completed */
1109
		WARN_ON(end < 0); /* We're not aware of other errors */
1110
		return 0;
1111
	}
1112
}
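/*
 * Minimal calling sketch for __wait_seqno(), mirroring the nonblocking wait
 * further down in this file: the reset counter must be sampled while the
 * seqno is still protected by struct_mutex, and only then may the lock be
 * dropped for the wait itself.
 *
 *     reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 *     mutex_unlock(&dev->struct_mutex);
 *     ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
 *     mutex_lock(&dev->struct_mutex);
 */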
2332 Serge 1113
 
3031 serge 1114
/**
1115
 * Waits for a sequence number to be signaled, and cleans up the
1116
 * request and object lists appropriately for that event.
1117
 */
1118
int
1119
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1120
{
1121
	struct drm_device *dev = ring->dev;
1122
	struct drm_i915_private *dev_priv = dev->dev_private;
1123
	bool interruptible = dev_priv->mm.interruptible;
1124
	int ret;
2332 Serge 1125
 
3031 serge 1126
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1127
	BUG_ON(seqno == 0);
2332 Serge 1128
 
3480 Serge 1129
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
3031 serge 1130
	if (ret)
1131
		return ret;
2332 Serge 1132
 
3031 serge 1133
	ret = i915_gem_check_olr(ring, seqno);
1134
	if (ret)
1135
		return ret;
2332 Serge 1136
 
3480 Serge 1137
	return __wait_seqno(ring, seqno,
1138
			    atomic_read(&dev_priv->gpu_error.reset_counter),
1139
			    interruptible, NULL);
3031 serge 1140
}
2332 Serge 1141
 
4104 Serge 1142
static int
1143
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
1144
				     struct intel_ring_buffer *ring)
1145
{
1146
	i915_gem_retire_requests_ring(ring);
1147
 
1148
	/* Manually manage the write flush as we may have not yet
1149
	 * retired the buffer.
1150
	 *
1151
	 * Note that the last_write_seqno is always the earlier of
1152
	 * the two (read/write) seqno, so if we have successfully waited,
1153
	 * we know we have passed the last write.
1154
	 */
1155
	obj->last_write_seqno = 0;
1156
	obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1157
 
1158
	return 0;
1159
}
1160
 
3031 serge 1161
/**
1162
 * Ensures that all rendering to the object has completed and the object is
1163
 * safe to unbind from the GTT or access from the CPU.
1164
 */
1165
static __must_check int
1166
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1167
			       bool readonly)
1168
{
1169
	struct intel_ring_buffer *ring = obj->ring;
1170
	u32 seqno;
1171
	int ret;
2332 Serge 1172
 
3031 serge 1173
	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1174
	if (seqno == 0)
1175
		return 0;
2332 Serge 1176
 
3031 serge 1177
	ret = i915_wait_seqno(ring, seqno);
4104 Serge 1178
    if (ret)
1179
        return ret;
2332 Serge 1180
 
4104 Serge 1181
	return i915_gem_object_wait_rendering__tail(obj, ring);
3031 serge 1182
}
2332 Serge 1183
 
3260 Serge 1184
/* A nonblocking variant of the above wait. This is a highly dangerous routine
1185
 * as the object state may change during this call.
1186
 */
1187
static __must_check int
1188
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1189
					    bool readonly)
1190
{
1191
	struct drm_device *dev = obj->base.dev;
1192
	struct drm_i915_private *dev_priv = dev->dev_private;
1193
	struct intel_ring_buffer *ring = obj->ring;
3480 Serge 1194
	unsigned reset_counter;
3260 Serge 1195
	u32 seqno;
1196
	int ret;
2332 Serge 1197
 
3260 Serge 1198
	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1199
	BUG_ON(!dev_priv->mm.interruptible);
2332 Serge 1200
 
3260 Serge 1201
	seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1202
	if (seqno == 0)
1203
		return 0;
2332 Serge 1204
 
3480 Serge 1205
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
3260 Serge 1206
	if (ret)
1207
		return ret;
2332 Serge 1208
 
3260 Serge 1209
	ret = i915_gem_check_olr(ring, seqno);
1210
	if (ret)
1211
		return ret;
2332 Serge 1212
 
3480 Serge 1213
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3260 Serge 1214
	mutex_unlock(&dev->struct_mutex);
3480 Serge 1215
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3260 Serge 1216
	mutex_lock(&dev->struct_mutex);
4104 Serge 1217
	if (ret)
1218
		return ret;
2332 Serge 1219
 
4104 Serge 1220
	return i915_gem_object_wait_rendering__tail(obj, ring);
3260 Serge 1221
}
2332 Serge 1222
 
3260 Serge 1223
/**
1224
 * Called when user space prepares to use an object with the CPU, either
1225
 * through the mmap ioctl's mapping or a GTT mapping.
1226
 */
1227
int
1228
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1229
			  struct drm_file *file)
1230
{
1231
	struct drm_i915_gem_set_domain *args = data;
1232
	struct drm_i915_gem_object *obj;
1233
	uint32_t read_domains = args->read_domains;
1234
	uint32_t write_domain = args->write_domain;
1235
	int ret;
2332 Serge 1236
 
3480 Serge 1237
 
1238
     if(args->handle == -2)
1239
     {
1240
        printf("%s handle %d\n", __FUNCTION__, args->handle);
1241
        return 0;
1242
     }
1243
 
3260 Serge 1244
	/* Only handle setting domains to types used by the CPU. */
1245
	if (write_domain & I915_GEM_GPU_DOMAINS)
1246
		return -EINVAL;
2332 Serge 1247
 
3260 Serge 1248
	if (read_domains & I915_GEM_GPU_DOMAINS)
1249
		return -EINVAL;
2332 Serge 1250
 
3260 Serge 1251
	/* Having something in the write domain implies it's in the read
1252
	 * domain, and only that read domain.  Enforce that in the request.
1253
	 */
1254
	if (write_domain != 0 && read_domains != write_domain)
1255
		return -EINVAL;
2332 Serge 1256
 
3260 Serge 1257
	ret = i915_mutex_lock_interruptible(dev);
1258
	if (ret)
1259
		return ret;
2332 Serge 1260
 
3260 Serge 1261
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1262
	if (&obj->base == NULL) {
1263
		ret = -ENOENT;
1264
		goto unlock;
1265
	}
2332 Serge 1266
 
3260 Serge 1267
	/* Try to flush the object off the GPU without holding the lock.
1268
	 * We will repeat the flush holding the lock in the normal manner
1269
	 * to catch cases where we are gazumped.
1270
	 */
1271
	ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
1272
	if (ret)
1273
		goto unref;
2332 Serge 1274
 
3260 Serge 1275
	if (read_domains & I915_GEM_DOMAIN_GTT) {
1276
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
2332 Serge 1277
 
3260 Serge 1278
		/* Silently promote "you're not bound, there was nothing to do"
1279
		 * to success, since the client was just asking us to
1280
		 * make sure everything was done.
1281
		 */
1282
		if (ret == -EINVAL)
1283
			ret = 0;
1284
	} else {
1285
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1286
	}
2332 Serge 1287
 
3260 Serge 1288
unref:
1289
	drm_gem_object_unreference(&obj->base);
1290
unlock:
1291
	mutex_unlock(&dev->struct_mutex);
1292
	return ret;
1293
}
2332 Serge 1294
 
4293 Serge 1295
/**
1296
 * Called when user space has done writes to this buffer
1297
 */
1298
int
1299
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1300
			 struct drm_file *file)
1301
{
1302
	struct drm_i915_gem_sw_finish *args = data;
1303
	struct drm_i915_gem_object *obj;
1304
	int ret = 0;
2332 Serge 1305
 
4293 Serge 1306
    if(args->handle == -2)
1307
    {
1308
       printf("%s handle %d\n", __FUNCTION__, args->handle);
1309
       return 0;
1310
    }
2332 Serge 1311
 
4293 Serge 1312
	ret = i915_mutex_lock_interruptible(dev);
1313
	if (ret)
1314
		return ret;
2332 Serge 1315
 
4293 Serge 1316
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1317
	if (&obj->base == NULL) {
1318
		ret = -ENOENT;
1319
		goto unlock;
1320
	}
2332 Serge 1321
 
4293 Serge 1322
	/* Pinned buffers may be scanout, so flush the cache */
1323
	if (obj->pin_display)
1324
		i915_gem_object_flush_cpu_write_domain(obj, true);
2332 Serge 1325
 
4293 Serge 1326
	drm_gem_object_unreference(&obj->base);
1327
unlock:
1328
	mutex_unlock(&dev->struct_mutex);
1329
	return ret;
1330
}
1331
 
3260 Serge 1332
/**
1333
 * Maps the contents of an object, returning the address it is mapped
1334
 * into.
1335
 *
1336
 * While the mapping holds a reference on the contents of the object, it doesn't
1337
 * imply a ref on the object itself.
1338
 */
1339
int
1340
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1341
		    struct drm_file *file)
1342
{
1343
	struct drm_i915_gem_mmap *args = data;
1344
	struct drm_gem_object *obj;
1345
	unsigned long addr = 0;
2332 Serge 1346
 
3480 Serge 1347
     if(args->handle == -2)
1348
     {
1349
        printf("%s handle %d\n", __FUNCTION__, args->handle);
1350
        return 0;
1351
     }
1352
 
3260 Serge 1353
	obj = drm_gem_object_lookup(dev, file, args->handle);
1354
	if (obj == NULL)
1355
		return -ENOENT;
4104 Serge 1356
 
3260 Serge 1357
	/* prime objects have no backing filp to GEM mmap
1358
	 * pages from.
1359
	 */
1360
	if (!obj->filp) {
1361
		drm_gem_object_unreference_unlocked(obj);
1362
		return -EINVAL;
1363
	}
2332 Serge 1364
 
3263 Serge 1365
    addr = vm_mmap(obj->filp, 0, args->size,
1366
              PROT_READ | PROT_WRITE, MAP_SHARED,
1367
              args->offset);
3260 Serge 1368
	drm_gem_object_unreference_unlocked(obj);
3263 Serge 1369
    if (IS_ERR((void *)addr))
1370
        return addr;
2332 Serge 1371
 
3260 Serge 1372
	args->addr_ptr = (uint64_t) addr;
2332 Serge 1373
 
3263 Serge 1374
    return 0;
3260 Serge 1375
}
2332 Serge 1376
 
1377
 
1378
 
1379
 
1380
 
1381
 
1382
 
1383
 
3031 serge 1384
 
1385
 
1386
 
1387
 
1388
 
1389
/**
1390
 * i915_gem_release_mmap - remove physical page mappings
1391
 * @obj: obj in question
1392
 *
1393
 * Preserve the reservation of the mmapping with the DRM core code, but
1394
 * relinquish ownership of the pages back to the system.
1395
 *
1396
 * It is vital that we remove the page mapping if we have mapped a tiled
1397
 * object through the GTT and then lose the fence register due to
1398
 * resource pressure. Similarly if the object has been moved out of the
1399
 * aperture, than pages mapped into userspace must be revoked. Removing the
1400
 * mapping will then trigger a page fault on the next user access, allowing
1401
 * fixup by i915_gem_fault().
1402
 */
1403
void
1404
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1405
{
1406
	if (!obj->fault_mappable)
1407
		return;
1408
 
4104 Serge 1409
//	drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
3031 serge 1410
	obj->fault_mappable = false;
1411
}
1412
 
3480 Serge 1413
uint32_t
2332 Serge 1414
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1415
{
1416
	uint32_t gtt_size;
1417
 
1418
	if (INTEL_INFO(dev)->gen >= 4 ||
1419
	    tiling_mode == I915_TILING_NONE)
1420
		return size;
1421
 
1422
	/* Previous chips need a power-of-two fence region when tiling */
1423
	if (INTEL_INFO(dev)->gen == 3)
1424
		gtt_size = 1024*1024;
1425
	else
1426
		gtt_size = 512*1024;
1427
 
1428
	while (gtt_size < size)
1429
		gtt_size <<= 1;
1430
 
1431
	return gtt_size;
1432
}
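/*
 * Worked example of the fence-size rule above: a 700 KiB tiled object on
 * gen3 starts from 1 MiB, which already covers it, so 1 MiB is returned;
 * on gen2 the loop doubles 512 KiB once, also yielding 1 MiB.
 */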
1433
 
1434
/**
1435
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1436
 * @obj: object to check
1437
 *
1438
 * Return the required GTT alignment for an object, taking into account
1439
 * potential fence register mapping.
1440
 */
3480 Serge 1441
uint32_t
1442
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1443
			   int tiling_mode, bool fenced)
2332 Serge 1444
{
1445
	/*
1446
	 * Minimum alignment is 4k (GTT page size), but might be greater
1447
	 * if a fence register is needed for the object.
1448
	 */
3480 Serge 1449
	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
2332 Serge 1450
	    tiling_mode == I915_TILING_NONE)
1451
		return 4096;
1452
 
1453
	/*
1454
	 * Previous chips need to be aligned to the size of the smallest
1455
	 * fence register that can contain the object.
1456
	 */
1457
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
1458
}
1459
 
1460
/**
1461
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1462
 *					 unfenced object
1463
 * @dev: the device
1464
 * @size: size of the object
1465
 * @tiling_mode: tiling mode of the object
1466
 *
1467
 * Return the required GTT alignment for an object, only taking into account
1468
 * unfenced tiled surface requirements.
1469
 */
1470
uint32_t
1471
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1472
				    uint32_t size,
1473
				    int tiling_mode)
1474
{
1475
	/*
1476
	 * Minimum alignment is 4k (GTT page size) for sane hw.
1477
	 */
1478
	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1479
	    tiling_mode == I915_TILING_NONE)
1480
		return 4096;
1481
 
1482
	/* Previous hardware however needs to be aligned to a power-of-two
1483
	 * tile height. The simplest method for determining this is to reuse
1484
	 * the power-of-tile object size.
1485
	 */
1486
	return i915_gem_get_gtt_size(dev, size, tiling_mode);
1487
}
1488
 
3480 Serge 1489
int
1490
i915_gem_mmap_gtt(struct drm_file *file,
1491
          struct drm_device *dev,
1492
          uint32_t handle,
1493
          uint64_t *offset)
1494
{
1495
    struct drm_i915_private *dev_priv = dev->dev_private;
1496
    struct drm_i915_gem_object *obj;
1497
    unsigned long pfn;
1498
    char *mem, *ptr;
1499
    int ret;
1500
 
1501
    ret = i915_mutex_lock_interruptible(dev);
1502
    if (ret)
1503
        return ret;
1504
 
1505
    obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1506
    if (&obj->base == NULL) {
1507
        ret = -ENOENT;
1508
        goto unlock;
1509
    }
1510
 
1511
    if (obj->base.size > dev_priv->gtt.mappable_end) {
1512
        ret = -E2BIG;
1513
        goto out;
1514
    }
1515
 
1516
    if (obj->madv != I915_MADV_WILLNEED) {
1517
        DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1518
        ret = -EINVAL;
1519
        goto out;
1520
    }
1521
    /* Now bind it into the GTT if needed */
4104 Serge 1522
    ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
3480 Serge 1523
    if (ret)
1524
        goto out;
1525
 
1526
    ret = i915_gem_object_set_to_gtt_domain(obj, 1);
1527
    if (ret)
1528
        goto unpin;
1529
 
1530
    ret = i915_gem_object_get_fence(obj);
1531
    if (ret)
1532
        goto unpin;
1533
 
1534
    obj->fault_mappable = true;
1535
 
4104 Serge 1536
    pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
3480 Serge 1537
 
1538
    /* Finally, remap it using the new GTT offset */
1539
 
1540
    mem = UserAlloc(obj->base.size);
1541
    if(unlikely(mem == NULL))
1542
    {
1543
        ret = -ENOMEM;
1544
        goto unpin;
1545
    }
1546
 
1547
    for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
1548
        MapPage(ptr, pfn, PG_SHARED|PG_UW);
1549
 
1550
unpin:
1551
    i915_gem_object_unpin(obj);
1552
 
1553
 
4104 Serge 1554
    *offset = mem;
3480 Serge 1555
 
1556
out:
1557
    drm_gem_object_unreference(&obj->base);
1558
unlock:
1559
    mutex_unlock(&dev->struct_mutex);
1560
    return ret;
1561
}
1562
 
1563
/**
1564
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1565
 * @dev: DRM device
1566
 * @data: GTT mapping ioctl data
1567
 * @file: GEM object info
1568
 *
1569
 * Simply returns the fake offset to userspace so it can mmap it.
1570
 * The mmap call will end up in drm_gem_mmap(), which will set things
1571
 * up so we can get faults in the handler above.
1572
 *
1573
 * The fault handler will take care of binding the object into the GTT
1574
 * (since it may have been evicted to make room for something), allocating
1575
 * a fence register, and mapping the appropriate aperture address into
1576
 * userspace.
1577
 */
1578
int
1579
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1580
            struct drm_file *file)
1581
{
1582
    struct drm_i915_gem_mmap_gtt *args = data;
1583
 
1584
    return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1585
}
1586
 
3031 serge 1587
/* Immediately discard the backing storage */
1588
static void
1589
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1590
{
1591
//	struct inode *inode;
2332 Serge 1592
 
3031 serge 1593
//	i915_gem_object_free_mmap_offset(obj);
2332 Serge 1594
 
3263 Serge 1595
	if (obj->base.filp == NULL)
1596
		return;
2332 Serge 1597
 
3031 serge 1598
	/* Our goal here is to return as much of the memory as
1599
	 * is possible back to the system as we are called from OOM.
1600
	 * To do this we must instruct the shmfs to drop all of its
1601
	 * backing pages, *now*.
1602
	 */
1603
//	inode = obj->base.filp->f_path.dentry->d_inode;
1604
//	shmem_truncate_range(inode, 0, (loff_t)-1);
2332 Serge 1605
 
3031 serge 1606
	obj->madv = __I915_MADV_PURGED;
1607
}
2332 Serge 1608
 
3031 serge 1609
static inline int
1610
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1611
{
1612
	return obj->madv == I915_MADV_DONTNEED;
1613
}
2332 Serge 1614
 
3031 serge 1615
static void
1616
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1617
{
3746 Serge 1618
	struct sg_page_iter sg_iter;
1619
	int ret;
2332 Serge 1620
 
3031 serge 1621
	BUG_ON(obj->madv == __I915_MADV_PURGED);
2332 Serge 1622
 
3031 serge 1623
	ret = i915_gem_object_set_to_cpu_domain(obj, true);
1624
	if (ret) {
1625
		/* In the event of a disaster, abandon all caches and
1626
		 * hope for the best.
1627
		 */
1628
		WARN_ON(ret != -EIO);
4104 Serge 1629
		i915_gem_clflush_object(obj, true);
3031 serge 1630
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1631
	}
2332 Serge 1632
 
3031 serge 1633
	if (obj->madv == I915_MADV_DONTNEED)
1634
		obj->dirty = 0;
2332 Serge 1635
 
3746 Serge 1636
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1637
		struct page *page = sg_page_iter_page(&sg_iter);
2332 Serge 1638
 
3290 Serge 1639
        page_cache_release(page);
3243 Serge 1640
	}
1641
    //DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
3290 Serge 1642
 
4104 Serge 1643
    obj->dirty = 0;
3243 Serge 1644
 
1645
	sg_free_table(obj->pages);
1646
	kfree(obj->pages);
3031 serge 1647
}
2332 Serge 1648
 
3480 Serge 1649
int
3031 serge 1650
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1651
{
1652
	const struct drm_i915_gem_object_ops *ops = obj->ops;
2332 Serge 1653
 
3243 Serge 1654
	if (obj->pages == NULL)
3031 serge 1655
		return 0;
2332 Serge 1656
 
3031 serge 1657
	if (obj->pages_pin_count)
1658
		return -EBUSY;
1659
 
4104 Serge 1660
	BUG_ON(i915_gem_obj_bound_any(obj));
1661
 
3243 Serge 1662
	/* ->put_pages might need to allocate memory for the bit17 swizzle
1663
	 * array, hence protect them from being reaped by removing them from gtt
1664
	 * lists early. */
4104 Serge 1665
	list_del(&obj->global_list);
3243 Serge 1666
 
3031 serge 1667
	ops->put_pages(obj);
3243 Serge 1668
	obj->pages = NULL;
3031 serge 1669
 
1670
	if (i915_gem_object_is_purgeable(obj))
1671
		i915_gem_object_truncate(obj);
1672
 
1673
	return 0;
1674
}
1675
 
1676
 
1677
 
1678
 
1679
 
1680
 
1681
 
1682
 
2332 Serge 1683
static int
3031 serge 1684
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2332 Serge 1685
{
3260 Serge 1686
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3243 Serge 1687
    int page_count, i;
4104 Serge 1688
    struct sg_table *st;
3243 Serge 1689
	struct scatterlist *sg;
3746 Serge 1690
	struct sg_page_iter sg_iter;
3243 Serge 1691
	struct page *page;
3746 Serge 1692
	unsigned long last_pfn = 0;	/* suppress gcc warning */
3243 Serge 1693
	gfp_t gfp;
2332 Serge 1694
 
3243 Serge 1695
	/* Assert that the object is not currently in any GPU domain. As it
1696
	 * wasn't in the GTT, there shouldn't be any way it could have been in
1697
	 * a GPU cache
2332 Serge 1698
	 */
3243 Serge 1699
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
1700
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
1701
 
1702
	st = kmalloc(sizeof(*st), GFP_KERNEL);
1703
	if (st == NULL)
1704
		return -ENOMEM;
1705
 
2332 Serge 1706
	page_count = obj->base.size / PAGE_SIZE;
3243 Serge 1707
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
1708
		kfree(st);
3746 Serge 1709
        FAIL();
2332 Serge 1710
		return -ENOMEM;
3243 Serge 1711
	}
2332 Serge 1712
 
3243 Serge 1713
	/* Get the list of pages out of our struct file.  They'll be pinned
1714
	 * at this point until we release them.
1715
	 *
1716
	 * Fail silently without starting the shrinker
1717
	 */
3746 Serge 1718
	sg = st->sgl;
1719
	st->nents = 0;
1720
	for (i = 0; i < page_count; i++) {
4104 Serge 1721
        page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
3260 Serge 1722
		if (IS_ERR(page)) {
1723
            dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
2332 Serge 1724
			goto err_pages;
1725
 
3260 Serge 1726
		}
3746 Serge 1727
 
1728
		if (!i || page_to_pfn(page) != last_pfn + 1) {
1729
			if (i)
1730
				sg = sg_next(sg);
1731
			st->nents++;
3243 Serge 1732
			sg_set_page(sg, page, PAGE_SIZE, 0);
3746 Serge 1733
		} else {
1734
			sg->length += PAGE_SIZE;
1735
		}
1736
		last_pfn = page_to_pfn(page);
3243 Serge 1737
	}
3031 serge 1738
 
3746 Serge 1739
	sg_mark_end(sg);
3243 Serge 1740
	obj->pages = st;
3031 serge 1741
 
2332 Serge 1742
	return 0;
1743
 
1744
err_pages:
3746 Serge 1745
	sg_mark_end(sg);
1746
	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
1747
		page_cache_release(sg_page_iter_page(&sg_iter));
3243 Serge 1748
	sg_free_table(st);
1749
	kfree(st);
3746 Serge 1750
    FAIL();
3243 Serge 1751
	return PTR_ERR(page);
2332 Serge 1752
}
1753
 
3031 serge 1754
/* Ensure that the associated pages are gathered from the backing storage
1755
 * and pinned into our object. i915_gem_object_get_pages() may be called
1756
 * multiple times before they are released by a single call to
1757
 * i915_gem_object_put_pages() - once the pages are no longer referenced
1758
 * either as a result of memory pressure (reaping pages under the shrinker)
1759
 * or as the object is itself released.
1760
 */
1761
int
1762
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2332 Serge 1763
{
3031 serge 1764
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1765
	const struct drm_i915_gem_object_ops *ops = obj->ops;
1766
	int ret;
2332 Serge 1767
 
3243 Serge 1768
	if (obj->pages)
3031 serge 1769
		return 0;
2332 Serge 1770
 
3031 serge 1771
	BUG_ON(obj->pages_pin_count);
2332 Serge 1772
 
3031 serge 1773
	ret = ops->get_pages(obj);
1774
	if (ret)
1775
		return ret;
2344 Serge 1776
 
4104 Serge 1777
	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3243 Serge 1778
    return 0;
2332 Serge 1779
}
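A minimal sketch of the intended call pairing, assuming the pin helpers from i915_drv.h simply adjust obj->pages_pin_count as their names suggest; the caller below is illustrative and does not exist in this file:

static int with_backing_pages(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_get_pages(obj);	/* populate obj->pages (no-op if already present) */
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);		/* pages_pin_count++ blocks put_pages() */
	/* ... walk obj->pages->sgl ... */
	i915_gem_object_unpin_pages(obj);	/* pages_pin_count-- */

	/* The pages are reclaimed later by i915_gem_object_put_pages() once unpinned. */
	return 0;
}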
1780
 
1781
void
1782
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
3243 Serge 1783
			       struct intel_ring_buffer *ring)
2332 Serge 1784
{
1785
	struct drm_device *dev = obj->base.dev;
1786
	struct drm_i915_private *dev_priv = dev->dev_private;
3243 Serge 1787
	u32 seqno = intel_ring_get_seqno(ring);
2332 Serge 1788
 
1789
	BUG_ON(ring == NULL);
4104 Serge 1790
	if (obj->ring != ring && obj->last_write_seqno) {
1791
		/* Keep the seqno relative to the current ring */
1792
		obj->last_write_seqno = seqno;
1793
	}
2332 Serge 1794
	obj->ring = ring;
1795
 
1796
	/* Add a reference if we're newly entering the active list. */
1797
	if (!obj->active) {
2344 Serge 1798
		drm_gem_object_reference(&obj->base);
2332 Serge 1799
		obj->active = 1;
1800
	}
1801
 
1802
	list_move_tail(&obj->ring_list, &ring->active_list);
1803
 
3031 serge 1804
	obj->last_read_seqno = seqno;
1805
 
2332 Serge 1806
	if (obj->fenced_gpu_access) {
3031 serge 1807
		obj->last_fenced_seqno = seqno;
1808
 
1809
		/* Bump MRU to take account of the delayed flush */
1810
		if (obj->fence_reg != I915_FENCE_REG_NONE) {
2332 Serge 1811
		struct drm_i915_fence_reg *reg;
1812
 
1813
		reg = &dev_priv->fence_regs[obj->fence_reg];
3031 serge 1814
			list_move_tail(®->lru_list,
1815
				       &dev_priv->mm.fence_list);
1816
		}
2332 Serge 1817
	}
1818
}
1819
 
2344 Serge 1820
static void
3031 serge 1821
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2344 Serge 1822
{
4104 Serge 1823
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1824
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1825
	struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
2332 Serge 1826
 
3031 serge 1827
	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2344 Serge 1828
	BUG_ON(!obj->active);
2332 Serge 1829
 
4104 Serge 1830
	list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
2344 Serge 1831
 
3031 serge 1832
	list_del_init(&obj->ring_list);
2352 Serge 1833
	obj->ring = NULL;
2344 Serge 1834
 
3031 serge 1835
	obj->last_read_seqno = 0;
1836
	obj->last_write_seqno = 0;
1837
	obj->base.write_domain = 0;
1838
 
1839
	obj->last_fenced_seqno = 0;
2352 Serge 1840
	obj->fenced_gpu_access = false;
2344 Serge 1841
 
2352 Serge 1842
	obj->active = 0;
1843
	drm_gem_object_unreference(&obj->base);
1844
 
1845
	WARN_ON(i915_verify_lists(dev));
1846
}
1847
 
3243 Serge 1848
static int
3480 Serge 1849
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2344 Serge 1850
{
3243 Serge 1851
	struct drm_i915_private *dev_priv = dev->dev_private;
1852
	struct intel_ring_buffer *ring;
1853
	int ret, i, j;
2344 Serge 1854
 
3480 Serge 1855
	/* Carefully retire all requests without writing to the rings */
3243 Serge 1856
	for_each_ring(ring, dev_priv, i) {
3480 Serge 1857
		ret = intel_ring_idle(ring);
3243 Serge 1858
		if (ret)
1859
			return ret;
3480 Serge 1860
	}
1861
	i915_gem_retire_requests(dev);
3243 Serge 1862
 
3480 Serge 1863
	/* Finally reset hw state */
3243 Serge 1864
	for_each_ring(ring, dev_priv, i) {
3480 Serge 1865
		intel_ring_init_seqno(ring, seqno);
1866
 
3243 Serge 1867
		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1868
			ring->sync_seqno[j] = 0;
1869
	}
1870
 
1871
	return 0;
2344 Serge 1872
}
1873
 
3480 Serge 1874
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1875
{
1876
	struct drm_i915_private *dev_priv = dev->dev_private;
1877
	int ret;
1878
 
1879
	if (seqno == 0)
1880
		return -EINVAL;
1881
 
1882
	/* HWS page needs to be set less than what we
1883
	 * will inject to ring
1884
	 */
1885
	ret = i915_gem_init_seqno(dev, seqno - 1);
1886
	if (ret)
1887
		return ret;
1888
 
1889
	/* Carefully set the last_seqno value so that wrap
1890
	 * detection still works
1891
	 */
1892
	dev_priv->next_seqno = seqno;
1893
	dev_priv->last_seqno = seqno - 1;
1894
	if (dev_priv->last_seqno == 0)
1895
		dev_priv->last_seqno--;
1896
 
1897
	return 0;
1898
}
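A worked example of the bookkeeping above, assuming i915_seqno_passed() compares seqnos with signed 32-bit arithmetic as elsewhere in the driver:

/*
 * i915_gem_set_seqno(dev, 0x00000001):
 *   i915_gem_init_seqno(dev, 0x00000000)  - rings idled, hw seqno reset to 0
 *   dev_priv->next_seqno = 0x00000001     - first seqno handed out afterwards
 *   dev_priv->last_seqno = 0x00000000     - would confuse wrap detection, so it is
 *   dev_priv->last_seqno = 0xffffffff     - decremented: (s32)(0x1 - 0xffffffff) >= 0,
 *                                           so the new seqno still "passes" the old one.
 */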
1899
 
3243 Serge 1900
int
1901
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2344 Serge 1902
{
3243 Serge 1903
	struct drm_i915_private *dev_priv = dev->dev_private;
2344 Serge 1904
 
3243 Serge 1905
	/* reserve 0 for non-seqno */
1906
	if (dev_priv->next_seqno == 0) {
3480 Serge 1907
		int ret = i915_gem_init_seqno(dev, 0);
3243 Serge 1908
		if (ret)
1909
			return ret;
1910
 
1911
		dev_priv->next_seqno = 1;
1912
	}
1913
 
3480 Serge 1914
	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
3243 Serge 1915
	return 0;
2332 Serge 1916
}
1917
 
4104 Serge 1918
int __i915_add_request(struct intel_ring_buffer *ring,
2352 Serge 1919
		 struct drm_file *file,
4104 Serge 1920
		       struct drm_i915_gem_object *obj,
3031 serge 1921
		 u32 *out_seqno)
2352 Serge 1922
{
1923
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
3031 serge 1924
	struct drm_i915_gem_request *request;
4104 Serge 1925
	u32 request_ring_position, request_start;
2352 Serge 1926
	int was_empty;
1927
	int ret;
2332 Serge 1928
 
4104 Serge 1929
	request_start = intel_ring_get_tail(ring);
3031 serge 1930
	/*
1931
	 * Emit any outstanding flushes - execbuf can fail to emit the flush
1932
	 * after having emitted the batchbuffer command. Hence we need to fix
1933
	 * things up similar to emitting the lazy request. The difference here
1934
	 * is that the flush _must_ happen before the next request, no matter
1935
	 * what.
1936
	 */
4104 Serge 1937
   ret = intel_ring_flush_all_caches(ring);
1938
   if (ret)
1939
       return ret;
2332 Serge 1940
 
3031 serge 1941
	request = kmalloc(sizeof(*request), GFP_KERNEL);
1942
	if (request == NULL)
1943
		return -ENOMEM;
1944
 
1945
 
1946
	/* Record the position of the start of the request so that
1947
	 * should we detect the updated seqno part-way through the
4104 Serge 1948
    * GPU processing the request, we never over-estimate the
3031 serge 1949
	 * position of the head.
1950
	 */
4104 Serge 1951
   request_ring_position = intel_ring_get_tail(ring);
3031 serge 1952
 
3243 Serge 1953
	ret = ring->add_request(ring);
3031 serge 1954
	if (ret) {
1955
		kfree(request);
4104 Serge 1956
		return ret;
3031 serge 1957
	}
2332 Serge 1958
 
3243 Serge 1959
	request->seqno = intel_ring_get_seqno(ring);
2352 Serge 1960
	request->ring = ring;
4104 Serge 1961
	request->head = request_start;
3031 serge 1962
	request->tail = request_ring_position;
4104 Serge 1963
	request->ctx = ring->last_context;
1964
	request->batch_obj = obj;
1965
 
1966
	/* Whilst this request exists, batch_obj will be on the
1967
	 * active_list, and so will hold the active reference. Only when this
1968
	 * request is retired will the batch_obj be moved onto the
1969
	 * inactive_list and lose its active reference. Hence we do not need
1970
	 * to explicitly hold another reference here.
1971
	 */
1972
 
1973
	if (request->ctx)
1974
		i915_gem_context_reference(request->ctx);
1975
 
3031 serge 1976
    request->emitted_jiffies = GetTimerTicks();
2352 Serge 1977
	was_empty = list_empty(&ring->request_list);
1978
	list_add_tail(&request->list, &ring->request_list);
3031 serge 1979
	request->file_priv = NULL;
2332 Serge 1980
 
3263 Serge 1981
	if (file) {
1982
		struct drm_i915_file_private *file_priv = file->driver_priv;
2332 Serge 1983
 
3263 Serge 1984
		spin_lock(&file_priv->mm.lock);
1985
		request->file_priv = file_priv;
1986
		list_add_tail(&request->client_list,
1987
			      &file_priv->mm.request_list);
1988
		spin_unlock(&file_priv->mm.lock);
1989
	}
1990
 
1991
	trace_i915_gem_request_add(ring, request->seqno);
3031 serge 1992
	ring->outstanding_lazy_request = 0;
2332 Serge 1993
 
4104 Serge 1994
	if (!dev_priv->ums.mm_suspended) {
1995
//		i915_queue_hangcheck(ring->dev);
1996
 
1997
       if (was_empty) {
2360 Serge 1998
           queue_delayed_work(dev_priv->wq,
3482 Serge 1999
					   &dev_priv->mm.retire_work,
2000
					   round_jiffies_up_relative(HZ));
4104 Serge 2001
           intel_mark_busy(dev_priv->dev);
2002
       }
2003
   }
3031 serge 2004
 
2005
	if (out_seqno)
3243 Serge 2006
		*out_seqno = request->seqno;
2352 Serge 2007
	return 0;
2008
}
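A sketch of the usual call pattern: emit a request after queuing work on a ring so waiters have a seqno to poll. The two-argument i915_add_request() shorthand used by the retire handler below is assumed to be a wrapper that passes NULL for the file and batch object:

static int track_ring_work(struct intel_ring_buffer *ring)
{
	u32 seqno = 0;

	/* The returned seqno is what __wait_seqno() and the retire path test against. */
	return __i915_add_request(ring, NULL, NULL, &seqno);
}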
2332 Serge 2009
 
3263 Serge 2010
static inline void
2011
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2012
{
2013
	struct drm_i915_file_private *file_priv = request->file_priv;
2332 Serge 2014
 
3263 Serge 2015
	if (!file_priv)
2016
		return;
2332 Serge 2017
 
3263 Serge 2018
	spin_lock(&file_priv->mm.lock);
2019
	if (request->file_priv) {
2020
		list_del(&request->client_list);
2021
		request->file_priv = NULL;
2022
	}
2023
	spin_unlock(&file_priv->mm.lock);
2024
}
2332 Serge 2025
 
4104 Serge 2026
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
2027
				    struct i915_address_space *vm)
2028
{
2029
	if (acthd >= i915_gem_obj_offset(obj, vm) &&
2030
	    acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
2031
		return true;
2032
 
2033
	return false;
2034
}
2035
 
2036
static bool i915_head_inside_request(const u32 acthd_unmasked,
2037
				     const u32 request_start,
2038
				     const u32 request_end)
2039
{
2040
	const u32 acthd = acthd_unmasked & HEAD_ADDR;
2041
 
2042
	if (request_start < request_end) {
2043
		if (acthd >= request_start && acthd < request_end)
2044
			return true;
2045
	} else if (request_start > request_end) {
2046
		if (acthd >= request_start || acthd < request_end)
2047
			return true;
2048
	}
2049
 
2050
	return false;
2051
}
2052
 
2053
static struct i915_address_space *
2054
request_to_vm(struct drm_i915_gem_request *request)
2055
{
2056
	struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
2057
	struct i915_address_space *vm;
2058
 
2059
	vm = &dev_priv->gtt.base;
2060
 
2061
	return vm;
2062
}
2063
 
2064
static bool i915_request_guilty(struct drm_i915_gem_request *request,
2065
				const u32 acthd, bool *inside)
2066
{
2067
	/* There is a possibility that the unmasked head address, while
2068
	 * pointing inside the ring, matches the batch_obj address range.
2069
	 * However, this is extremely unlikely.
2070
	 */
2071
	if (request->batch_obj) {
2072
		if (i915_head_inside_object(acthd, request->batch_obj,
2073
					    request_to_vm(request))) {
2074
			*inside = true;
2075
			return true;
2076
		}
2077
	}
2078
 
2079
	if (i915_head_inside_request(acthd, request->head, request->tail)) {
2080
		*inside = false;
2081
		return true;
2082
	}
2083
 
2084
	return false;
2085
}
2086
 
2087
static void i915_set_reset_status(struct intel_ring_buffer *ring,
2088
				  struct drm_i915_gem_request *request,
2089
				  u32 acthd)
2090
{
2091
	struct i915_ctx_hang_stats *hs = NULL;
2092
	bool inside, guilty;
2093
	unsigned long offset = 0;
2094
 
2095
	/* Innocent until proven guilty */
2096
	guilty = false;
2097
 
2098
	if (request->batch_obj)
2099
		offset = i915_gem_obj_offset(request->batch_obj,
2100
					     request_to_vm(request));
2101
 
2102
	if (ring->hangcheck.action != HANGCHECK_WAIT &&
2103
	    i915_request_guilty(request, acthd, &inside)) {
2104
		DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
2105
			  ring->name,
2106
			  inside ? "inside" : "flushing",
2107
			  offset,
2108
			  request->ctx ? request->ctx->id : 0,
2109
			  acthd);
2110
 
2111
		guilty = true;
2112
	}
2113
 
2114
	/* If contexts are disabled or this is the default context, use
2115
	 * file_priv->reset_state
2116
	 */
2117
	if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
2118
		hs = &request->ctx->hang_stats;
2119
	else if (request->file_priv)
2120
		hs = &request->file_priv->hang_stats;
2121
 
2122
	if (hs) {
2123
		if (guilty)
2124
			hs->batch_active++;
2125
		else
2126
			hs->batch_pending++;
2127
	}
2128
}
2129
 
2130
static void i915_gem_free_request(struct drm_i915_gem_request *request)
2131
{
2132
	list_del(&request->list);
2133
	i915_gem_request_remove_from_client(request);
2134
 
2135
	if (request->ctx)
2136
		i915_gem_context_unreference(request->ctx);
2137
 
2138
	kfree(request);
2139
}
2140
 
3031 serge 2141
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
2142
				      struct intel_ring_buffer *ring)
2143
{
4104 Serge 2144
	u32 completed_seqno;
2145
	u32 acthd;
2146
 
2147
	acthd = intel_ring_get_active_head(ring);
2148
	completed_seqno = ring->get_seqno(ring, false);
2149
 
3031 serge 2150
	while (!list_empty(&ring->request_list)) {
2151
		struct drm_i915_gem_request *request;
2332 Serge 2152
 
3031 serge 2153
		request = list_first_entry(&ring->request_list,
2154
					   struct drm_i915_gem_request,
2155
					   list);
2332 Serge 2156
 
4104 Serge 2157
		if (request->seqno > completed_seqno)
2158
			i915_set_reset_status(ring, request, acthd);
2159
 
2160
		i915_gem_free_request(request);
3031 serge 2161
	}
2332 Serge 2162
 
3031 serge 2163
	while (!list_empty(&ring->active_list)) {
2164
		struct drm_i915_gem_object *obj;
2332 Serge 2165
 
3031 serge 2166
		obj = list_first_entry(&ring->active_list,
2167
				       struct drm_i915_gem_object,
2168
				       ring_list);
2332 Serge 2169
 
3031 serge 2170
		i915_gem_object_move_to_inactive(obj);
2171
	}
2172
}
2332 Serge 2173
 
3746 Serge 2174
void i915_gem_restore_fences(struct drm_device *dev)
3031 serge 2175
{
2176
	struct drm_i915_private *dev_priv = dev->dev_private;
2177
	int i;
2332 Serge 2178
 
3031 serge 2179
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
2180
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
4104 Serge 2181
 
2182
		/*
2183
		 * Commit delayed tiling changes if we have an object still
2184
		 * attached to the fence, otherwise just clear the fence.
2185
		 */
2186
		if (reg->obj) {
2187
			i915_gem_object_update_fence(reg->obj, reg,
2188
						     reg->obj->tiling_mode);
2189
		} else {
2190
			i915_gem_write_fence(dev, i, NULL);
2191
		}
3031 serge 2192
	}
2193
}
2360 Serge 2194
 
3031 serge 2195
void i915_gem_reset(struct drm_device *dev)
2196
{
2197
	struct drm_i915_private *dev_priv = dev->dev_private;
2198
	struct intel_ring_buffer *ring;
2199
	int i;
2360 Serge 2200
 
3031 serge 2201
	for_each_ring(ring, dev_priv, i)
2202
		i915_gem_reset_ring_lists(dev_priv, ring);
2360 Serge 2203
 
3746 Serge 2204
	i915_gem_restore_fences(dev);
3031 serge 2205
}
2360 Serge 2206
 
2352 Serge 2207
/**
2208
 * This function clears the request list as sequence numbers are passed.
2209
 */
3031 serge 2210
void
2352 Serge 2211
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2212
{
2213
	uint32_t seqno;
2332 Serge 2214
 
2352 Serge 2215
	if (list_empty(&ring->request_list))
2216
		return;
2332 Serge 2217
 
2352 Serge 2218
	WARN_ON(i915_verify_lists(ring->dev));
2332 Serge 2219
 
3031 serge 2220
	seqno = ring->get_seqno(ring, true);
2332 Serge 2221
 
2352 Serge 2222
	while (!list_empty(&ring->request_list)) {
2223
		struct drm_i915_gem_request *request;
2332 Serge 2224
 
2352 Serge 2225
		request = list_first_entry(&ring->request_list,
2226
					   struct drm_i915_gem_request,
2227
					   list);
2332 Serge 2228
 
2352 Serge 2229
		if (!i915_seqno_passed(seqno, request->seqno))
2230
			break;
2332 Serge 2231
 
2352 Serge 2232
		trace_i915_gem_request_retire(ring, request->seqno);
3031 serge 2233
		/* We know the GPU must have read the request to have
2234
		 * sent us the seqno + interrupt, so use the position
2235
		 * of tail of the request to update the last known position
2236
		 * of the GPU head.
2237
		 */
2238
		ring->last_retired_head = request->tail;
2332 Serge 2239
 
4104 Serge 2240
		i915_gem_free_request(request);
2352 Serge 2241
	}
2332 Serge 2242
 
2352 Serge 2243
	/* Move any buffers on the active list that are no longer referenced
2244
	 * by the ringbuffer to the flushing/inactive lists as appropriate.
2245
	 */
2246
	while (!list_empty(&ring->active_list)) {
2247
		struct drm_i915_gem_object *obj;
2332 Serge 2248
 
2352 Serge 2249
		obj = list_first_entry(&ring->active_list,
2250
				      struct drm_i915_gem_object,
2251
				      ring_list);
2332 Serge 2252
 
3031 serge 2253
		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2352 Serge 2254
			break;
2332 Serge 2255
 
2352 Serge 2256
		i915_gem_object_move_to_inactive(obj);
2257
	}
2332 Serge 2258
 
2352 Serge 2259
	if (unlikely(ring->trace_irq_seqno &&
2260
		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2261
		ring->irq_put(ring);
2262
		ring->trace_irq_seqno = 0;
2263
	}
2332 Serge 2264
 
2352 Serge 2265
	WARN_ON(i915_verify_lists(ring->dev));
2266
}
2332 Serge 2267
 
2352 Serge 2268
void
2269
i915_gem_retire_requests(struct drm_device *dev)
2270
{
2271
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 2272
	struct intel_ring_buffer *ring;
2352 Serge 2273
	int i;
2332 Serge 2274
 
3031 serge 2275
	for_each_ring(ring, dev_priv, i)
2276
		i915_gem_retire_requests_ring(ring);
2352 Serge 2277
}
2278
 
2360 Serge 2279
static void
2280
i915_gem_retire_work_handler(struct work_struct *work)
2281
{
2282
	drm_i915_private_t *dev_priv;
2283
	struct drm_device *dev;
3031 serge 2284
	struct intel_ring_buffer *ring;
2360 Serge 2285
	bool idle;
2286
	int i;
2352 Serge 2287
 
2360 Serge 2288
	dev_priv = container_of(work, drm_i915_private_t,
2289
				mm.retire_work.work);
2290
	dev = dev_priv->dev;
2352 Serge 2291
 
2360 Serge 2292
	/* Come back later if the device is busy... */
2293
	if (!mutex_trylock(&dev->struct_mutex)) {
3482 Serge 2294
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2295
				   round_jiffies_up_relative(HZ));
3243 Serge 2296
        return;
2360 Serge 2297
	}
2352 Serge 2298
 
2360 Serge 2299
	i915_gem_retire_requests(dev);
2352 Serge 2300
 
2360 Serge 2301
	/* Send a periodic flush down the ring so we don't hold onto GEM
2302
	 * objects indefinitely.
2303
	 */
2304
	idle = true;
3031 serge 2305
	for_each_ring(ring, dev_priv, i) {
2306
		if (ring->gpu_caches_dirty)
4104 Serge 2307
			i915_add_request(ring, NULL);
2352 Serge 2308
 
2360 Serge 2309
		idle &= list_empty(&ring->request_list);
2310
	}
2352 Serge 2311
 
4104 Serge 2312
	if (!dev_priv->ums.mm_suspended && !idle)
3482 Serge 2313
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2314
				   round_jiffies_up_relative(HZ));
3031 serge 2315
	if (idle)
2316
		intel_mark_idle(dev);
2360 Serge 2317
 
2318
	mutex_unlock(&dev->struct_mutex);
2319
}
2320
 
2344 Serge 2321
/**
3031 serge 2322
 * Ensures that an object will eventually get non-busy by flushing any required
2323
 * write domains, emitting any outstanding lazy request and retiring and
2324
 * completed requests.
2352 Serge 2325
 */
3031 serge 2326
static int
2327
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2352 Serge 2328
{
3031 serge 2329
	int ret;
2352 Serge 2330
 
3031 serge 2331
	if (obj->active) {
2332
		ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2333
		if (ret)
2334
			return ret;
2352 Serge 2335
 
3031 serge 2336
		i915_gem_retire_requests_ring(obj->ring);
2337
	}
2352 Serge 2338
 
3031 serge 2339
	return 0;
2340
}
2352 Serge 2341
 
3243 Serge 2342
/**
2343
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2344
 * @DRM_IOCTL_ARGS: standard ioctl arguments
2345
 *
2346
 * Returns 0 if successful, else an error is returned with the remaining time in
2347
 * the timeout parameter.
2348
 *  -ETIME: object is still busy after timeout
2349
 *  -ERESTARTSYS: signal interrupted the wait
2350
 *  -ENOENT: object doesn't exist
2351
 * Also possible, but rare:
2352
 *  -EAGAIN: GPU wedged
2353
 *  -ENOMEM: damn
2354
 *  -ENODEV: Internal IRQ fail
2355
 *  -E?: The add request failed
2356
 *
2357
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2358
 * non-zero timeout parameter the wait ioctl will wait for the given number of
2359
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2360
 * without holding struct_mutex the object may become re-busied before this
2361
 * function completes. A similar but shorter race condition exists in the busy
2362
 * ioctl
2363
 */
4246 Serge 2364
int
2365
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2366
{
2367
	drm_i915_private_t *dev_priv = dev->dev_private;
2368
	struct drm_i915_gem_wait *args = data;
2369
	struct drm_i915_gem_object *obj;
2370
	struct intel_ring_buffer *ring = NULL;
2371
	struct timespec timeout_stack, *timeout = NULL;
2372
	unsigned reset_counter;
2373
	u32 seqno = 0;
2374
	int ret = 0;
2352 Serge 2375
 
4246 Serge 2376
	if (args->timeout_ns >= 0) {
2377
		timeout_stack = ns_to_timespec(args->timeout_ns);
2378
		timeout = &timeout_stack;
2379
	}
2352 Serge 2380
 
4246 Serge 2381
	ret = i915_mutex_lock_interruptible(dev);
2382
	if (ret)
2383
		return ret;
2352 Serge 2384
 
4246 Serge 2385
    if(args->bo_handle == -2)
2386
    {
2387
        obj = get_fb_obj();
2388
        drm_gem_object_reference(&obj->base);
2389
    }
2390
    else
2391
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2392
	if (&obj->base == NULL) {
2393
		mutex_unlock(&dev->struct_mutex);
2394
		return -ENOENT;
2395
	}
2352 Serge 2396
 
4246 Serge 2397
	/* Need to make sure the object gets inactive eventually. */
2398
	ret = i915_gem_object_flush_active(obj);
2399
	if (ret)
2400
		goto out;
2352 Serge 2401
 
4246 Serge 2402
	if (obj->active) {
2403
		seqno = obj->last_read_seqno;
2404
		ring = obj->ring;
2405
	}
2352 Serge 2406
 
4246 Serge 2407
	if (seqno == 0)
2408
		 goto out;
2352 Serge 2409
 
4246 Serge 2410
	/* Do this after OLR check to make sure we make forward progress polling
2411
	 * on this IOCTL with a 0 timeout (like busy ioctl)
2412
	 */
2413
	if (!args->timeout_ns) {
2414
		ret = -ETIME;
2415
		goto out;
2416
	}
2352 Serge 2417
 
4246 Serge 2418
	drm_gem_object_unreference(&obj->base);
2419
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2420
	mutex_unlock(&dev->struct_mutex);
2352 Serge 2421
 
4246 Serge 2422
	ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2423
	if (timeout)
2424
		args->timeout_ns = timespec_to_ns(timeout);
2425
	return ret;
3243 Serge 2426
 
4246 Serge 2427
out:
2428
	drm_gem_object_unreference(&obj->base);
2429
	mutex_unlock(&dev->struct_mutex);
2430
	return ret;
2431
}
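A sketch of how a Linux-style libdrm client would exercise the semantics documented above; KolibriOS user code reaches the driver through its own service interface instead (note the bo_handle == -2 framebuffer special case), so the drmIoctl() plumbing is an assumption for illustration:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Returns 0 once the object is idle; on failure errno is set, with ETIME
 * meaning the object was still busy when the timeout expired. */
static int wait_for_bo(int fd, uint32_t handle, int64_t timeout_ns)
{
	struct drm_i915_gem_wait w = {
		.bo_handle  = handle,
		.timeout_ns = timeout_ns,	/* 0 behaves like the busy ioctl */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &w))
		return -1;

	/* On success the remaining time is written back into w.timeout_ns. */
	return 0;
}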
3243 Serge 2432
 
2352 Serge 2433
/**
3031 serge 2434
 * i915_gem_object_sync - sync an object to a ring.
2435
 *
2436
 * @obj: object which may be in use on another ring.
2437
 * @to: ring we wish to use the object on. May be NULL.
2438
 *
2439
 * This code is meant to abstract object synchronization with the GPU.
2440
 * Calling with NULL implies synchronizing the object with the CPU
2441
 * rather than a particular GPU ring.
2442
 *
2443
 * Returns 0 if successful, else propagates up the lower layer error.
2344 Serge 2444
 */
2445
int
3031 serge 2446
i915_gem_object_sync(struct drm_i915_gem_object *obj,
2447
		     struct intel_ring_buffer *to)
2344 Serge 2448
{
3031 serge 2449
	struct intel_ring_buffer *from = obj->ring;
2450
	u32 seqno;
2451
	int ret, idx;
2332 Serge 2452
 
3031 serge 2453
	if (from == NULL || to == from)
2454
		return 0;
2332 Serge 2455
 
3031 serge 2456
	if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2457
		return i915_gem_object_wait_rendering(obj, false);
2332 Serge 2458
 
3031 serge 2459
	idx = intel_ring_sync_index(from, to);
2460
 
2461
	seqno = obj->last_read_seqno;
2462
	if (seqno <= from->sync_seqno[idx])
2463
		return 0;
2464
 
2465
	ret = i915_gem_check_olr(obj->ring, seqno);
2466
	if (ret)
2467
		return ret;
2468
 
2469
	ret = to->sync_to(to, from, seqno);
2470
	if (!ret)
3243 Serge 2471
		/* We use last_read_seqno because sync_to()
2472
		 * might have just caused seqno wrap under
2473
		 * the radar.
2474
		 */
2475
		from->sync_seqno[idx] = obj->last_read_seqno;
3031 serge 2476
 
2477
	return ret;
2344 Serge 2478
}
2332 Serge 2479
 
2344 Serge 2480
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2481
{
2482
	u32 old_write_domain, old_read_domains;
2332 Serge 2483
 
2344 Serge 2484
	/* Force a pagefault for domain tracking on next user access */
2485
//	i915_gem_release_mmap(obj);
2332 Serge 2486
 
2344 Serge 2487
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2488
		return;
2332 Serge 2489
 
3480 Serge 2490
	/* Wait for any direct GTT access to complete */
2491
	mb();
2492
 
2344 Serge 2493
	old_read_domains = obj->base.read_domains;
2494
	old_write_domain = obj->base.write_domain;
2351 Serge 2495
 
2344 Serge 2496
	obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2497
	obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2332 Serge 2498
 
2351 Serge 2499
	trace_i915_gem_object_change_domain(obj,
2500
					    old_read_domains,
2501
					    old_write_domain);
2344 Serge 2502
}
2332 Serge 2503
 
4104 Serge 2504
int i915_vma_unbind(struct i915_vma *vma)
2344 Serge 2505
{
4104 Serge 2506
	struct drm_i915_gem_object *obj = vma->obj;
3031 serge 2507
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
3480 Serge 2508
	int ret;
2332 Serge 2509
 
3263 Serge 2510
    if(obj == get_fb_obj())
2511
        return 0;
2512
 
4104 Serge 2513
	if (list_empty(&vma->vma_link))
2344 Serge 2514
		return 0;
2332 Serge 2515
 
4104 Serge 2516
	if (!drm_mm_node_allocated(&vma->node))
2517
		goto destroy;
2518
 
3031 serge 2519
	if (obj->pin_count)
2520
		return -EBUSY;
2332 Serge 2521
 
3243 Serge 2522
	BUG_ON(obj->pages == NULL);
3031 serge 2523
 
2344 Serge 2524
	ret = i915_gem_object_finish_gpu(obj);
3031 serge 2525
	if (ret)
2344 Serge 2526
		return ret;
2527
	/* Continue on if we fail due to EIO, the GPU is hung so we
2528
	 * should be safe and we need to cleanup or else we might
2529
	 * cause memory corruption through use-after-free.
2530
	 */
2332 Serge 2531
 
2344 Serge 2532
	i915_gem_object_finish_gtt(obj);
2332 Serge 2533
 
2344 Serge 2534
	/* release the fence reg _after_ flushing */
2535
	ret = i915_gem_object_put_fence(obj);
3031 serge 2536
	if (ret)
2344 Serge 2537
		return ret;
2332 Serge 2538
 
4104 Serge 2539
	trace_i915_vma_unbind(vma);
2332 Serge 2540
 
3031 serge 2541
	if (obj->has_global_gtt_mapping)
3243 Serge 2542
        i915_gem_gtt_unbind_object(obj);
3031 serge 2543
	if (obj->has_aliasing_ppgtt_mapping) {
2544
		i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
2545
		obj->has_aliasing_ppgtt_mapping = 0;
2546
	}
2547
	i915_gem_gtt_finish_object(obj);
4104 Serge 2548
	i915_gem_object_unpin_pages(obj);
2332 Serge 2549
 
4104 Serge 2550
	list_del(&vma->mm_list);
2344 Serge 2551
	/* Avoid an unnecessary call to unbind on rebind. */
4104 Serge 2552
	if (i915_is_ggtt(vma->vm))
2344 Serge 2553
		obj->map_and_fenceable = true;
2332 Serge 2554
 
4104 Serge 2555
	drm_mm_remove_node(&vma->node);
2332 Serge 2556
 
4104 Serge 2557
destroy:
2558
	i915_gem_vma_destroy(vma);
2559
 
2560
	/* Since the unbound list is global, only move to that list if
2561
	 * no more VMAs exist.
2562
	 * NB: Until we have real VMAs there will only ever be one */
2563
	WARN_ON(!list_empty(&obj->vma_list));
2564
	if (list_empty(&obj->vma_list))
2565
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2566
 
2344 Serge 2567
	return 0;
2568
}
2332 Serge 2569
 
4104 Serge 2570
/**
2571
 * Unbinds an object from the global GTT aperture.
2572
 */
2573
int
2574
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
2575
{
2576
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2577
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
2578
 
2579
	if (!i915_gem_obj_ggtt_bound(obj))
2580
		return 0;
2581
 
2582
	if (obj->pin_count)
2583
		return -EBUSY;
2584
 
2585
	BUG_ON(obj->pages == NULL);
2586
 
2587
	return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
2588
}
2589
 
3031 serge 2590
int i915_gpu_idle(struct drm_device *dev)
2344 Serge 2591
{
2592
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 2593
	struct intel_ring_buffer *ring;
2344 Serge 2594
	int ret, i;
2332 Serge 2595
 
2344 Serge 2596
	/* Flush everything onto the inactive list. */
3031 serge 2597
	for_each_ring(ring, dev_priv, i) {
2598
		ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
2344 Serge 2599
		if (ret)
2600
			return ret;
3031 serge 2601
 
3243 Serge 2602
		ret = intel_ring_idle(ring);
3031 serge 2603
		if (ret)
2604
			return ret;
2344 Serge 2605
	}
2332 Serge 2606
 
2344 Serge 2607
	return 0;
2608
}
2332 Serge 2609
 
3480 Serge 2610
static void i965_write_fence_reg(struct drm_device *dev, int reg,
3031 serge 2611
					struct drm_i915_gem_object *obj)
2612
{
2613
	drm_i915_private_t *dev_priv = dev->dev_private;
3480 Serge 2614
	int fence_reg;
2615
	int fence_pitch_shift;
2332 Serge 2616
 
3480 Serge 2617
	if (INTEL_INFO(dev)->gen >= 6) {
2618
		fence_reg = FENCE_REG_SANDYBRIDGE_0;
2619
		fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2620
	} else {
2621
		fence_reg = FENCE_REG_965_0;
2622
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2623
	}
2332 Serge 2624
 
4104 Serge 2625
	fence_reg += reg * 8;
2626
 
2627
	/* To w/a incoherency with non-atomic 64-bit register updates,
2628
	 * we split the 64-bit update into two 32-bit writes. In order
2629
	 * for a partial fence not to be evaluated between writes, we
2630
	 * precede the update with write to turn off the fence register,
2631
	 * and only enable the fence as the last step.
2632
	 *
2633
	 * For extra levels of paranoia, we make sure each step lands
2634
	 * before applying the next step.
2635
	 */
2636
	I915_WRITE(fence_reg, 0);
2637
	POSTING_READ(fence_reg);
2638
 
3031 serge 2639
	if (obj) {
4104 Serge 2640
		u32 size = i915_gem_obj_ggtt_size(obj);
2641
		uint64_t val;
2332 Serge 2642
 
4104 Serge 2643
		val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3031 serge 2644
				 0xfffff000) << 32;
4104 Serge 2645
		val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3480 Serge 2646
		val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3031 serge 2647
		if (obj->tiling_mode == I915_TILING_Y)
2648
			val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2649
		val |= I965_FENCE_REG_VALID;
2332 Serge 2650
 
4104 Serge 2651
		I915_WRITE(fence_reg + 4, val >> 32);
2652
		POSTING_READ(fence_reg + 4);
2653
 
2654
		I915_WRITE(fence_reg + 0, val);
4280 Serge 2655
 
2656
        dbgprintf("%s val %x%x\n",__FUNCTION__, (int)(val >> 32), (int)val);
2657
 
3480 Serge 2658
	POSTING_READ(fence_reg);
4104 Serge 2659
	} else {
2660
		I915_WRITE(fence_reg + 4, 0);
2661
		POSTING_READ(fence_reg + 4);
2662
	}
3031 serge 2663
}
2332 Serge 2664
 
3031 serge 2665
static void i915_write_fence_reg(struct drm_device *dev, int reg,
2666
				 struct drm_i915_gem_object *obj)
2667
{
2668
	drm_i915_private_t *dev_priv = dev->dev_private;
2669
	u32 val;
2332 Serge 2670
 
3031 serge 2671
	if (obj) {
4104 Serge 2672
		u32 size = i915_gem_obj_ggtt_size(obj);
3031 serge 2673
		int pitch_val;
2674
		int tile_width;
2332 Serge 2675
 
4104 Serge 2676
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3031 serge 2677
		     (size & -size) != size ||
4104 Serge 2678
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2679
		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2680
		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
2332 Serge 2681
 
3031 serge 2682
		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2683
			tile_width = 128;
2684
		else
2685
			tile_width = 512;
2332 Serge 2686
 
3031 serge 2687
		/* Note: pitch better be a power of two tile widths */
2688
		pitch_val = obj->stride / tile_width;
2689
		pitch_val = ffs(pitch_val) - 1;
2332 Serge 2690
 
4104 Serge 2691
		val = i915_gem_obj_ggtt_offset(obj);
3031 serge 2692
		if (obj->tiling_mode == I915_TILING_Y)
2693
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2694
		val |= I915_FENCE_SIZE_BITS(size);
2695
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2696
		val |= I830_FENCE_REG_VALID;
2697
	} else
2698
		val = 0;
2332 Serge 2699
 
3031 serge 2700
	if (reg < 8)
2701
		reg = FENCE_REG_830_0 + reg * 4;
2702
	else
2703
		reg = FENCE_REG_945_8 + (reg - 8) * 4;
2332 Serge 2704
 
3031 serge 2705
	I915_WRITE(reg, val);
2706
	POSTING_READ(reg);
2707
}
2332 Serge 2708
 
3031 serge 2709
static void i830_write_fence_reg(struct drm_device *dev, int reg,
2710
				struct drm_i915_gem_object *obj)
2711
{
2712
	drm_i915_private_t *dev_priv = dev->dev_private;
2713
	uint32_t val;
2344 Serge 2714
 
3031 serge 2715
	if (obj) {
4104 Serge 2716
		u32 size = i915_gem_obj_ggtt_size(obj);
3031 serge 2717
		uint32_t pitch_val;
2344 Serge 2718
 
4104 Serge 2719
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3031 serge 2720
		     (size & -size) != size ||
4104 Serge 2721
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
2722
		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
2723
		     i915_gem_obj_ggtt_offset(obj), size);
2344 Serge 2724
 
3031 serge 2725
		pitch_val = obj->stride / 128;
2726
		pitch_val = ffs(pitch_val) - 1;
2344 Serge 2727
 
4104 Serge 2728
		val = i915_gem_obj_ggtt_offset(obj);
3031 serge 2729
		if (obj->tiling_mode == I915_TILING_Y)
2730
			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2731
		val |= I830_FENCE_SIZE_BITS(size);
2732
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2733
		val |= I830_FENCE_REG_VALID;
2734
	} else
2735
		val = 0;
2736
 
2737
	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
2738
	POSTING_READ(FENCE_REG_830_0 + reg * 4);
2739
}
2740
 
3480 Serge 2741
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2742
{
2743
	return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2744
}
2745
 
3031 serge 2746
static void i915_gem_write_fence(struct drm_device *dev, int reg,
2747
				 struct drm_i915_gem_object *obj)
2332 Serge 2748
{
3480 Serge 2749
	struct drm_i915_private *dev_priv = dev->dev_private;
2750
 
2751
	/* Ensure that all CPU reads are completed before installing a fence
2752
	 * and all writes before removing the fence.
2753
	 */
2754
	if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2755
		mb();
2756
 
4104 Serge 2757
	WARN(obj && (!obj->stride || !obj->tiling_mode),
2758
	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
2759
	     obj->stride, obj->tiling_mode);
2760
 
3031 serge 2761
	switch (INTEL_INFO(dev)->gen) {
2762
	case 7:
3480 Serge 2763
	case 6:
3031 serge 2764
	case 5:
2765
	case 4: i965_write_fence_reg(dev, reg, obj); break;
2766
	case 3: i915_write_fence_reg(dev, reg, obj); break;
2767
	case 2: i830_write_fence_reg(dev, reg, obj); break;
3480 Serge 2768
	default: BUG();
3031 serge 2769
	}
3480 Serge 2770
 
2771
	/* And similarly be paranoid that no direct access to this region
2772
	 * is reordered to before the fence is installed.
2773
	 */
2774
	if (i915_gem_object_needs_mb(obj))
2775
		mb();
2344 Serge 2776
}
2777
 
3031 serge 2778
static inline int fence_number(struct drm_i915_private *dev_priv,
2779
			       struct drm_i915_fence_reg *fence)
2344 Serge 2780
{
3031 serge 2781
	return fence - dev_priv->fence_regs;
2782
}
2332 Serge 2783
 
3031 serge 2784
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2785
					 struct drm_i915_fence_reg *fence,
2786
					 bool enable)
2787
{
4104 Serge 2788
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2789
	int reg = fence_number(dev_priv, fence);
2332 Serge 2790
 
4104 Serge 2791
	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3031 serge 2792
 
2793
	if (enable) {
4104 Serge 2794
		obj->fence_reg = reg;
3031 serge 2795
		fence->obj = obj;
2796
		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
2797
	} else {
2798
		obj->fence_reg = I915_FENCE_REG_NONE;
2799
		fence->obj = NULL;
2800
		list_del_init(&fence->lru_list);
2344 Serge 2801
	}
4104 Serge 2802
	obj->fence_dirty = false;
3031 serge 2803
}
2344 Serge 2804
 
3031 serge 2805
static int
3480 Serge 2806
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3031 serge 2807
{
2808
	if (obj->last_fenced_seqno) {
2809
		int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2352 Serge 2810
		if (ret)
2811
			return ret;
2344 Serge 2812
 
2813
		obj->last_fenced_seqno = 0;
2814
	}
2815
 
3031 serge 2816
	obj->fenced_gpu_access = false;
2332 Serge 2817
	return 0;
2818
}
2819
 
2820
int
2344 Serge 2821
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2332 Serge 2822
{
3031 serge 2823
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3746 Serge 2824
	struct drm_i915_fence_reg *fence;
2332 Serge 2825
	int ret;
2826
 
3480 Serge 2827
	ret = i915_gem_object_wait_fence(obj);
2332 Serge 2828
	if (ret)
2829
		return ret;
2830
 
3031 serge 2831
	if (obj->fence_reg == I915_FENCE_REG_NONE)
2832
		return 0;
2332 Serge 2833
 
3746 Serge 2834
	fence = &dev_priv->fence_regs[obj->fence_reg];
2835
 
3031 serge 2836
	i915_gem_object_fence_lost(obj);
3746 Serge 2837
	i915_gem_object_update_fence(obj, fence, false);
2344 Serge 2838
 
2332 Serge 2839
	return 0;
2840
}
2841
 
3031 serge 2842
static struct drm_i915_fence_reg *
2843
i915_find_fence_reg(struct drm_device *dev)
2844
{
2845
	struct drm_i915_private *dev_priv = dev->dev_private;
2846
	struct drm_i915_fence_reg *reg, *avail;
2847
	int i;
2332 Serge 2848
 
3031 serge 2849
	/* First try to find a free reg */
2850
	avail = NULL;
2851
	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2852
		reg = &dev_priv->fence_regs[i];
2853
		if (!reg->obj)
2854
			return reg;
2332 Serge 2855
 
3031 serge 2856
		if (!reg->pin_count)
2857
			avail = reg;
2858
	}
2332 Serge 2859
 
3031 serge 2860
	if (avail == NULL)
2861
		return NULL;
2332 Serge 2862
 
3031 serge 2863
	/* None available, try to steal one or wait for a user to finish */
2864
	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2865
		if (reg->pin_count)
2866
			continue;
2332 Serge 2867
 
3031 serge 2868
		return reg;
2869
	}
2332 Serge 2870
 
3031 serge 2871
	return NULL;
2872
}
2332 Serge 2873
 
3031 serge 2874
/**
2875
 * i915_gem_object_get_fence - set up fencing for an object
2876
 * @obj: object to map through a fence reg
2877
 *
2878
 * When mapping objects through the GTT, userspace wants to be able to write
2879
 * to them without having to worry about swizzling if the object is tiled.
2880
 * This function walks the fence regs looking for a free one for @obj,
2881
 * stealing one if it can't find any.
2882
 *
2883
 * It then sets up the reg based on the object's properties: address, pitch
2884
 * and tiling format.
2885
 *
2886
 * For an untiled surface, this removes any existing fence.
2887
 */
2888
int
2889
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2890
{
2891
	struct drm_device *dev = obj->base.dev;
2892
	struct drm_i915_private *dev_priv = dev->dev_private;
2893
	bool enable = obj->tiling_mode != I915_TILING_NONE;
2894
	struct drm_i915_fence_reg *reg;
2895
	int ret;
2332 Serge 2896
 
3031 serge 2897
	/* Have we updated the tiling parameters upon the object and so
2898
	 * will need to serialise the write to the associated fence register?
2899
	 */
2900
	if (obj->fence_dirty) {
3480 Serge 2901
		ret = i915_gem_object_wait_fence(obj);
3031 serge 2902
		if (ret)
2903
			return ret;
2904
	}
2332 Serge 2905
 
3031 serge 2906
	/* Just update our place in the LRU if our fence is getting reused. */
2907
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
2908
		reg = &dev_priv->fence_regs[obj->fence_reg];
2909
		if (!obj->fence_dirty) {
2910
			list_move_tail(&reg->lru_list,
2911
				       &dev_priv->mm.fence_list);
2912
			return 0;
2913
		}
2914
	} else if (enable) {
2915
		reg = i915_find_fence_reg(dev);
2916
		if (reg == NULL)
2917
			return -EDEADLK;
2332 Serge 2918
 
3031 serge 2919
		if (reg->obj) {
2920
			struct drm_i915_gem_object *old = reg->obj;
2332 Serge 2921
 
3480 Serge 2922
			ret = i915_gem_object_wait_fence(old);
3031 serge 2923
			if (ret)
2924
				return ret;
2332 Serge 2925
 
3031 serge 2926
			i915_gem_object_fence_lost(old);
2927
		}
2928
	} else
2929
		return 0;
2332 Serge 2930
 
3031 serge 2931
	i915_gem_object_update_fence(obj, reg, enable);
2332 Serge 2932
 
3031 serge 2933
	return 0;
2934
}
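A minimal sketch pairing the two entry points above: a hypothetical caller that drops a stale fence after a tiling change and claims a fresh one only if the object is still tiled:

static int refence_after_tiling_change(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_put_fence(obj);	/* waits on last_fenced_seqno, releases the reg */
	if (ret)
		return ret;

	if (obj->tiling_mode != I915_TILING_NONE)
		return i915_gem_object_get_fence(obj);	/* find a free reg or steal an unpinned one */

	return 0;
}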
2332 Serge 2935
 
3031 serge 2936
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2937
				     struct drm_mm_node *gtt_space,
2938
				     unsigned long cache_level)
2939
{
2940
	struct drm_mm_node *other;
2332 Serge 2941
 
3031 serge 2942
	/* On non-LLC machines we have to be careful when putting differing
2943
	 * types of snoopable memory together to avoid the prefetcher
3480 Serge 2944
	 * crossing memory domains and dying.
3031 serge 2945
	 */
2946
	if (HAS_LLC(dev))
2947
		return true;
2332 Serge 2948
 
4104 Serge 2949
	if (!drm_mm_node_allocated(gtt_space))
3031 serge 2950
		return true;
2332 Serge 2951
 
3031 serge 2952
	if (list_empty(&gtt_space->node_list))
2953
		return true;
2332 Serge 2954
 
3031 serge 2955
	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2956
	if (other->allocated && !other->hole_follows && other->color != cache_level)
2957
		return false;
2344 Serge 2958
 
3031 serge 2959
	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2960
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2961
		return false;
2344 Serge 2962
 
3031 serge 2963
	return true;
2964
}
2344 Serge 2965
 
3031 serge 2966
static void i915_gem_verify_gtt(struct drm_device *dev)
2967
{
2968
#if WATCH_GTT
2969
	struct drm_i915_private *dev_priv = dev->dev_private;
2970
	struct drm_i915_gem_object *obj;
2971
	int err = 0;
2344 Serge 2972
 
4104 Serge 2973
	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
3031 serge 2974
		if (obj->gtt_space == NULL) {
2975
			printk(KERN_ERR "object found on GTT list with no space reserved\n");
2976
			err++;
2977
			continue;
2978
		}
2344 Serge 2979
 
3031 serge 2980
		if (obj->cache_level != obj->gtt_space->color) {
2981
			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
4104 Serge 2982
			       i915_gem_obj_ggtt_offset(obj),
2983
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3031 serge 2984
			       obj->cache_level,
2985
			       obj->gtt_space->color);
2986
			err++;
2987
			continue;
2988
		}
2344 Serge 2989
 
3031 serge 2990
		if (!i915_gem_valid_gtt_space(dev,
2991
					      obj->gtt_space,
2992
					      obj->cache_level)) {
2993
			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
4104 Serge 2994
			       i915_gem_obj_ggtt_offset(obj),
2995
			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
3031 serge 2996
			       obj->cache_level);
2997
			err++;
2998
			continue;
2999
		}
3000
	}
2344 Serge 3001
 
3031 serge 3002
	WARN_ON(err);
3003
#endif
2326 Serge 3004
}
3005
 
2332 Serge 3006
/**
3007
 * Finds free space in the GTT aperture and binds the object there.
3008
 */
3009
static int
4104 Serge 3010
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3011
			   struct i915_address_space *vm,
2332 Serge 3012
			    unsigned alignment,
3031 serge 3013
			    bool map_and_fenceable,
3014
			    bool nonblocking)
2332 Serge 3015
{
3016
	struct drm_device *dev = obj->base.dev;
3017
	drm_i915_private_t *dev_priv = dev->dev_private;
3018
	u32 size, fence_size, fence_alignment, unfenced_alignment;
4104 Serge 3019
	size_t gtt_max =
3020
		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
3021
	struct i915_vma *vma;
2332 Serge 3022
	int ret;
2326 Serge 3023
 
2332 Serge 3024
	fence_size = i915_gem_get_gtt_size(dev,
3025
					   obj->base.size,
3026
					   obj->tiling_mode);
3027
	fence_alignment = i915_gem_get_gtt_alignment(dev,
3028
						     obj->base.size,
3480 Serge 3029
						     obj->tiling_mode, true);
2332 Serge 3030
	unfenced_alignment =
3480 Serge 3031
		i915_gem_get_gtt_alignment(dev,
2332 Serge 3032
						    obj->base.size,
3480 Serge 3033
						    obj->tiling_mode, false);
2332 Serge 3034
 
3035
	if (alignment == 0)
3036
		alignment = map_and_fenceable ? fence_alignment :
3037
						unfenced_alignment;
3038
	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
3039
		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
3040
		return -EINVAL;
3041
	}
3042
 
3043
	size = map_and_fenceable ? fence_size : obj->base.size;
3044
 
3045
	/* If the object is bigger than the entire aperture, reject it early
3046
	 * before evicting everything in a vain attempt to find space.
3047
	 */
4104 Serge 3048
	if (obj->base.size > gtt_max) {
3049
		DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
3050
			  obj->base.size,
3051
			  map_and_fenceable ? "mappable" : "total",
3052
			  gtt_max);
2332 Serge 3053
		return -E2BIG;
3054
	}
3055
 
3031 serge 3056
	ret = i915_gem_object_get_pages(obj);
3057
	if (ret)
3058
		return ret;
3059
 
3243 Serge 3060
	i915_gem_object_pin_pages(obj);
3061
 
4104 Serge 3062
	BUG_ON(!i915_is_ggtt(vm));
3063
 
3064
	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3065
	if (IS_ERR(vma)) {
3066
		ret = PTR_ERR(vma);
3067
		goto err_unpin;
3243 Serge 3068
	}
3069
 
4104 Serge 3070
	/* For now we only ever use 1 vma per object */
3071
	WARN_ON(!list_is_singular(&obj->vma_list));
3072
 
3073
search_free:
3074
	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3075
						  size, alignment,
3076
						  obj->cache_level, 0, gtt_max,
3077
						  DRM_MM_SEARCH_DEFAULT);
3243 Serge 3078
	if (ret) {
2332 Serge 3079
 
4104 Serge 3080
		goto err_free_vma;
2332 Serge 3081
	}
4104 Serge 3082
	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
3083
					      obj->cache_level))) {
3084
		ret = -EINVAL;
3085
		goto err_remove_node;
3031 serge 3086
	}
2332 Serge 3087
 
3031 serge 3088
	ret = i915_gem_gtt_prepare_object(obj);
4104 Serge 3089
	if (ret)
3090
		goto err_remove_node;
2332 Serge 3091
 
4104 Serge 3092
	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3093
	list_add_tail(&vma->mm_list, &vm->inactive_list);
2332 Serge 3094
 
4104 Serge 3095
	if (i915_is_ggtt(vm)) {
3096
		bool mappable, fenceable;
2332 Serge 3097
 
4104 Serge 3098
		fenceable = (vma->node.size == fence_size &&
3099
			     (vma->node.start & (fence_alignment - 1)) == 0);
2332 Serge 3100
 
4104 Serge 3101
		mappable = (vma->node.start + obj->base.size <=
3102
			    dev_priv->gtt.mappable_end);
2332 Serge 3103
 
3104
		obj->map_and_fenceable = mappable && fenceable;
4104 Serge 3105
	}
2332 Serge 3106
 
4104 Serge 3107
	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
3108
 
3109
	trace_i915_vma_bind(vma, map_and_fenceable);
3031 serge 3110
	i915_gem_verify_gtt(dev);
2332 Serge 3111
	return 0;
4104 Serge 3112
 
3113
err_remove_node:
3114
	drm_mm_remove_node(&vma->node);
3115
err_free_vma:
3116
	i915_gem_vma_destroy(vma);
3117
err_unpin:
3118
	i915_gem_object_unpin_pages(obj);
3119
	return ret;
2332 Serge 3120
}
3121
 
4104 Serge 3122
bool
3123
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3124
			bool force)
2332 Serge 3125
{
3126
	/* If we don't have a page list set up, then we're not pinned
3127
	 * to GPU, and we can ignore the cache flush because it'll happen
3128
	 * again at bind time.
3129
	 */
3243 Serge 3130
	if (obj->pages == NULL)
4104 Serge 3131
		return false;
2332 Serge 3132
 
3480 Serge 3133
	/*
3134
	 * Stolen memory is always coherent with the GPU as it is explicitly
3135
	 * marked as wc by the system, or the system is cache-coherent.
3136
	 */
3137
	if (obj->stolen)
4104 Serge 3138
		return false;
3480 Serge 3139
 
2332 Serge 3140
	/* If the GPU is snooping the contents of the CPU cache,
3141
	 * we do not need to manually clear the CPU cache lines.  However,
3142
	 * the caches are only snooped when the render cache is
3143
	 * flushed/invalidated.  As we always have to emit invalidations
3144
	 * and flushes when moving into and out of the RENDER domain, correct
3145
	 * snooping behaviour occurs naturally as the result of our domain
3146
	 * tracking.
3147
	 */
4104 Serge 3148
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3149
		return false;
2332 Serge 3150
 
4293 Serge 3151
	trace_i915_gem_object_clflush(obj);
3152
	drm_clflush_sg(obj->pages);
2344 Serge 3153
 
4104 Serge 3154
	return true;
2332 Serge 3155
}
3156
 
2344 Serge 3157
/** Flushes the GTT write domain for the object if it's dirty. */
3158
static void
3159
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3160
{
3161
	uint32_t old_write_domain;
2332 Serge 3162
 
2344 Serge 3163
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3164
		return;
2332 Serge 3165
 
2344 Serge 3166
	/* No actual flushing is required for the GTT write domain.  Writes
3167
	 * to it immediately go to main memory as far as we know, so there's
3168
	 * no chipset flush.  It also doesn't land in render cache.
3169
	 *
3170
	 * However, we do have to enforce the order so that all writes through
3171
	 * the GTT land before any writes to the device, such as updates to
3172
	 * the GATT itself.
3173
	 */
3174
	wmb();
2332 Serge 3175
 
2344 Serge 3176
	old_write_domain = obj->base.write_domain;
3177
	obj->base.write_domain = 0;
2332 Serge 3178
 
2351 Serge 3179
	trace_i915_gem_object_change_domain(obj,
3180
					    obj->base.read_domains,
3181
					    old_write_domain);
2344 Serge 3182
}
2332 Serge 3183
 
3184
/** Flushes the CPU write domain for the object if it's dirty. */
2326 Serge 3185
static void
4104 Serge 3186
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3187
				       bool force)
2332 Serge 3188
{
3189
	uint32_t old_write_domain;
3190
 
3191
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3192
		return;
3193
 
4104 Serge 3194
	if (i915_gem_clflush_object(obj, force))
3243 Serge 3195
	i915_gem_chipset_flush(obj->base.dev);
4104 Serge 3196
 
2332 Serge 3197
	old_write_domain = obj->base.write_domain;
3198
	obj->base.write_domain = 0;
3199
 
2351 Serge 3200
	trace_i915_gem_object_change_domain(obj,
3201
					    obj->base.read_domains,
3202
					    old_write_domain);
2332 Serge 3203
}
3204
 
3205
/**
3206
 * Moves a single object to the GTT read, and possibly write domain.
3207
 *
3208
 * This function returns when the move is complete, including waiting on
3209
 * flushes to occur.
3210
 */
3211
int
3212
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3213
{
3031 serge 3214
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2332 Serge 3215
	uint32_t old_write_domain, old_read_domains;
3216
	int ret;
3217
 
3218
	/* Not valid to be called on unbound objects. */
4104 Serge 3219
	if (!i915_gem_obj_bound_any(obj))
2332 Serge 3220
		return -EINVAL;
3221
 
3222
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3223
		return 0;
3224
 
3031 serge 3225
	ret = i915_gem_object_wait_rendering(obj, !write);
2332 Serge 3226
	if (ret)
3227
		return ret;
3228
 
4104 Serge 3229
	i915_gem_object_flush_cpu_write_domain(obj, false);
2332 Serge 3230
 
3480 Serge 3231
	/* Serialise direct access to this object with the barriers for
3232
	 * coherent writes from the GPU, by effectively invalidating the
3233
	 * GTT domain upon first access.
3234
	 */
3235
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3236
		mb();
3237
 
2332 Serge 3238
	old_write_domain = obj->base.write_domain;
3239
	old_read_domains = obj->base.read_domains;
3240
 
3241
	/* It should now be out of any other write domains, and we can update
3242
	 * the domain values for our changes.
3243
	 */
3244
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3245
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3246
	if (write) {
3247
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3248
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3249
		obj->dirty = 1;
3250
	}
3251
 
2351 Serge 3252
	trace_i915_gem_object_change_domain(obj,
3253
					    old_read_domains,
3254
					    old_write_domain);
3255
 
3031 serge 3256
	/* And bump the LRU for this access */
4104 Serge 3257
	if (i915_gem_object_is_inactive(obj)) {
3258
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
3259
							   &dev_priv->gtt.base);
3260
		if (vma)
3261
			list_move_tail(&vma->mm_list,
3262
				       &dev_priv->gtt.base.inactive_list);
3031 serge 3263
 
4104 Serge 3264
	}
3265
 
2332 Serge 3266
	return 0;
3267
}
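A sketch of the usual sequence before letting the CPU write a tiled object through the GTT aperture; the real callers are the pwrite and fault paths, so treat this helper as illustrative only:

static int prepare_gtt_write(struct drm_i915_gem_object *obj)
{
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);	/* flush CPU writes, mark dirty */
	if (ret)
		return ret;

	/* Tiled objects also want a fence so aperture accesses are detiled;
	 * for untiled objects this simply drops any old fence. */
	return i915_gem_object_get_fence(obj);
}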
3268
 
2335 Serge 3269
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3270
				    enum i915_cache_level cache_level)
3271
{
3031 serge 3272
	struct drm_device *dev = obj->base.dev;
3273
	drm_i915_private_t *dev_priv = dev->dev_private;
4104 Serge 3274
	struct i915_vma *vma;
2335 Serge 3275
	int ret;
2332 Serge 3276
 
2335 Serge 3277
	if (obj->cache_level == cache_level)
3278
		return 0;
2332 Serge 3279
 
2335 Serge 3280
	if (obj->pin_count) {
3281
		DRM_DEBUG("can not change the cache level of pinned objects\n");
3282
		return -EBUSY;
3283
	}
2332 Serge 3284
 
4104 Serge 3285
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
3286
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
3287
			ret = i915_vma_unbind(vma);
3031 serge 3288
		if (ret)
3289
			return ret;
4104 Serge 3290
 
3291
			break;
3292
		}
3031 serge 3293
	}
3294
 
4104 Serge 3295
	if (i915_gem_obj_bound_any(obj)) {
2335 Serge 3296
		ret = i915_gem_object_finish_gpu(obj);
3297
		if (ret)
3298
			return ret;
2332 Serge 3299
 
2335 Serge 3300
		i915_gem_object_finish_gtt(obj);
2332 Serge 3301
 
2335 Serge 3302
		/* Before SandyBridge, you could not use tiling or fence
3303
		 * registers with snooped memory, so relinquish any fences
3304
		 * currently pointing to our region in the aperture.
3305
		 */
3031 serge 3306
		if (INTEL_INFO(dev)->gen < 6) {
2335 Serge 3307
			ret = i915_gem_object_put_fence(obj);
3308
			if (ret)
3309
				return ret;
3310
		}
2332 Serge 3311
 
3031 serge 3312
		if (obj->has_global_gtt_mapping)
3313
			i915_gem_gtt_bind_object(obj, cache_level);
3314
		if (obj->has_aliasing_ppgtt_mapping)
3315
			i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
3316
					       obj, cache_level);
2335 Serge 3317
	}
2332 Serge 3318
 
4104 Serge 3319
	list_for_each_entry(vma, &obj->vma_list, vma_link)
3320
		vma->node.color = cache_level;
3321
	obj->cache_level = cache_level;
3322
 
3323
	if (cpu_write_needs_clflush(obj)) {
2335 Serge 3324
		u32 old_read_domains, old_write_domain;
2332 Serge 3325
 
2335 Serge 3326
		/* If we're coming from LLC cached, then we haven't
3327
		 * actually been tracking whether the data is in the
3328
		 * CPU cache or not, since we only allow one bit set
3329
		 * in obj->write_domain and have been skipping the clflushes.
3330
		 * Just set it to the CPU cache for now.
3331
		 */
3332
		WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
2332 Serge 3333
 
2335 Serge 3334
		old_read_domains = obj->base.read_domains;
3335
		old_write_domain = obj->base.write_domain;
2332 Serge 3336
 
2335 Serge 3337
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3338
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2332 Serge 3339
 
2351 Serge 3340
		trace_i915_gem_object_change_domain(obj,
3341
						    old_read_domains,
3342
						    old_write_domain);
2344 Serge 3343
	}
2332 Serge 3344
 
3031 serge 3345
	i915_gem_verify_gtt(dev);
2335 Serge 3346
	return 0;
3347
}
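 
/* Illustrative sketch (hypothetical caller, not part of the original file):
 * switching an already-bound object to uncached PTEs, e.g. before scanout,
 * under struct_mutex:
 *
 *	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
 *	if (ret)
 *		return ret;
 *	// obj->cache_level and every vma->node.color now read I915_CACHE_NONE
 */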
2332 Serge 3348
 
3260 Serge 3349
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3350
			       struct drm_file *file)
3351
{
3352
	struct drm_i915_gem_caching *args = data;
3353
	struct drm_i915_gem_object *obj;
3354
	int ret;
3355
 
3480 Serge 3356
     if(args->handle == -2)
3357
     {
3358
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3359
        return 0;
3360
     }
3361
 
3260 Serge 3362
	ret = i915_mutex_lock_interruptible(dev);
3363
	if (ret)
3364
		return ret;
3365
 
3366
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3367
	if (&obj->base == NULL) {
3368
		ret = -ENOENT;
3369
		goto unlock;
3370
	}
3371
 
4104 Serge 3372
	switch (obj->cache_level) {
3373
	case I915_CACHE_LLC:
3374
	case I915_CACHE_L3_LLC:
3375
		args->caching = I915_CACHING_CACHED;
3376
		break;
3260 Serge 3377
 
4104 Serge 3378
	case I915_CACHE_WT:
3379
		args->caching = I915_CACHING_DISPLAY;
3380
		break;
3381
 
3382
	default:
3383
		args->caching = I915_CACHING_NONE;
3384
		break;
3385
	}
3386
 
3260 Serge 3387
	drm_gem_object_unreference(&obj->base);
3388
unlock:
3389
	mutex_unlock(&dev->struct_mutex);
3390
	return ret;
3391
}
3392
 
3393
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3394
			       struct drm_file *file)
3395
{
3396
	struct drm_i915_gem_caching *args = data;
3397
	struct drm_i915_gem_object *obj;
3398
	enum i915_cache_level level;
3399
	int ret;
3400
 
3480 Serge 3401
     if(args->handle == -2)
3402
     {
3403
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3404
        return 0;
3405
     }
3406
 
3260 Serge 3407
	switch (args->caching) {
3408
	case I915_CACHING_NONE:
3409
		level = I915_CACHE_NONE;
3410
		break;
3411
	case I915_CACHING_CACHED:
3412
		level = I915_CACHE_LLC;
3413
		break;
4104 Serge 3414
	case I915_CACHING_DISPLAY:
3415
		level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3416
		break;
3260 Serge 3417
	default:
3418
		return -EINVAL;
3419
	}
3420
 
3421
	ret = i915_mutex_lock_interruptible(dev);
3422
	if (ret)
3423
		return ret;
3424
 
3425
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3426
	if (&obj->base == NULL) {
3427
		ret = -ENOENT;
3428
		goto unlock;
3429
	}
3430
 
3431
	ret = i915_gem_object_set_cache_level(obj, level);
3432
 
3433
	drm_gem_object_unreference(&obj->base);
3434
unlock:
3435
	mutex_unlock(&dev->struct_mutex);
3436
	return ret;
3437
}
3438
 
4104 Serge 3439
static bool is_pin_display(struct drm_i915_gem_object *obj)
3440
{
3441
	/* There are 3 sources that pin objects:
3442
	 *   1. The display engine (scanouts, sprites, cursors);
3443
	 *   2. Reservations for execbuffer;
3444
	 *   3. The user.
3445
	 *
3446
	 * We can ignore reservations as we hold the struct_mutex and
3447
	 * are only called outside of the reservation path.  The user
3448
	 * can only increment pin_count once, and so if after
3449
	 * subtracting the potential reference by the user, any pin_count
3450
	 * remains, it must be due to another use by the display engine.
3451
	 */
3452
	return obj->pin_count - !!obj->user_pin_count;
3453
}
3454
 
2335 Serge 3455
/*
3456
 * Prepare buffer for display plane (scanout, cursors, etc).
3457
 * Can be called from an uninterruptible phase (modesetting) and allows
3458
 * any flushes to be pipelined (for pageflips).
3459
 */
3460
int
3461
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3462
				     u32 alignment,
3463
				     struct intel_ring_buffer *pipelined)
3464
{
3465
	u32 old_read_domains, old_write_domain;
3466
	int ret;
2332 Serge 3467
 
3031 serge 3468
	if (pipelined != obj->ring) {
3469
		ret = i915_gem_object_sync(obj, pipelined);
2335 Serge 3470
		if (ret)
3471
			return ret;
3472
	}
2332 Serge 3473
 
4104 Serge 3474
	/* Mark the pin_display early so that we account for the
3475
	 * display coherency whilst setting up the cache domains.
3476
	 */
3477
	obj->pin_display = true;
3478
 
2335 Serge 3479
	/* The display engine is not coherent with the LLC cache on gen6.  As
3480
	 * a result, we make sure that the pinning that is about to occur is
3481
	 * done with uncached PTEs. This is lowest common denominator for all
3482
	 * chipsets.
3483
	 *
3484
	 * However for gen6+, we could do better by using the GFDT bit instead
3485
	 * of uncaching, which would allow us to flush all the LLC-cached data
3486
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3487
	 */
4104 Serge 3488
	ret = i915_gem_object_set_cache_level(obj,
3489
					      HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
2360 Serge 3490
	if (ret)
4104 Serge 3491
		goto err_unpin_display;
2332 Serge 3492
 
2335 Serge 3493
	/* As the user may map the buffer once pinned in the display plane
3494
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3495
	 * always use map_and_fenceable for all scanout buffers.
3496
	 */
4104 Serge 3497
	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
2335 Serge 3498
	if (ret)
4104 Serge 3499
		goto err_unpin_display;
2332 Serge 3500
 
4104 Serge 3501
	i915_gem_object_flush_cpu_write_domain(obj, true);
2332 Serge 3502
 
2335 Serge 3503
	old_write_domain = obj->base.write_domain;
3504
	old_read_domains = obj->base.read_domains;
2332 Serge 3505
 
2335 Serge 3506
	/* It should now be out of any other write domains, and we can update
3507
	 * the domain values for our changes.
3508
	 */
3031 serge 3509
	obj->base.write_domain = 0;
2335 Serge 3510
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
2332 Serge 3511
 
2351 Serge 3512
	trace_i915_gem_object_change_domain(obj,
3513
					    old_read_domains,
3514
					    old_write_domain);
2332 Serge 3515
 
2335 Serge 3516
	return 0;
4104 Serge 3517
 
3518
err_unpin_display:
3519
	obj->pin_display = is_pin_display(obj);
3520
	return ret;
2335 Serge 3521
}
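 
/* Illustrative pairing sketch (assumed caller, not part of the original
 * file): a scanout buffer is pinned for the display engine and released
 * with the matching unpin helper once the flip is retired:
 *
 *	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
 *	if (ret)
 *		return ret;
 *	// ... program the plane/cursor registers ...
 *	i915_gem_object_unpin_from_display_plane(obj);
 */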
2332 Serge 3522
 
4104 Serge 3523
void
3524
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3525
{
3526
	i915_gem_object_unpin(obj);
3527
	obj->pin_display = is_pin_display(obj);
3528
}
3529
 
2344 Serge 3530
int
3531
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
3532
{
3533
	int ret;
2332 Serge 3534
 
2344 Serge 3535
	if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
3536
		return 0;
2332 Serge 3537
 
3031 serge 3538
	ret = i915_gem_object_wait_rendering(obj, false);
3243 Serge 3539
	if (ret)
3540
		return ret;
2332 Serge 3541
 
2344 Serge 3542
	/* Ensure that we invalidate the GPU's caches and TLBs. */
3543
	obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
3031 serge 3544
	return 0;
2344 Serge 3545
}
2332 Serge 3546
 
2344 Serge 3547
/**
3548
 * Moves a single object to the CPU read, and possibly write domain.
3549
 *
3550
 * This function returns when the move is complete, including waiting on
3551
 * flushes to occur.
3552
 */
3031 serge 3553
int
2344 Serge 3554
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3555
{
3556
	uint32_t old_write_domain, old_read_domains;
3557
	int ret;
2332 Serge 3558
 
2344 Serge 3559
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3560
		return 0;
2332 Serge 3561
 
3031 serge 3562
	ret = i915_gem_object_wait_rendering(obj, !write);
2344 Serge 3563
	if (ret)
3564
		return ret;
2332 Serge 3565
 
2344 Serge 3566
	i915_gem_object_flush_gtt_write_domain(obj);
2332 Serge 3567
 
2344 Serge 3568
	old_write_domain = obj->base.write_domain;
3569
	old_read_domains = obj->base.read_domains;
2332 Serge 3570
 
2344 Serge 3571
	/* Flush the CPU cache if it's still invalid. */
3572
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4104 Serge 3573
		i915_gem_clflush_object(obj, false);
2332 Serge 3574
 
2344 Serge 3575
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3576
	}
2332 Serge 3577
 
2344 Serge 3578
	/* It should now be out of any other write domains, and we can update
3579
	 * the domain values for our changes.
3580
	 */
3581
	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2332 Serge 3582
 
2344 Serge 3583
	/* If we're writing through the CPU, then the GPU read domains will
3584
	 * need to be invalidated at next use.
3585
	 */
3586
	if (write) {
3587
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3588
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3589
	}
2332 Serge 3590
 
2351 Serge 3591
	trace_i915_gem_object_change_domain(obj,
3592
					    old_read_domains,
3593
					    old_write_domain);
2332 Serge 3594
 
2344 Serge 3595
	return 0;
3596
}
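 
/* Illustrative sketch (assumed caller): before reading the pages directly
 * with the CPU, the object is moved to the CPU read domain so any required
 * clflush happens first:
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 *	if (ret)
 *		return ret;
 *	// obj->base.read_domains now includes I915_GEM_DOMAIN_CPU
 */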
2332 Serge 3597
 
3031 serge 3598
/* Throttle our rendering by waiting until the ring has completed our requests
3599
 * emitted over 20 msec ago.
2344 Serge 3600
 *
3031 serge 3601
 * Note that if we were to use the current jiffies each time around the loop,
3602
 * we wouldn't escape the function with any frames outstanding if the time to
3603
 * render a frame was over 20ms.
3604
 *
3605
 * This should get us reasonable parallelism between CPU and GPU but also
3606
 * relatively low latency when blocking on a particular request to finish.
2344 Serge 3607
 */
3031 serge 3608
static int
3609
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
2344 Serge 3610
{
3031 serge 3611
	struct drm_i915_private *dev_priv = dev->dev_private;
3612
	struct drm_i915_file_private *file_priv = file->driver_priv;
3263 Serge 3613
	unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
3031 serge 3614
	struct drm_i915_gem_request *request;
3615
	struct intel_ring_buffer *ring = NULL;
3480 Serge 3616
	unsigned reset_counter;
3031 serge 3617
	u32 seqno = 0;
3618
	int ret;
2332 Serge 3619
 
3480 Serge 3620
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3621
	if (ret)
3622
		return ret;
2332 Serge 3623
 
3480 Serge 3624
	ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3625
	if (ret)
3626
		return ret;
3627
 
3031 serge 3628
	spin_lock(&file_priv->mm.lock);
3629
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3630
		if (time_after_eq(request->emitted_jiffies, recent_enough))
3631
			break;
2332 Serge 3632
 
3031 serge 3633
		ring = request->ring;
3634
		seqno = request->seqno;
3635
	}
3480 Serge 3636
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3031 serge 3637
	spin_unlock(&file_priv->mm.lock);
2332 Serge 3638
 
3031 serge 3639
	if (seqno == 0)
3640
		return 0;
2332 Serge 3641
 
3480 Serge 3642
	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3031 serge 3643
	if (ret == 0)
3644
		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
2332 Serge 3645
 
3031 serge 3646
	return ret;
2352 Serge 3647
}
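 
/* Worked example of the window above (illustrative): at a 60 Hz refresh a
 * frame takes ~16.7 ms, so recent_enough = GetTimerTicks() -
 * msecs_to_jiffies(20) leaves roughly one frame of requests per client
 * unthrottled; anything emitted earlier is waited on via __wait_seqno()
 * before the ioctl returns.
 */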
2332 Serge 3648
 
3649
int
3650
i915_gem_object_pin(struct drm_i915_gem_object *obj,
4104 Serge 3651
		    struct i915_address_space *vm,
2332 Serge 3652
		    uint32_t alignment,
3031 serge 3653
		    bool map_and_fenceable,
3654
		    bool nonblocking)
2332 Serge 3655
{
4104 Serge 3656
	struct i915_vma *vma;
2332 Serge 3657
	int ret;
3658
 
3031 serge 3659
	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
3660
		return -EBUSY;
2332 Serge 3661
 
4104 Serge 3662
	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
3663
 
3664
	vma = i915_gem_obj_to_vma(obj, vm);
3665
 
3666
	if (vma) {
3667
		if ((alignment &&
3668
		     vma->node.start & (alignment - 1)) ||
2332 Serge 3669
		    (map_and_fenceable && !obj->map_and_fenceable)) {
3670
			WARN(obj->pin_count,
3671
			     "bo is already pinned with incorrect alignment:"
4104 Serge 3672
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
2332 Serge 3673
			     " obj->map_and_fenceable=%d\n",
4104 Serge 3674
			     i915_gem_obj_offset(obj, vm), alignment,
2332 Serge 3675
			     map_and_fenceable,
3676
			     obj->map_and_fenceable);
4104 Serge 3677
			ret = i915_vma_unbind(vma);
2332 Serge 3678
			if (ret)
3679
				return ret;
3680
		}
3681
	}
3682
 
4104 Serge 3683
	if (!i915_gem_obj_bound(obj, vm)) {
3243 Serge 3684
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3685
 
4104 Serge 3686
		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
3031 serge 3687
						  map_and_fenceable,
3688
						  nonblocking);
2332 Serge 3689
		if (ret)
3690
			return ret;
3243 Serge 3691
 
3692
		if (!dev_priv->mm.aliasing_ppgtt)
3693
			i915_gem_gtt_bind_object(obj, obj->cache_level);
2332 Serge 3694
	}
3695
 
3031 serge 3696
	if (!obj->has_global_gtt_mapping && map_and_fenceable)
3697
		i915_gem_gtt_bind_object(obj, obj->cache_level);
3698
 
3699
	obj->pin_count++;
2332 Serge 3700
	obj->pin_mappable |= map_and_fenceable;
3701
 
3702
	return 0;
3703
}
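 
/* Illustrative sketch (assumed caller): most pins in this file go through
 * the GGTT helper, and every successful pin must be balanced by an unpin:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
 *	if (ret)
 *		return ret;
 *	// ... use the mapping at i915_gem_obj_ggtt_offset(obj) ...
 *	i915_gem_object_unpin(obj);
 */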
3704
 
2344 Serge 3705
void
3706
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
3707
{
3708
	BUG_ON(obj->pin_count == 0);
4104 Serge 3709
	BUG_ON(!i915_gem_obj_bound_any(obj));
2332 Serge 3710
 
3031 serge 3711
	if (--obj->pin_count == 0)
2344 Serge 3712
		obj->pin_mappable = false;
3713
}
2332 Serge 3714
 
3031 serge 3715
int
3716
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3717
		   struct drm_file *file)
3718
{
3719
	struct drm_i915_gem_pin *args = data;
3720
	struct drm_i915_gem_object *obj;
3721
	int ret;
2332 Serge 3722
 
3480 Serge 3723
     if(args->handle == -2)
3724
     {
3725
        printf("%s handle %d\n", __FUNCTION__, args->handle);
3726
        return 0;
3727
     }
3728
 
3031 serge 3729
	ret = i915_mutex_lock_interruptible(dev);
3730
	if (ret)
3731
		return ret;
2332 Serge 3732
 
3031 serge 3733
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3734
	if (&obj->base == NULL) {
3735
		ret = -ENOENT;
3736
		goto unlock;
3737
	}
2332 Serge 3738
 
3031 serge 3739
	if (obj->madv != I915_MADV_WILLNEED) {
3740
		DRM_ERROR("Attempting to pin a purgeable buffer\n");
3741
		ret = -EINVAL;
3742
		goto out;
3743
	}
2332 Serge 3744
 
3031 serge 3745
	if (obj->pin_filp != NULL && obj->pin_filp != file) {
3746
		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
3747
			  args->handle);
3748
		ret = -EINVAL;
3749
		goto out;
3750
	}
2332 Serge 3751
 
3243 Serge 3752
	if (obj->user_pin_count == 0) {
4104 Serge 3753
		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
3031 serge 3754
		if (ret)
3755
			goto out;
3756
	}
2332 Serge 3757
 
3243 Serge 3758
	obj->user_pin_count++;
3759
	obj->pin_filp = file;
3760
 
4104 Serge 3761
	args->offset = i915_gem_obj_ggtt_offset(obj);
3031 serge 3762
out:
3763
	drm_gem_object_unreference(&obj->base);
3764
unlock:
3765
	mutex_unlock(&dev->struct_mutex);
3766
	return ret;
3767
}
2332 Serge 3768
 
3031 serge 3769
int
3770
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
3771
		     struct drm_file *file)
3772
{
3773
	struct drm_i915_gem_pin *args = data;
3774
	struct drm_i915_gem_object *obj;
3775
	int ret;
2332 Serge 3776
 
3031 serge 3777
	ret = i915_mutex_lock_interruptible(dev);
3778
	if (ret)
3779
		return ret;
2332 Serge 3780
 
4246 Serge 3781
    if(args->handle == -2)
3782
    {
3783
        obj = get_fb_obj();
3784
        drm_gem_object_reference(&obj->base);
3785
    }
3786
    else
3031 serge 3787
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3788
	if (&obj->base == NULL) {
3789
		ret = -ENOENT;
3790
		goto unlock;
3791
	}
2332 Serge 3792
 
3031 serge 3793
	if (obj->pin_filp != file) {
3794
		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
3795
			  args->handle);
3796
		ret = -EINVAL;
3797
		goto out;
3798
	}
3799
	obj->user_pin_count--;
3800
	if (obj->user_pin_count == 0) {
3801
		obj->pin_filp = NULL;
3802
		i915_gem_object_unpin(obj);
3803
	}
2332 Serge 3804
 
3031 serge 3805
out:
3806
	drm_gem_object_unreference(&obj->base);
3807
unlock:
3808
	mutex_unlock(&dev->struct_mutex);
3809
	return ret;
3810
}
2332 Serge 3811
 
3031 serge 3812
int
3813
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3814
		    struct drm_file *file)
3815
{
3816
	struct drm_i915_gem_busy *args = data;
3817
	struct drm_i915_gem_object *obj;
3818
	int ret;
2332 Serge 3819
 
3031 serge 3820
	ret = i915_mutex_lock_interruptible(dev);
3821
	if (ret)
3822
		return ret;
2332 Serge 3823
 
3480 Serge 3824
    if(args->handle == -2)
3825
    {
3826
        obj = get_fb_obj();
3827
        drm_gem_object_reference(&obj->base);
3828
    }
3829
    else
4104 Serge 3830
        obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3031 serge 3831
	if (&obj->base == NULL) {
3832
		ret = -ENOENT;
3833
		goto unlock;
3834
	}
2332 Serge 3835
 
3031 serge 3836
	/* Count all active objects as busy, even if they are currently not used
3837
	 * by the gpu. Users of this interface expect objects to eventually
3838
	 * become non-busy without any further actions, therefore emit any
3839
	 * necessary flushes here.
3840
	 */
3841
	ret = i915_gem_object_flush_active(obj);
2332 Serge 3842
 
3031 serge 3843
	args->busy = obj->active;
3844
	if (obj->ring) {
3845
		BUILD_BUG_ON(I915_NUM_RINGS > 16);
3846
		args->busy |= intel_ring_flag(obj->ring) << 16;
3847
	}
2332 Serge 3848
 
3031 serge 3849
	drm_gem_object_unreference(&obj->base);
3850
unlock:
3851
	mutex_unlock(&dev->struct_mutex);
3852
	return ret;
3853
}
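 
/* Illustrative note (not part of the original file): the busy word returned
 * above packs two fields, e.g. for an object still active on a ring:
 *
 *	args->busy & 0x1;            // object still active on the GPU
 *	(args->busy >> 16) & 0xffff; // intel_ring_flag() of obj->ring
 */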
2332 Serge 3854
 
3031 serge 3855
int
3856
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3857
			struct drm_file *file_priv)
3858
{
3859
	return i915_gem_ring_throttle(dev, file_priv);
3860
}
2332 Serge 3861
 
3263 Serge 3862
#if 0
3863
 
3031 serge 3864
int
3865
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3866
		       struct drm_file *file_priv)
3867
{
3868
	struct drm_i915_gem_madvise *args = data;
3869
	struct drm_i915_gem_object *obj;
3870
	int ret;
2332 Serge 3871
 
3031 serge 3872
	switch (args->madv) {
3873
	case I915_MADV_DONTNEED:
3874
	case I915_MADV_WILLNEED:
3875
	    break;
3876
	default:
3877
	    return -EINVAL;
3878
	}
2332 Serge 3879
 
3031 serge 3880
	ret = i915_mutex_lock_interruptible(dev);
3881
	if (ret)
3882
		return ret;
2332 Serge 3883
 
3031 serge 3884
	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
3885
	if (&obj->base == NULL) {
3886
		ret = -ENOENT;
3887
		goto unlock;
3888
	}
2332 Serge 3889
 
3031 serge 3890
	if (obj->pin_count) {
3891
		ret = -EINVAL;
3892
		goto out;
3893
	}
2332 Serge 3894
 
3031 serge 3895
	if (obj->madv != __I915_MADV_PURGED)
3896
		obj->madv = args->madv;
2332 Serge 3897
 
3031 serge 3898
	/* if the object is no longer attached, discard its backing storage */
3899
	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
3900
		i915_gem_object_truncate(obj);
2332 Serge 3901
 
3031 serge 3902
	args->retained = obj->madv != __I915_MADV_PURGED;
2332 Serge 3903
 
3031 serge 3904
out:
3905
	drm_gem_object_unreference(&obj->base);
3906
unlock:
3907
	mutex_unlock(&dev->struct_mutex);
3908
	return ret;
3909
}
3910
#endif
2332 Serge 3911
 
3031 serge 3912
void i915_gem_object_init(struct drm_i915_gem_object *obj,
3913
			  const struct drm_i915_gem_object_ops *ops)
3914
{
4104 Serge 3915
	INIT_LIST_HEAD(&obj->global_list);
3031 serge 3916
	INIT_LIST_HEAD(&obj->ring_list);
3917
	INIT_LIST_HEAD(&obj->exec_list);
4104 Serge 3918
	INIT_LIST_HEAD(&obj->obj_exec_link);
3919
	INIT_LIST_HEAD(&obj->vma_list);
2332 Serge 3920
 
3031 serge 3921
	obj->ops = ops;
3922
 
3923
	obj->fence_reg = I915_FENCE_REG_NONE;
3924
	obj->madv = I915_MADV_WILLNEED;
3925
	/* Avoid an unnecessary call to unbind on the first bind. */
3926
	obj->map_and_fenceable = true;
3927
 
3928
	i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
3929
}
3930
 
3931
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3932
	.get_pages = i915_gem_object_get_pages_gtt,
3933
	.put_pages = i915_gem_object_put_pages_gtt,
3934
};
3935
 
2332 Serge 3936
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3937
						  size_t size)
3938
{
3939
	struct drm_i915_gem_object *obj;
3031 serge 3940
	struct address_space *mapping;
3480 Serge 3941
	gfp_t mask;
2340 Serge 3942
 
3746 Serge 3943
	obj = i915_gem_object_alloc(dev);
2332 Serge 3944
	if (obj == NULL)
3945
		return NULL;
3946
 
3947
	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4104 Serge 3948
		i915_gem_object_free(obj);
2332 Serge 3949
		return NULL;
3950
	}
3951
 
3952
 
3031 serge 3953
	i915_gem_object_init(obj, &i915_gem_object_ops);
2332 Serge 3954
 
3955
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3956
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3957
 
3031 serge 3958
	if (HAS_LLC(dev)) {
3959
		/* On some devices, we can have the GPU use the LLC (the CPU
2332 Serge 3960
		 * cache) for about a 10% performance improvement
3961
		 * compared to uncached.  Graphics requests other than
3962
		 * display scanout are coherent with the CPU in
3963
		 * accessing this cache.  This means in this mode we
3964
		 * don't need to clflush on the CPU side, and on the
3965
		 * GPU side we only need to flush internal caches to
3966
		 * get data visible to the CPU.
3967
		 *
3968
		 * However, we maintain the display planes as UC, and so
3969
		 * need to rebind when first used as such.
3970
		 */
3971
		obj->cache_level = I915_CACHE_LLC;
3972
	} else
3973
		obj->cache_level = I915_CACHE_NONE;
3974
 
3975
	return obj;
3976
}
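 
/* Illustrative sketch (assumed caller): allocating a fresh object and giving
 * it a global GTT binding, under struct_mutex:
 *
 *	obj = i915_gem_alloc_object(dev, 4096);
 *	if (obj == NULL)
 *		return -ENOMEM;
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 */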
3977
 
2344 Serge 3978
int i915_gem_init_object(struct drm_gem_object *obj)
3979
{
3980
	BUG();
2332 Serge 3981
 
2344 Serge 3982
	return 0;
3983
}
2332 Serge 3984
 
3031 serge 3985
void i915_gem_free_object(struct drm_gem_object *gem_obj)
2344 Serge 3986
{
3031 serge 3987
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
2344 Serge 3988
	struct drm_device *dev = obj->base.dev;
3989
	drm_i915_private_t *dev_priv = dev->dev_private;
4104 Serge 3990
	struct i915_vma *vma, *next;
2332 Serge 3991
 
3031 serge 3992
	trace_i915_gem_object_destroy(obj);
3993
 
3994
 
3995
	obj->pin_count = 0;
4104 Serge 3996
	/* NB: 0 or 1 elements */
3997
	WARN_ON(!list_empty(&obj->vma_list) &&
3998
		!list_is_singular(&obj->vma_list));
3999
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4000
		int ret = i915_vma_unbind(vma);
4001
		if (WARN_ON(ret == -ERESTARTSYS)) {
3031 serge 4002
			bool was_interruptible;
4003
 
4004
			was_interruptible = dev_priv->mm.interruptible;
4005
			dev_priv->mm.interruptible = false;
4006
 
4104 Serge 4007
			WARN_ON(i915_vma_unbind(vma));
3031 serge 4008
 
4009
			dev_priv->mm.interruptible = was_interruptible;
2344 Serge 4010
		}
4104 Serge 4011
	}
2332 Serge 4012
 
4104 Serge 4013
	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4014
	 * before progressing. */
4015
	if (obj->stolen)
4016
		i915_gem_object_unpin_pages(obj);
4017
 
4018
	if (WARN_ON(obj->pages_pin_count))
3031 serge 4019
		obj->pages_pin_count = 0;
4020
	i915_gem_object_put_pages(obj);
4021
//   i915_gem_object_free_mmap_offset(obj);
4104 Serge 4022
	i915_gem_object_release_stolen(obj);
2332 Serge 4023
 
3243 Serge 4024
	BUG_ON(obj->pages);
2332 Serge 4025
 
3031 serge 4026
 
3290 Serge 4027
    if(obj->base.filp != NULL)
4028
    {
3298 Serge 4029
//        printf("filp %p\n", obj->base.filp);
3290 Serge 4030
        shmem_file_delete(obj->base.filp);
4031
    }
4032
 
2344 Serge 4033
	drm_gem_object_release(&obj->base);
4034
	i915_gem_info_remove_obj(dev_priv, obj->base.size);
2332 Serge 4035
 
2344 Serge 4036
	kfree(obj->bit_17);
4104 Serge 4037
	i915_gem_object_free(obj);
2344 Serge 4038
}
2332 Serge 4039
 
4104 Serge 4040
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
4041
				     struct i915_address_space *vm)
4042
{
4043
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
4044
	if (vma == NULL)
4045
		return ERR_PTR(-ENOMEM);
4046
 
4047
	INIT_LIST_HEAD(&vma->vma_link);
4048
	INIT_LIST_HEAD(&vma->mm_list);
4049
	INIT_LIST_HEAD(&vma->exec_list);
4050
	vma->vm = vm;
4051
	vma->obj = obj;
4052
 
4053
	/* Keep GGTT vmas first to make debug easier */
4054
	if (i915_is_ggtt(vm))
4055
		list_add(&vma->vma_link, &obj->vma_list);
4056
	else
4057
		list_add_tail(&vma->vma_link, &obj->vma_list);
4058
 
4059
	return vma;
4060
}
4061
 
4062
void i915_gem_vma_destroy(struct i915_vma *vma)
4063
{
4064
	WARN_ON(vma->node.allocated);
4065
	list_del(&vma->vma_link);
4066
	kfree(vma);
4067
}
4068
 
3031 serge 4069
#if 0
4070
int
4071
i915_gem_idle(struct drm_device *dev)
2344 Serge 4072
{
3031 serge 4073
	drm_i915_private_t *dev_priv = dev->dev_private;
4074
	int ret;
2332 Serge 4075
 
4104 Serge 4076
	if (dev_priv->ums.mm_suspended) {
3031 serge 4077
		mutex_unlock(&dev->struct_mutex);
4078
		return 0;
4079
	}
2332 Serge 4080
 
3031 serge 4081
	ret = i915_gpu_idle(dev);
4082
	if (ret) {
4083
		mutex_unlock(&dev->struct_mutex);
4084
		return ret;
4085
	}
4086
	i915_gem_retire_requests(dev);
4087
 
3480 Serge 4088
	/* Under UMS, be paranoid and evict. */
4089
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
4090
		i915_gem_evict_everything(dev);
4091
 
4092
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3031 serge 4093
 
4094
	i915_kernel_lost_context(dev);
4095
	i915_gem_cleanup_ringbuffer(dev);
4096
 
4097
	/* Cancel the retire work handler, which should be idle now. */
3263 Serge 4098
	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
3031 serge 4099
 
4100
	return 0;
2344 Serge 4101
}
3031 serge 4102
#endif
2332 Serge 4103
 
3031 serge 4104
void i915_gem_l3_remap(struct drm_device *dev)
4105
{
4106
	drm_i915_private_t *dev_priv = dev->dev_private;
4107
	u32 misccpctl;
4108
	int i;
2332 Serge 4109
 
3480 Serge 4110
	if (!HAS_L3_GPU_CACHE(dev))
3031 serge 4111
		return;
2332 Serge 4112
 
3243 Serge 4113
	if (!dev_priv->l3_parity.remap_info)
3031 serge 4114
		return;
2332 Serge 4115
 
3031 serge 4116
	misccpctl = I915_READ(GEN7_MISCCPCTL);
4117
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
4118
	POSTING_READ(GEN7_MISCCPCTL);
2332 Serge 4119
 
3031 serge 4120
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4121
		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3243 Serge 4122
		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3031 serge 4123
			DRM_DEBUG("0x%x was already programmed to %x\n",
4124
				  GEN7_L3LOG_BASE + i, remap);
3243 Serge 4125
		if (remap && !dev_priv->l3_parity.remap_info[i/4])
3031 serge 4126
			DRM_DEBUG_DRIVER("Clearing remapped register\n");
3243 Serge 4127
		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3031 serge 4128
	}
2332 Serge 4129
 
3031 serge 4130
	/* Make sure all the writes land before disabling dop clock gating */
4131
	POSTING_READ(GEN7_L3LOG_BASE);
2332 Serge 4132
 
3031 serge 4133
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
4134
}
2332 Serge 4135
 
3031 serge 4136
void i915_gem_init_swizzling(struct drm_device *dev)
4137
{
4138
	drm_i915_private_t *dev_priv = dev->dev_private;
2332 Serge 4139
 
3031 serge 4140
	if (INTEL_INFO(dev)->gen < 5 ||
4141
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4142
		return;
2332 Serge 4143
 
3031 serge 4144
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4145
				 DISP_TILE_SURFACE_SWIZZLING);
2332 Serge 4146
 
3031 serge 4147
	if (IS_GEN5(dev))
4148
		return;
2344 Serge 4149
 
3031 serge 4150
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4151
	if (IS_GEN6(dev))
4152
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3480 Serge 4153
	else if (IS_GEN7(dev))
4154
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3031 serge 4155
	else
3480 Serge 4156
		BUG();
3031 serge 4157
}
4158
 
4159
static bool
4160
intel_enable_blt(struct drm_device *dev)
4161
{
4162
	if (!HAS_BLT(dev))
4163
		return false;
4164
 
4165
	/* The blitter was dysfunctional on early prototypes */
4166
	if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4167
		DRM_INFO("BLT not supported on this pre-production hardware;"
4168
			 " graphics performance will be degraded.\n");
4169
		return false;
4170
	}
4171
 
4172
	return true;
4173
}
4174
 
3480 Serge 4175
static int i915_gem_init_rings(struct drm_device *dev)
2332 Serge 4176
{
3480 Serge 4177
	struct drm_i915_private *dev_priv = dev->dev_private;
2332 Serge 4178
	int ret;
2351 Serge 4179
 
2332 Serge 4180
	ret = intel_init_render_ring_buffer(dev);
4181
	if (ret)
4182
		return ret;
4183
 
4184
    if (HAS_BSD(dev)) {
4185
		ret = intel_init_bsd_ring_buffer(dev);
4186
		if (ret)
4187
			goto cleanup_render_ring;
4188
	}
4189
 
3031 serge 4190
	if (intel_enable_blt(dev)) {
2332 Serge 4191
		ret = intel_init_blt_ring_buffer(dev);
4192
		if (ret)
4193
			goto cleanup_bsd_ring;
4194
	}
4195
 
4104 Serge 4196
	if (HAS_VEBOX(dev)) {
4197
		ret = intel_init_vebox_ring_buffer(dev);
4198
		if (ret)
4199
			goto cleanup_blt_ring;
4200
	}
4201
 
4202
 
3480 Serge 4203
	ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4204
	if (ret)
4104 Serge 4205
		goto cleanup_vebox_ring;
2351 Serge 4206
 
2332 Serge 4207
	return 0;
4208
 
4104 Serge 4209
cleanup_vebox_ring:
4210
	intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
3480 Serge 4211
cleanup_blt_ring:
4212
	intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
2332 Serge 4213
cleanup_bsd_ring:
4214
	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4215
cleanup_render_ring:
4216
	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3480 Serge 4217
 
2332 Serge 4218
	return ret;
4219
}
4220
 
3480 Serge 4221
int
4222
i915_gem_init_hw(struct drm_device *dev)
3031 serge 4223
{
3480 Serge 4224
	drm_i915_private_t *dev_priv = dev->dev_private;
4225
	int ret;
3031 serge 4226
 
3480 Serge 4227
	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4228
		return -EIO;
3031 serge 4229
 
4104 Serge 4230
	if (dev_priv->ellc_size)
4231
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
3480 Serge 4232
 
3746 Serge 4233
	if (HAS_PCH_NOP(dev)) {
4234
		u32 temp = I915_READ(GEN7_MSG_CTL);
4235
		temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4236
		I915_WRITE(GEN7_MSG_CTL, temp);
4237
	}
4238
 
3480 Serge 4239
	i915_gem_l3_remap(dev);
4240
 
4241
	i915_gem_init_swizzling(dev);
4242
 
4243
	ret = i915_gem_init_rings(dev);
4244
	if (ret)
4245
		return ret;
4246
 
4247
	/*
4248
	 * XXX: There was some w/a described somewhere suggesting loading
4249
	 * contexts before PPGTT.
4250
	 */
4251
	i915_gem_context_init(dev);
3746 Serge 4252
	if (dev_priv->mm.aliasing_ppgtt) {
4253
		ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
4254
		if (ret) {
4255
			i915_gem_cleanup_aliasing_ppgtt(dev);
4256
			DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
4257
		}
4258
	}
3480 Serge 4259
 
4260
	return 0;
3031 serge 4261
}
4262
 
4263
int i915_gem_init(struct drm_device *dev)
4264
{
4265
	struct drm_i915_private *dev_priv = dev->dev_private;
4266
	int ret;
4267
 
4268
	mutex_lock(&dev->struct_mutex);
3746 Serge 4269
 
4270
	if (IS_VALLEYVIEW(dev)) {
4271
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
4272
		I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
4273
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
4274
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4275
	}
4276
 
3480 Serge 4277
	i915_gem_init_global_gtt(dev);
3746 Serge 4278
 
3031 serge 4279
	ret = i915_gem_init_hw(dev);
4280
	mutex_unlock(&dev->struct_mutex);
4281
	if (ret) {
4282
		i915_gem_cleanup_aliasing_ppgtt(dev);
4283
		return ret;
4284
	}
4285
 
3746 Serge 4286
 
3031 serge 4287
    return 0;
4288
}
4289
 
2332 Serge 4290
void
4291
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4292
{
4293
	drm_i915_private_t *dev_priv = dev->dev_private;
3031 serge 4294
	struct intel_ring_buffer *ring;
2332 Serge 4295
	int i;
4296
 
3031 serge 4297
	for_each_ring(ring, dev_priv, i)
4298
		intel_cleanup_ring_buffer(ring);
2332 Serge 4299
}
4300
 
3031 serge 4301
#if 0
4302
 
2332 Serge 4303
int
4304
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4305
		       struct drm_file *file_priv)
4306
{
4104 Serge 4307
	struct drm_i915_private *dev_priv = dev->dev_private;
3031 serge 4308
	int ret;
2332 Serge 4309
 
4310
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4311
		return 0;
4312
 
3480 Serge 4313
	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2332 Serge 4314
		DRM_ERROR("Reenabling wedged hardware, good luck\n");
3480 Serge 4315
		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
2332 Serge 4316
	}
4317
 
4318
	mutex_lock(&dev->struct_mutex);
4104 Serge 4319
	dev_priv->ums.mm_suspended = 0;
2332 Serge 4320
 
3031 serge 4321
	ret = i915_gem_init_hw(dev);
2332 Serge 4322
	if (ret != 0) {
4323
		mutex_unlock(&dev->struct_mutex);
4324
		return ret;
4325
	}
4326
 
4104 Serge 4327
	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
2332 Serge 4328
	mutex_unlock(&dev->struct_mutex);
4329
 
4330
	ret = drm_irq_install(dev);
4331
	if (ret)
4332
		goto cleanup_ringbuffer;
4333
 
4334
	return 0;
4335
 
4336
cleanup_ringbuffer:
4337
	mutex_lock(&dev->struct_mutex);
4338
	i915_gem_cleanup_ringbuffer(dev);
4104 Serge 4339
	dev_priv->ums.mm_suspended = 1;
2332 Serge 4340
	mutex_unlock(&dev->struct_mutex);
4341
 
4342
	return ret;
4343
}
4344
 
4345
int
4346
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
4347
		       struct drm_file *file_priv)
4348
{
4104 Serge 4349
	struct drm_i915_private *dev_priv = dev->dev_private;
4350
	int ret;
4351
 
2332 Serge 4352
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4353
		return 0;
4354
 
4355
	drm_irq_uninstall(dev);
4104 Serge 4356
 
4357
	mutex_lock(&dev->struct_mutex);
4358
	ret =  i915_gem_idle(dev);
4359
 
4360
	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
4361
	 * We need to replace this with a semaphore, or something.
4362
	 * And not confound ums.mm_suspended!
4363
	 */
4364
	if (ret != 0)
4365
		dev_priv->ums.mm_suspended = 1;
4366
	mutex_unlock(&dev->struct_mutex);
4367
 
4368
	return ret;
2332 Serge 4369
}
4370
 
4371
void
4372
i915_gem_lastclose(struct drm_device *dev)
4373
{
4374
	int ret;
4375
 
4376
	if (drm_core_check_feature(dev, DRIVER_MODESET))
4377
		return;
4378
 
4104 Serge 4379
	mutex_lock(&dev->struct_mutex);
2332 Serge 4380
	ret = i915_gem_idle(dev);
4381
	if (ret)
4382
		DRM_ERROR("failed to idle hardware: %d\n", ret);
4104 Serge 4383
	mutex_unlock(&dev->struct_mutex);
2332 Serge 4384
}
4385
#endif
4386
 
4387
static void
2326 Serge 4388
init_ring_lists(struct intel_ring_buffer *ring)
4389
{
4390
    INIT_LIST_HEAD(&ring->active_list);
4391
    INIT_LIST_HEAD(&ring->request_list);
4392
}
4393
 
4104 Serge 4394
static void i915_init_vm(struct drm_i915_private *dev_priv,
4395
			 struct i915_address_space *vm)
4396
{
4397
	vm->dev = dev_priv->dev;
4398
	INIT_LIST_HEAD(&vm->active_list);
4399
	INIT_LIST_HEAD(&vm->inactive_list);
4400
	INIT_LIST_HEAD(&vm->global_link);
4401
	list_add(&vm->global_link, &dev_priv->vm_list);
4402
}
4403
 
2326 Serge 4404
void
4405
i915_gem_load(struct drm_device *dev)
4406
{
3480 Serge 4407
	drm_i915_private_t *dev_priv = dev->dev_private;
2326 Serge 4408
    int i;
4409
 
4104 Serge 4410
	INIT_LIST_HEAD(&dev_priv->vm_list);
4411
	i915_init_vm(dev_priv, &dev_priv->gtt.base);
4412
 
3031 serge 4413
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4414
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
2326 Serge 4415
    INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4416
    for (i = 0; i < I915_NUM_RINGS; i++)
4417
        init_ring_lists(&dev_priv->ring[i]);
2342 Serge 4418
	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
2326 Serge 4419
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
2360 Serge 4420
	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4421
			  i915_gem_retire_work_handler);
3480 Serge 4422
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
2326 Serge 4423
 
4424
    /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4425
    if (IS_GEN3(dev)) {
3031 serge 4426
		I915_WRITE(MI_ARB_STATE,
4427
			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
2326 Serge 4428
    }
4429
 
4430
    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4431
 
3746 Serge 4432
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
4433
		dev_priv->num_fence_regs = 32;
4434
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2326 Serge 4435
        dev_priv->num_fence_regs = 16;
4436
    else
4437
        dev_priv->num_fence_regs = 8;
4438
 
4439
    /* Initialize fence registers to zero */
3746 Serge 4440
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4441
	i915_gem_restore_fences(dev);
2326 Serge 4442
 
4443
    i915_gem_detect_bit_6_swizzle(dev);
4444
 
4445
    dev_priv->mm.interruptible = true;
4446
 
4447
}
4448
 
4104 Serge 4449
#if 0
4450
/*
4451
 * Create a physically contiguous memory object for this object
4452
 * e.g. for cursor + overlay regs
4453
 */
4454
static int i915_gem_init_phys_object(struct drm_device *dev,
4455
				     int id, int size, int align)
4456
{
4457
	drm_i915_private_t *dev_priv = dev->dev_private;
4458
	struct drm_i915_gem_phys_object *phys_obj;
4459
	int ret;
2326 Serge 4460
 
4104 Serge 4461
	if (dev_priv->mm.phys_objs[id - 1] || !size)
4462
		return 0;
4463
 
4464
	phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
4465
	if (!phys_obj)
4466
		return -ENOMEM;
4467
 
4468
	phys_obj->id = id;
4469
 
4470
	phys_obj->handle = drm_pci_alloc(dev, size, align);
4471
	if (!phys_obj->handle) {
4472
		ret = -ENOMEM;
4473
		goto kfree_obj;
4474
	}
4475
#ifdef CONFIG_X86
4476
	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4477
#endif
4478
 
4479
	dev_priv->mm.phys_objs[id - 1] = phys_obj;
4480
 
4481
	return 0;
4482
kfree_obj:
4483
	kfree(phys_obj);
4484
	return ret;
4485
}
4486
 
4487
static void i915_gem_free_phys_object(struct drm_device *dev, int id)
4488
{
4489
	drm_i915_private_t *dev_priv = dev->dev_private;
4490
	struct drm_i915_gem_phys_object *phys_obj;
4491
 
4492
	if (!dev_priv->mm.phys_objs[id - 1])
4493
		return;
4494
 
4495
	phys_obj = dev_priv->mm.phys_objs[id - 1];
4496
	if (phys_obj->cur_obj) {
4497
		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
4498
	}
4499
 
4500
#ifdef CONFIG_X86
4501
	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
4502
#endif
4503
	drm_pci_free(dev, phys_obj->handle);
4504
	kfree(phys_obj);
4505
	dev_priv->mm.phys_objs[id - 1] = NULL;
4506
}
4507
 
4508
void i915_gem_free_all_phys_object(struct drm_device *dev)
4509
{
4510
	int i;
4511
 
4512
	for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
4513
		i915_gem_free_phys_object(dev, i);
4514
}
4515
 
4516
void i915_gem_detach_phys_object(struct drm_device *dev,
4517
				 struct drm_i915_gem_object *obj)
4518
{
4519
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4520
	char *vaddr;
4521
	int i;
4522
	int page_count;
4523
 
4524
	if (!obj->phys_obj)
4525
		return;
4526
	vaddr = obj->phys_obj->handle->vaddr;
4527
 
4528
	page_count = obj->base.size / PAGE_SIZE;
4529
	for (i = 0; i < page_count; i++) {
4530
		struct page *page = shmem_read_mapping_page(mapping, i);
4531
		if (!IS_ERR(page)) {
4532
			char *dst = kmap_atomic(page);
4533
			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
4534
			kunmap_atomic(dst);
4535
 
4536
			drm_clflush_pages(&page, 1);
4537
 
4538
			set_page_dirty(page);
4539
			mark_page_accessed(page);
4540
			page_cache_release(page);
4541
		}
4542
	}
4543
	i915_gem_chipset_flush(dev);
4544
 
4545
	obj->phys_obj->cur_obj = NULL;
4546
	obj->phys_obj = NULL;
4547
}
4548
 
4549
int
4550
i915_gem_attach_phys_object(struct drm_device *dev,
4551
			    struct drm_i915_gem_object *obj,
4552
			    int id,
4553
			    int align)
4554
{
4555
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
4556
	drm_i915_private_t *dev_priv = dev->dev_private;
4557
	int ret = 0;
4558
	int page_count;
4559
	int i;
4560
 
4561
	if (id > I915_MAX_PHYS_OBJECT)
4562
		return -EINVAL;
4563
 
4564
	if (obj->phys_obj) {
4565
		if (obj->phys_obj->id == id)
4566
			return 0;
4567
		i915_gem_detach_phys_object(dev, obj);
4568
	}
4569
 
4570
	/* create a new object */
4571
	if (!dev_priv->mm.phys_objs[id - 1]) {
4572
		ret = i915_gem_init_phys_object(dev, id,
4573
						obj->base.size, align);
4574
		if (ret) {
4575
			DRM_ERROR("failed to init phys object %d size: %zu\n",
4576
				  id, obj->base.size);
4577
			return ret;
4578
		}
4579
	}
4580
 
4581
	/* bind to the object */
4582
	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4583
	obj->phys_obj->cur_obj = obj;
4584
 
4585
	page_count = obj->base.size / PAGE_SIZE;
4586
 
4587
	for (i = 0; i < page_count; i++) {
4588
		struct page *page;
4589
		char *dst, *src;
4590
 
4591
		page = shmem_read_mapping_page(mapping, i);
4592
		if (IS_ERR(page))
4593
			return PTR_ERR(page);
4594
 
4595
		src = kmap_atomic(page);
4596
		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4597
		memcpy(dst, src, PAGE_SIZE);
4598
		kunmap_atomic(src);
4599
 
4600
		mark_page_accessed(page);
4601
		page_cache_release(page);
4602
	}
4603
 
4604
	return 0;
4605
}
4606
 
4607
static int
4608
i915_gem_phys_pwrite(struct drm_device *dev,
4609
		     struct drm_i915_gem_object *obj,
4610
		     struct drm_i915_gem_pwrite *args,
4611
		     struct drm_file *file_priv)
4612
{
4613
	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4614
	char __user *user_data = to_user_ptr(args->data_ptr);
4615
 
4616
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4617
		unsigned long unwritten;
4618
 
4619
		/* The physical object once assigned is fixed for the lifetime
4620
		 * of the obj, so we can safely drop the lock and continue
4621
		 * to access vaddr.
4622
		 */
4623
		mutex_unlock(&dev->struct_mutex);
4624
		unwritten = copy_from_user(vaddr, user_data, args->size);
4625
		mutex_lock(&dev->struct_mutex);
4626
		if (unwritten)
4627
			return -EFAULT;
4628
	}
4629
 
4630
	i915_gem_chipset_flush(dev);
4631
	return 0;
4632
}
4633
 
4634
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4635
{
4636
	struct drm_i915_file_private *file_priv = file->driver_priv;
4637
 
4638
	/* Clean up our request list when the client is going away, so that
4639
	 * later retire_requests won't dereference our soon-to-be-gone
4640
	 * file_priv.
4641
	 */
4642
	spin_lock(&file_priv->mm.lock);
4643
	while (!list_empty(&file_priv->mm.request_list)) {
4644
		struct drm_i915_gem_request *request;
4645
 
4646
		request = list_first_entry(&file_priv->mm.request_list,
4647
					   struct drm_i915_gem_request,
4648
					   client_list);
4649
		list_del(&request->client_list);
4650
		request->file_priv = NULL;
4651
	}
4652
	spin_unlock(&file_priv->mm.lock);
4653
}
4654
#endif
4655
 
4656
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4657
{
4658
	if (!mutex_is_locked(mutex))
4659
		return false;
4660
 
4661
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4662
	return mutex->owner == task;
4663
#else
4664
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
4665
	return false;
4666
#endif
4667
}
4668
 
4669
/* All the new VM stuff */
4670
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
4671
				  struct i915_address_space *vm)
4672
{
4673
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4674
	struct i915_vma *vma;
4675
 
4676
	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4677
		vm = &dev_priv->gtt.base;
4678
 
4679
	BUG_ON(list_empty(&o->vma_list));
4680
	list_for_each_entry(vma, &o->vma_list, vma_link) {
4681
		if (vma->vm == vm)
4682
			return vma->node.start;
4683
 
4684
	}
4685
    return 0; //-1;
4686
}
4687
 
4688
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4689
			struct i915_address_space *vm)
4690
{
4691
	struct i915_vma *vma;
4692
 
4693
	list_for_each_entry(vma, &o->vma_list, vma_link)
4694
		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4695
			return true;
4696
 
4697
	return false;
4698
}
4699
 
4700
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
4701
{
4702
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4703
	struct i915_address_space *vm;
4704
 
4705
	list_for_each_entry(vm, &dev_priv->vm_list, global_link)
4706
		if (i915_gem_obj_bound(o, vm))
4707
			return true;
4708
 
4709
	return false;
4710
}
4711
 
4712
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
4713
				struct i915_address_space *vm)
4714
{
4715
	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
4716
	struct i915_vma *vma;
4717
 
4718
	if (vm == &dev_priv->mm.aliasing_ppgtt->base)
4719
		vm = &dev_priv->gtt.base;
4720
 
4721
	BUG_ON(list_empty(&o->vma_list));
4722
 
4723
	list_for_each_entry(vma, &o->vma_list, vma_link)
4724
		if (vma->vm == vm)
4725
			return vma->node.size;
4726
 
4727
	return 0;
4728
}
4729
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4730
				     struct i915_address_space *vm)
4731
{
4732
	struct i915_vma *vma;
4733
	list_for_each_entry(vma, &obj->vma_list, vma_link)
4734
		if (vma->vm == vm)
4735
			return vma;
4736
 
4737
	return NULL;
4738
}
4739
 
4740
struct i915_vma *
4741
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
4742
				  struct i915_address_space *vm)
4743
{
4744
	struct i915_vma *vma;
4745
 
4746
	vma = i915_gem_obj_to_vma(obj, vm);
4747
	if (!vma)
4748
		vma = i915_gem_vma_create(obj, vm);
4749
 
4750
	return vma;
4751
}
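 
/* Illustrative sketch (assumed caller): binding code asks for the per-VM vma
 * with the lookup-or-create helper and treats an ERR_PTR() result from vma
 * creation as fatal:
 *
 *	vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	// the caller then allocates vma->node before use
 */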