Subversion Repositories Kolibri OS

/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
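/*
 * Local stand-ins for the kernel's copy_to_user()/copy_from_user().
 * In this port the execbuffer data is presumably reachable with a plain
 * memcpy() (no separate user address space is handled here), so both
 * helpers simply copy and always report success.
 */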
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}
 
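/*
 * eb_vmas collects the i915_vma objects referenced by one execbuffer call.
 * When "and" is negative the handles are dense indices and lut[] is used
 * directly; otherwise "and" is a power-of-two-minus-one mask and handles
 * are hashed into buckets[] (see eb_create() and eb_get_vma() below).
 */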
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
 
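/*
 * Resolve every exec handle to its GEM object under the file's table_lock,
 * taking a reference to each, then (outside the spinlock, so no GFP_ATOMIC
 * is needed) look up or create the VMA for the target address space and
 * move it onto eb->vmas.
 */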
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}
 
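/*
 * Undo whatever i915_gem_execbuffer_reserve_vma() took for this entry:
 * drop the fence reservation and the pin, as recorded by the
 * __EXEC_OBJECT_HAS_FENCE/__EXEC_OBJECT_HAS_PIN flags.
 */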
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		i915_gem_object_unpin(obj);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}
 
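/*
 * A relocation is written either through the CPU domain (relocate_entry_cpu)
 * or through the GTT aperture (relocate_entry_gtt).  Both variants in this
 * port appear to rely on MapPage() to map the destination page at a window
 * in the GTT mappable range rather than the kmap/io_mapping helpers used by
 * the upstream driver.
 */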
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t page_offset = offset_in_page(reloc->offset);
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = dev_priv->gtt.mappable+4096;
	MapPage(vaddr,(addr_t)i915_gem_object_get_page(obj,reloc->offset >> PAGE_SHIFT), PG_SW);
	*(uint32_t *)(vaddr + page_offset) = reloc->delta;

	return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base +
				 (reloc->offset & PAGE_MASK), PG_SW);
	reloc_page = dev_priv->gtt.mappable;
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(reloc->delta, reloc_entry);

	return 0;
}
 
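/*
 * Apply a single relocation: look up the target VMA, validate the requested
 * read/write domains and the relocation offset (alignment and object
 * bounds), skip the write entirely if the presumed offset is already
 * correct, and otherwise patch the batch via the CPU or GTT path and record
 * the new presumed offset.
 */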
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint32_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc);
	else
		ret = relocate_entry_gtt(obj, reloc);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		memcpy(r, user_relocs, count*sizeof(r[0]));

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
								 vma->vm);
			if (ret)
				return ret;

			if (r->presumed_offset != offset) {
				memcpy(&user_relocs->presumed_offset,
				       &r->presumed_offset,
				       sizeof(r->presumed_offset));
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
							 vma->vm);
		if (ret)
			return ret;
	}

	return 0;
}
 
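/*
 * Fast relocation path: walk every VMA in the request and apply its
 * relocation list in place.  The pagefault_disable()/pagefault_enable()
 * pair used upstream is commented out in this port.
 */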
static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case we, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
//	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
//	pagefault_enable();

	return ret;
}

static int
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}
 
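/*
 * Pin one VMA for execution: reserve space in its address space with the
 * requested alignment, grab a fence register when the entry asks for one,
 * make sure the aliasing-PPGTT/GTT mappings exist, and record what was taken
 * via the __EXEC_OBJECT_HAS_* flags so it can later be released by
 * i915_gem_execbuffer_unreserve_vma().
 */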
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_ring_buffer *ring,
				bool *need_reloc)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence, need_mappable;
	struct drm_i915_gem_object *obj = vma->obj;
	int ret;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
				  false);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;

			obj->pending_fenced_gpu_access = true;
		}
	}

	/* Ensure ppgtt mapping exists if needed */
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
				       obj, obj->cache_level);

		obj->has_aliasing_ppgtt_mapping = 1;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
	    !obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(obj, obj->cache_level);

	return 0;
}
 
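/*
 * Reserve every VMA in the request, handling mappable/fenced buffers first.
 * Upstream, an -ENOSPC result unpins everything, evicts the address space
 * and retries; with i915_gem_evict_vm() commented out here, ret is still
 * -ENOSPC after the unreserve pass and the function returns instead of
 * retrying.
 */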
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
			bool need_fence, need_mappable;

			obj = vma->obj;

			if (!drm_mm_node_allocated(&vma->node))
				continue;

			need_fence =
				has_fenced_gpu_access &&
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
				obj->tiling_mode != I915_TILING_NONE;
			need_mappable = need_fence || need_reloc_mappable(vma);

			WARN_ON((need_mappable || need_fence) &&
			       !i915_is_ggtt(vma->vm));

			if ((entry->alignment &&
			     vma->node.start & (entry->alignment - 1)) ||
			    (need_mappable && !obj->map_and_fenceable))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

//		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
 
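/*
 * Slow relocation path, entered when the fast path fails with -EFAULT:
 * drop struct_mutex, copy all relocation entries out of the user lists
 * (invalidating their presumed offsets), retake the lock, look up and
 * reserve the VMAs again, then apply the copied relocations.
 */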
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = malloc(count * sizeof(*reloc_offset));
	reloc = malloc(total * sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		kfree(reloc);
		kfree(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	kfree(reloc);
	kfree(reloc_offset);
	return ret;
}
 
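/*
 * Synchronise every object with the target ring and flush outstanding CPU
 * writes (clflush/chipset flush, plus a wmb() for GTT writes) before the
 * batch is dispatched; GPU caches are invalidated unconditionally.
 */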
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
	}

	return 0;
}

static int
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  const u32 ctx_id)
{
	struct i915_ctx_hang_stats *hs;

	hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
	if (IS_ERR(hs))
		return PTR_ERR(hs);

	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return -EIO;
	}

	return 0;
}
 
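/*
 * After dispatch: commit the pending read/write domains, move each VMA onto
 * the ring's active list and mark written objects dirty so retirement and
 * eviction see the correct state.
 */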
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_ring_buffer *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
 
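/*
 * Core of the execbuffer path.  In order: validate the request and select a
 * ring, copy in any cliprects, look up all objects, reserve and relocate
 * them (falling back to the slow path on -EFAULT), flush caches, switch
 * context, emit any constant-buffer/SOL setup, dispatch the batch (once per
 * cliprect on pre-gen5) and finally retire it through a request.
 */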
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ret = i915_gem_validate_context(dev, file, ctx_id);
	if (ret) {
		mutex_unlock(&dev->struct_mutex);
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);

	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}

#if 0
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++)
			exec_list[i].offset = exec2_list[i].offset;
		/* ... and back out to userspace */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec_list,
				   sizeof(*exec_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
#endif
 
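/*
 * Entry point for the execbuffer2 ioctl: copy the exec object list from the
 * caller, run i915_gem_do_execbuffer() against the global GTT and copy the
 * final object offsets back out.
 */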
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kfree(exec2_list);
		FAIL();
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	kfree(exec2_list);
	return ret;
}