/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *    Chris Wilson
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_remapping.h>

#define I915_EXEC_SECURE        (1<<9)
#define I915_EXEC_IS_PINNED     (1<<10)
#define I915_EXEC_VEBOX         (4<<0)

#define wmb() asm volatile ("sfence")

struct drm_i915_gem_object *get_fb_obj();

static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}
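
/*
 * Editor's note (illustrative, not part of the driver): the two helpers above
 * are KolibriOS shims.  They assume "user" pointers are directly addressable,
 * so the copies reduce to memcpy() and always report success (0 bytes left
 * uncopied), which is the contract callers below rely on.  The sketch borrows
 * names from i915_gem_execbuffer2() further down in this file.
 */
#if 0
	/* Callers treat a non-zero return as "some bytes were not copied". */
	if (copy_from_user(exec2_list,
			   (void __user *)(uintptr_t)args->buffers_ptr,
			   sizeof(*exec2_list) * args->buffer_count) != 0)
		return -EFAULT;
#endif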

struct eb_objects {
	struct list_head objects;
	int and;
	union {
		struct drm_i915_gem_object *lut[0];
		struct hlist_head buckets[0];
	};
};

static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_objects *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		int size = args->buffer_count;
		size *= sizeof(struct drm_i915_gem_object *);
		size += sizeof(struct eb_objects);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		int size = args->buffer_count;
		int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_objects),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->objects);
	return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_objects(struct eb_objects *eb,
		  struct drm_i915_gem_exec_object2 *exec,
		  const struct drm_i915_gem_execbuffer2 *args,
		  struct drm_file *file)
{
	int i;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_i915_gem_object *obj;

		if (exec[i].handle == -2)
			obj = get_fb_obj();
		else
			obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			return -ENOENT;
		}

		if (!list_empty(&obj->exec_list)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			return -EINVAL;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->exec_list, &eb->objects);

		obj->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = obj;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			obj->exec_handle = handle;
			hlist_add_head(&obj->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
	}
	spin_unlock(&file->table_lock);

	return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct drm_i915_gem_object *obj;

			obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
			if (obj->exec_handle == handle)
				return obj;
		}
		return NULL;
	}
}
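
/*
 * Editor's note (illustrative, not part of the driver): eb_create() selects
 * one of two handle-lookup modes used by eb_lookup_objects()/eb_get_object().
 * With I915_EXEC_HANDLE_LUT relocation target handles are indices into the
 * execbuffer object list, so lookups index a flat LUT and eb->and holds the
 * negated buffer count for bounds checking.  Otherwise GEM handles are hashed
 * into a power-of-two bucket array and eb->and is the bucket mask.
 */
#if 0
	/* LUT mode (eb->and < 0): direct index, bounds-checked above. */
	obj = eb->lut[handle];

	/* Hash mode (eb->and >= 0): mask picks the bucket, then walk it. */
	head = &eb->buckets[handle & eb->and];
#endif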

static void
eb_destroy(struct eb_objects *eb)
{
	while (!list_empty(&eb->objects)) {
		struct drm_i915_gem_object *obj;

		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_objects *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct i915_address_space *vm)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	uint32_t target_offset;
	int ret = -EINVAL;

	/* we already hold a reference to all valid objects */
	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
	if (unlikely(target_obj == NULL))
		return -ENOENT;

	target_i915_obj = to_intel_bo(target_obj);
	target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		i915_gem_gtt_bind_object(target_i915_obj,
					 target_i915_obj->cache_level);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return ret;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset > obj->base.size - 4)) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return ret;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return ret;
	}

	/* We can't wait for rendering with pagefaults disabled */

	reloc->delta += target_offset;
	if (use_cpu_reloc(obj)) {
		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
		char *vaddr;

		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
		if (ret)
			return ret;

		vaddr = (char *)MapIoMem((addr_t)i915_gem_object_get_page(obj,
					 reloc->offset >> PAGE_SHIFT), 4096, 3);
		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
		FreeKernelSpace(vaddr);
	} else {
		struct drm_i915_private *dev_priv = dev->dev_private;
		uint32_t __iomem *reloc_entry;
		void __iomem *reloc_page;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ret;

		ret = i915_gem_object_put_fence(obj);
		if (ret)
			return ret;

		/* Map the page containing the relocation we're going to perform.  */
		reloc->offset += i915_gem_obj_ggtt_offset(obj);
		reloc_page = (void*)MapIoMem(reloc->offset & PAGE_MASK, 4096, 3);
		reloc_entry = (uint32_t __iomem *)
			(reloc_page + (reloc->offset & ~PAGE_MASK));
		iowrite32(reloc->delta, reloc_entry);
		FreeKernelSpace(reloc_page);
	}

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
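
/*
 * Editor's note (illustrative, not part of the driver): a relocation asks the
 * kernel to patch a 32-bit location inside a buffer with the final GTT
 * address of a target object plus a delta.  Field names come from
 * struct drm_i915_gem_relocation_entry; the values are placeholders.
 */
#if 0
	struct drm_i915_gem_relocation_entry reloc = {
		.target_handle   = target_bo_handle, /* object whose address we need */
		.offset          = 0x40,             /* where to patch, inside obj    */
		.delta           = 0,                /* added to the target's address */
		.presumed_offset = 0,                /* if it already matches, no-op  */
		.read_domains    = I915_GEM_DOMAIN_RENDER,
		.write_domain    = 0,
	};
#endif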

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
				    struct eb_objects *eb,
				    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int remain, ret;

	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		memcpy(r, user_relocs, count*sizeof(r[0]));

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
								 vm);
			if (ret)
				return ret;

			memcpy(&user_relocs->presumed_offset,
			       &r->presumed_offset,
			       sizeof(r->presumed_offset));

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
					 struct eb_objects *eb,
					 struct drm_i915_gem_relocation_entry *relocs,
					 struct i915_address_space *vm)
{
	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
							 vm);
		if (ret)
			return ret;
	}

	return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
			     struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
//	pagefault_disable();
	list_for_each_entry(obj, &eb->objects, exec_list) {
		ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
		if (ret)
			break;
	}
//	pagefault_enable();

	return ret;
}
410
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
411
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
412
 
413
static int
414
need_reloc_mappable(struct drm_i915_gem_object *obj)
415
{
416
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
417
	return entry->relocation_count && !use_cpu_reloc(obj);
418
}
419
 
420
static int
421
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
3480 Serge 422
				   struct intel_ring_buffer *ring,
4104 Serge 423
				   struct i915_address_space *vm,
3480 Serge 424
				   bool *need_reloc)
3263 Serge 425
{
426
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
427
	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
428
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
429
	bool need_fence, need_mappable;
430
	int ret;
431
 
432
	need_fence =
433
		has_fenced_gpu_access &&
434
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
435
		obj->tiling_mode != I915_TILING_NONE;
436
	need_mappable = need_fence || need_reloc_mappable(obj);
437
 
4104 Serge 438
	ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
439
				  false);
3263 Serge 440
	if (ret)
441
		return ret;
442
 
443
	entry->flags |= __EXEC_OBJECT_HAS_PIN;
444
 
445
	if (has_fenced_gpu_access) {
446
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
447
			ret = i915_gem_object_get_fence(obj);
448
			if (ret)
449
				return ret;
450
 
451
			if (i915_gem_object_pin_fence(obj))
452
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
453
 
454
			obj->pending_fenced_gpu_access = true;
455
		}
456
	}
457
 
458
	/* Ensure ppgtt mapping exists if needed */
459
	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
460
		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
461
				       obj, obj->cache_level);
462
 
463
		obj->has_aliasing_ppgtt_mapping = 1;
464
	}
465
 
4104 Serge 466
	if (entry->offset != i915_gem_obj_offset(obj, vm)) {
467
		entry->offset = i915_gem_obj_offset(obj, vm);
3480 Serge 468
		*need_reloc = true;
469
	}
3266 Serge 470
 
3480 Serge 471
	if (entry->flags & EXEC_OBJECT_WRITE) {
472
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
473
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
474
	}
475
 
476
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
477
	    !obj->has_global_gtt_mapping)
478
		i915_gem_gtt_bind_object(obj, obj->cache_level);
479
 
3263 Serge 480
	return 0;
481
}
482
 
483
static void
484
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
485
{
486
	struct drm_i915_gem_exec_object2 *entry;
487
 
4104 Serge 488
	if (!i915_gem_obj_bound_any(obj))
3263 Serge 489
		return;
490
 
491
	entry = obj->exec_entry;
492
 
493
	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
494
		i915_gem_object_unpin_fence(obj);
495
 
496
	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
497
		i915_gem_object_unpin(obj);
498
 
499
	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
500
}
501
 
502
static int
503
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
3480 Serge 504
			    struct list_head *objects,
4104 Serge 505
			    struct i915_address_space *vm,
3480 Serge 506
			    bool *need_relocs)
3263 Serge 507
{
508
	struct drm_i915_gem_object *obj;
509
	struct list_head ordered_objects;
510
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
511
	int retry;
512
 
513
	INIT_LIST_HEAD(&ordered_objects);
514
	while (!list_empty(objects)) {
515
		struct drm_i915_gem_exec_object2 *entry;
516
		bool need_fence, need_mappable;
517
 
518
		obj = list_first_entry(objects,
519
				       struct drm_i915_gem_object,
520
				       exec_list);
521
		entry = obj->exec_entry;
522
 
523
		need_fence =
524
			has_fenced_gpu_access &&
525
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
526
			obj->tiling_mode != I915_TILING_NONE;
527
		need_mappable = need_fence || need_reloc_mappable(obj);
528
 
529
		if (need_mappable)
530
			list_move(&obj->exec_list, &ordered_objects);
531
		else
532
			list_move_tail(&obj->exec_list, &ordered_objects);
533
 
3480 Serge 534
		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
3263 Serge 535
		obj->base.pending_write_domain = 0;
536
		obj->pending_fenced_gpu_access = false;
537
	}
538
	list_splice(&ordered_objects, objects);
539
 
540
	/* Attempt to pin all of the buffers into the GTT.
541
	 * This is done in 3 phases:
542
	 *
543
	 * 1a. Unbind all objects that do not match the GTT constraints for
544
	 *     the execbuffer (fenceable, mappable, alignment etc).
545
	 * 1b. Increment pin count for already bound objects.
546
	 * 2.  Bind new objects.
547
	 * 3.  Decrement pin count.
548
	 *
549
	 * This avoid unnecessary unbinding of later objects in order to make
550
	 * room for the earlier objects *unless* we need to defragment.
551
	 */
552
	retry = 0;
553
	do {
554
		int ret = 0;
555
 
556
		/* Unbind any ill-fitting objects or pin. */
557
		list_for_each_entry(obj, objects, exec_list) {
558
			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
559
			bool need_fence, need_mappable;
4104 Serge 560
			u32 obj_offset;
3263 Serge 561
 
4104 Serge 562
			if (!i915_gem_obj_bound(obj, vm))
3263 Serge 563
				continue;
564
 
4104 Serge 565
			obj_offset = i915_gem_obj_offset(obj, vm);
3263 Serge 566
			need_fence =
567
				has_fenced_gpu_access &&
568
				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
569
				obj->tiling_mode != I915_TILING_NONE;
570
			need_mappable = need_fence || need_reloc_mappable(obj);
571
 
4104 Serge 572
 
573
			if ((entry->alignment &&
574
			     obj_offset & (entry->alignment - 1)) ||
3263 Serge 575
			    (need_mappable && !obj->map_and_fenceable))
4104 Serge 576
				ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
3263 Serge 577
			else
4104 Serge 578
				ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
3263 Serge 579
			if (ret)
580
				goto err;
581
		}
582
 
583
		/* Bind fresh objects */
584
		list_for_each_entry(obj, objects, exec_list) {
4104 Serge 585
			if (i915_gem_obj_bound(obj, vm))
3263 Serge 586
				continue;
587
 
4104 Serge 588
			ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
3263 Serge 589
			if (ret)
590
				goto err;
591
		}
592
 
593
err:		/* Decrement pin count for bound objects */
594
		list_for_each_entry(obj, objects, exec_list)
595
			i915_gem_execbuffer_unreserve_object(obj);
596
 
597
		if (ret != -ENOSPC || retry++)
598
			return ret;
599
 
600
//       ret = i915_gem_evict_everything(ring->dev);
601
		if (ret)
602
			return ret;
603
	} while (1);
604
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_ring_buffer *ring,
				  struct eb_objects *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_address_space *vm)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct drm_i915_gem_object *obj;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	int count = args->buffer_count;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->objects)) {
		obj = list_first_entry(&eb->objects,
				       struct drm_i915_gem_object,
				       exec_list);
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = malloc(count * sizeof(*reloc_offset));
	reloc = malloc(total * sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		kfree(reloc);
		kfree(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (copy_to_user(&user_relocs[j].presumed_offset,
					 &invalid_offset,
					 sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(obj, &eb->objects, exec_list) {
		int offset = obj->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
							       reloc + reloc_offset[offset],
							       vm);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as they are; this is the painfully slow
	 * path, and we want to avoid the complication of dropping the lock
	 * whilst having buffers reserved in the aperture and so causing
	 * spurious ENOSPC for random operations.
	 */

err:
	kfree(reloc);
	kfree(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
				struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(obj, objects, exec_list) {
		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
//		if (!access_ok(VERIFY_WRITE, ptr, length))
//			return -EFAULT;

//		if (fault_in_multipages_readable(ptr, length))
//			return -EFAULT;
	}

	return 0;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
				   struct i915_address_space *vm,
				   struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, objects, exec_list) {
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		/* FIXME: This lookup gets fixed later <-- danvet */
		list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
		i915_gem_object_move_to_active(obj, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);
			if (obj->pin_count) /* check for potential scanout */
				intel_mark_fb_busy(obj, ring);
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	mask = I915_EXEC_CONSTANTS_MASK;
	switch (mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (ring == &dev_priv->ring[RCS] &&
		    mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4)
				return -EINVAL;

			if (INTEL_INFO(dev)->gen > 5 &&
			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
				return -EINVAL;

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}

		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}

		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}

		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto pre_mutex_err;
		}

		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto pre_mutex_err;
		}
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_objects(eb, exec, args, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = list_entry(eb->objects.prev,
			       struct drm_i915_gem_object,
			       exec_list);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb, vm);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but let's be paranoid and do it
	 * unconditionally for now. */
	if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
		i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);

	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
	if (ret)
		goto err;

	ret = i915_switch_context(ring, file, ctx_id);
	if (ret)
		goto err;

	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {

	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     (struct drm_i915_relocation_entry __user *)
			     (uintptr_t) args->buffers_ptr,
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kfree(exec2_list);
		FAIL();
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user((void __user *)(uintptr_t)args->buffers_ptr,
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	kfree(exec2_list);

//    LEAVE();

	return ret;
}
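
/*
 * Editor's note (illustrative sketch, not part of the driver): how a caller
 * is expected to feed this entry point.  On Linux this argument block arrives
 * via DRM_IOCTL_I915_GEM_EXECBUFFER2; the KolibriOS submission path may
 * differ, so treat this only as a description of the layout consumed above.
 * target_handle, batch_handle, relocs, nrelocs and batch_bytes are
 * placeholders.  Note the batch object must be the last entry in the list,
 * matching the list_entry(eb->objects.prev, ...) lookup in
 * i915_gem_do_execbuffer().
 */
#if 0
	struct drm_i915_gem_exec_object2 objects[2] = {
		{ .handle = target_handle },              /* referenced by relocs */
		{ .handle = batch_handle,                 /* batch goes last */
		  .relocation_count = nrelocs,
		  .relocs_ptr = (uintptr_t)relocs },
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr        = (uintptr_t)objects,
		.buffer_count       = 2,
		.batch_start_offset = 0,
		.batch_len          = batch_bytes,
		.flags              = I915_EXEC_RENDER,
	};

	ret = i915_gem_execbuffer2(dev, &execbuf, file);
#endif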