Subversion Repositories Kolibri OS


Rev 6103 (left column) vs Rev 6131 (right column); a '-' in either column marks a line that does not exist in that revision.
Line 38... Line 38...
  38   38  #include
  39   39  #include
  40   40  #define RQ_BUG_ON(expr)
  41   41
  42   42  extern int x86_clflush_size;
   -   43  #define __copy_to_user_inatomic __copy_to_user
  43   44
  44   45  #define PROT_READ       0x1             /* page can be read */
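
The one functional change above is the new compatibility define: __copy_to_user_inatomic is aliased to __copy_to_user. KolibriOS runs drivers and applications in a single address space, so the Linux user-copy helpers can collapse to a plain memory copy. A minimal sketch of that idea (the real definitions in the KolibriOS headers may differ):

#include <string.h>

/* Illustrative only: the __user/__force annotations are dropped.
 * Linux convention: return the number of bytes NOT copied, so 0
 * means complete success. */
static inline unsigned long
__copy_to_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

/* The "inatomic" variant must not sleep; a straight memcpy never
 * sleeps, so one implementation can serve both names. */
#define __copy_to_user_inatomic __copy_to_user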
Line 55... Line 56...
  55   56
  56   57
  57   58  #define MAX_ERRNO       4095
  58   59
  59   60  #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
  60    -
   -   61  #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
  61   62
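
IS_ERR_VALUE() depends on the Linux ERR_PTR convention: the last MAX_ERRNO values of the address space are never valid kernel pointers, so a pointer-sized value in the range [-4095, -1] can carry a negative errno. The relocated offset_in_page() simply masks off the page-aligned part of an address. A self-contained demo of both, assuming 4 KiB pages:

#include <stdio.h>

#define unlikely(x)     (x)     /* branch hint stubbed out for the demo */
#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	unsigned long err = (unsigned long)-12;    /* like ERR_PTR(-ENOMEM) */
	unsigned long ptr = 0x12345678UL;          /* an ordinary address */

	printf("%d\n", IS_ERR_VALUE(err) ? 1 : 0); /* 1: encodes errno 12 */
	printf("%d\n", IS_ERR_VALUE(ptr) ? 1 : 0); /* 0: a real pointer */
	printf("%#lx\n", offset_in_page(ptr));     /* 0x678 */
	return 0;
}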
Line 236... Line 237...
 236  237
 237  238  	return i915_gem_create(file, dev,
 238  239  			       args->size, &args->handle);
 239  240  }
 240    -
 241    -
 242    -  #if 0
 243  241
 244  242  static inline int
 245  243  __copy_to_user_swizzled(char __user *cpu_vaddr,
 246  244  			const char *gpu_vaddr, int gpu_offset,
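
Removing the #if 0 pulls the previously dead shmem pread helpers into the build. For context, in the Linux i915 code this port tracks, __copy_to_user_swizzled() continues by copying one 64-byte cache line at a time, flipping bit 6 of the GPU offset on swizzled lines; roughly:

/* Upstream-flavoured continuation, shown for context; ALIGN() and
 * min() are the usual Linux kernel macros. */
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		/* stop at the end of the current 64-byte cache line */
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		/* bit-17 swizzling flips bit 6 of the source offset */
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}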
Line 291... Line 289...
 291  289  	}
 292  290
 293  291  	return 0;
   -  292  }
   -  293
   -  294  /*
   -  295   * Pins the specified object's pages and synchronizes the object with
   -  296   * GPU accesses. Sets needs_clflush to non-zero if the caller should
   -  297   * flush the object from the CPU cache.
   -  298   */
   -  299  int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
   -  300  				    int *needs_clflush)
   -  301  {
   -  302  	int ret;
   -  303
   -  304  	*needs_clflush = 0;
   -  305
   -  306  	if (!obj->base.filp)
   -  307  		return -EINVAL;
   -  308
   -  309  	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
   -  310  		/* If we're not in the cpu read domain, set ourself into the gtt
   -  311  		 * read domain and manually flush cachelines (if required). This
   -  312  		 * optimizes for the case when the gpu will dirty the data
   -  313  		 * anyway again before the next pread happens. */
   -  314  		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
   -  315  							obj->cache_level);
   -  316  		ret = i915_gem_object_wait_rendering(obj, true);
   -  317  		if (ret)
   -  318  			return ret;
   -  319  	}
   -  320
   -  321  	ret = i915_gem_object_get_pages(obj);
   -  322  	if (ret)
   -  323  		return ret;
   -  324
   -  325  	i915_gem_object_pin_pages(obj);
   -  326
   -  327  	return ret;
 294  328  }
 295  329
 296  330  /* Per-page copy function for the shmem pread fastpath.
 297  331   * Flushes invalid cachelines before reading the target if
 298  332   * needs_clflush is set. */
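
The added i915_gem_obj_prepare_shmem_read() pins the object's backing pages and tells the caller whether the data must be clflushed before it is read. A sketch of how the shmem pread path consumes it, mirroring the upstream caller:

	int needs_clflush = 0;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	/* ... per-page copy loop: when needs_clflush is non-zero, flush
	 * each page with drm_clflush_virt_range() before reading it ... */

	i915_gem_object_unpin_pages(obj);	/* drops the pin taken above */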
Line 422... Line 456...
 422  456  		if (ret == 0)
 423  457  			goto next_page;
 424  458
 425  459  		mutex_unlock(&dev->struct_mutex);
 426    -
 427    -  		if (likely(!i915.prefault_disable) && !prefaulted) {
 428    -  			ret = fault_in_multipages_writeable(user_data, remain);
 429    -  			/* Userspace is tricking us, but we've already clobbered
 430    -  			 * its pages with the prefault and promised to write the
 431    -  			 * data up to the first fault. Hence ignore any errors
 432    -  			 * and just continue. */
 433    -  			(void)ret;
 434    -  			prefaulted = 1;
 435    -  		}
 436  460
 437  461  		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
 438  462  				       user_data, page_do_bit17_swizzling,
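
The deleted block is Linux's guard against page faults in the pread slow path: before retrying the copy with struct_mutex dropped, it pre-faults the whole destination buffer so the copy itself rarely faults. KolibriOS buffers are not demand-paged from the driver's point of view, so the port can drop it. A deliberately simplified, hypothetical sketch of what fault_in_multipages_writeable() does upstream (the original goes through __put_user() and also touches the final byte of the range):

static int fault_in_multipages_writeable(char *uaddr, int size)
{
	char *end = uaddr + size - 1;

	/* Touch one byte per page so any write faults happen here, not
	 * inside the copy loop. The write clobbers the buffer, which is
	 * exactly what the deleted comment apologizes for. */
	while (uaddr <= end) {
		*(volatile char *)uaddr = 0;
		uaddr += PAGE_SIZE;
	}
	return 0;
}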
Line 469... Line 493...
 469  493  	int ret = 0;
 470  494
 471  495  	if (args->size == 0)
 472  496  		return 0;
 473    -
 474    -  	if (!access_ok(VERIFY_WRITE,
 475    -  		       to_user_ptr(args->data_ptr),
 476    -  		       args->size))
 477    -  		return -EFAULT;
 478  497
 479  498  	ret = i915_mutex_lock_interruptible(dev);
 480  499  	if (ret)
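
access_ok(VERIFY_WRITE, ...) only checks that a buffer lies within the user half of a split address space; with no user/kernel split on KolibriOS there is nothing to check, so the guard is removed outright. For context, the to_user_ptr() helper it relied on is just a width-safe cast of the 64-bit ioctl field, as the i915 driver of this era defined it:

static inline void __user *to_user_ptr(u64 address)
{
	/* go through uintptr_t so the cast is exact on 32-bit builds */
	return (void __user *)(uintptr_t)address;
}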
Line 514... Line 533...
 514  533
 515  534  /* This is the fast write path which cannot handle
 516  535   * page faults in the source data
 517  536   */
 518    -
 519    -  static inline int
 520    -  fast_user_write(struct io_mapping *mapping,
 521    -  		loff_t page_base, int page_offset,
 522    -  		char __user *user_data,
 523    -  		int length)
 524    -  {
 525    -  	void __iomem *vaddr_atomic;
 526    -  	void *vaddr;
 527    -  	unsigned long unwritten;
 528    -
 529    -  	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 530    -  	/* We can use the cpu mem copy function because this is X86. */
 531    -  	vaddr = (void __force*)vaddr_atomic + page_offset;
 532    -  	unwritten = __copy_from_user_inatomic_nocache(vaddr,
 533    -  						      user_data, length);
 534    -  	io_mapping_unmap_atomic(vaddr_atomic);
 535    -  	return unwritten;
 536    -  }
 537    -  #endif
 538  537
 539    -  #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
   -  538
 540  539  /**
 541  540   * This is the fast pwrite path, where we copy the data directly from the
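
The removed fast_user_write() streamed data through a short-lived write-combining mapping of the GTT aperture, using an in-atomic, non-caching copy that reports failure instead of taking a fault. Upstream its caller turns a nonzero return into -EFAULT and falls back to the fault-capable path; roughly (label name depends on the surrounding function):

	if (fast_user_write(dev_priv->gtt.mappable, page_base,
			    page_offset, user_data, page_length)) {
		ret = -EFAULT;
		goto out;	/* retry via the slower shmem path */
	}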
Line 583... Line 582...
 583  582  		page_offset = offset_in_page(offset);
 584  583  		page_length = remain;
 585  584  		if ((page_offset + remain) > PAGE_SIZE)
 586  585  			page_length = PAGE_SIZE - page_offset;
 587  586
 588    -          MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
   -  587  		MapPage(dev_priv->gtt.mappable,
   -  588  				dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
 589  589
 590  590  		memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
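
The substantive change in this final hunk is the mapping flag: PG_WRITEC asks KolibriOS for a write-combining mapping of the GTT aperture page, recovering the WC semantics that io_mapping_map_atomic_wc() provided in the deleted Linux helper. Sequential stores to the aperture are then merged into bursts instead of going out as individual uncached writes. A sketch of the rewritten inner copy loop, assuming the surrounding variables (remain, offset, user_data) follow the upstream fast pwrite path:

	while (remain > 0) {
		/* page_base: aperture page; page_offset: start within it */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* remap the scratch window write-combined over the target
		 * aperture page, then copy straight into it */
		MapPage(dev_priv->gtt.mappable,
			dev_priv->gtt.mappable_base + page_base,
			PG_WRITEC | PG_SW);
		memcpy((char *)dev_priv->gtt.mappable + page_offset,
		       user_data, page_length);

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}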