30,7 → 30,6 |
#include "i915_drv.h" |
#include "i915_trace.h" |
#include "intel_drv.h" |
#include <linux/shmem_fs.h> |
#include <linux/slab.h> |
//#include <linux/swap.h> |
#include <linux/pci.h> |
53,7 → 52,39 |
|
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) |
|
void |
drm_gem_object_free(struct kref *kref) |
{ |
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev; |
|
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
|
i915_gem_free_object(obj); |
} |
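/* Illustration (a minimal sketch, not part of the original file): the
 * function above is the kref release callback, normally reached through
 * a reference drop like the upstream unreference helper. The caller
 * must hold dev->struct_mutex, matching the BUG_ON above.
 */
static inline void
example_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj != NULL)
		kref_put(&obj->refcount, drm_gem_object_free);
}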
|
/**
 * Initialize an already allocated GEM object of the specified size.
 * Note that unlike the upstream helper, this port does not attach an
 * shmfs backing store here.
 */
int drm_gem_object_init(struct drm_device *dev, |
struct drm_gem_object *obj, size_t size) |
{ |
BUG_ON((size & (PAGE_SIZE - 1)) != 0); |
|
obj->dev = dev; |
kref_init(&obj->refcount); |
atomic_set(&obj->handle_count, 0); |
obj->size = size; |
|
return 0; |
} |
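/* Usage sketch with a hypothetical caller (kzalloc/roundup assumed
 * available as in the upstream kernel): allocate and initialize an
 * object, page-aligning the size to satisfy the BUG_ON above.
 */
static struct drm_gem_object *
example_gem_object_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, obj, roundup(size, PAGE_SIZE)) != 0) {
		kfree(obj);
		return NULL;
	}
	return obj;
}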
|
void |
drm_gem_object_release(struct drm_gem_object *obj)
{
	/* Stub for this port; upstream fput()s the object's shmem
	 * backing file here. */
}
|
|
#define I915_EXEC_CONSTANTS_MASK (3<<6) |
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ |
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) |
107,6 → 138,7 |
dev_priv->mm.object_memory -= size; |
} |
|
|
static int |
i915_gem_wait_for_error(struct drm_device *dev) |
{ |
249,6 → 281,7 |
trace_i915_gem_object_create(obj); |
|
*handle_p = handle; |
|
return 0; |
} |
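/* Userspace-side illustration (not driver code; a hypothetical helper
 * assuming <stdint.h>, <sys/ioctl.h> and <drm/i915_drm.h>): the handle
 * produced by the create path above is what the pread/pwrite and domain
 * ioctls below operate on.
 */
static uint32_t example_create_bo(int fd, uint64_t size)
{
	struct drm_i915_gem_create create = { .size = size };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) != 0)
		return 0;	/* 0 is never a valid GEM handle */
	return create.handle;
}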
|
284,6 → 317,8 |
args->size, &args->handle); |
} |
|
#if 0 |
|
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
291,7 → 326,6 |
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
obj->tiling_mode != I915_TILING_NONE; |
} |
#if 0 |
|
static inline int |
__copy_to_user_swizzled(char __user *cpu_vaddr, |
616,9 → 650,7 |
io_mapping_unmap_atomic(vaddr_atomic); |
return unwritten; |
} |
#endif |
|
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) |
/** |
* This is the fast pwrite path, where we copy the data directly from the |
* user into the GTT, uncached. |
634,7 → 666,6 |
loff_t offset, page_base; |
char __user *user_data; |
int page_offset, page_length, ret; |
char *vaddr; |
|
ret = i915_gem_object_pin(obj, 0, true, true); |
if (ret) |
648,13 → 679,6 |
if (ret) |
goto out_unpin; |
|
	vaddr = AllocKernelSpace(4096);
	if (vaddr == NULL) {
		ret = -ENOSPC;
		goto out_unpin;
	}
|
user_data = (char __user *) (uintptr_t) args->data_ptr; |
remain = args->size; |
|
673,22 → 697,24 |
if ((page_offset + remain) > PAGE_SIZE) |
page_length = PAGE_SIZE - page_offset; |
|
MapPage(vaddr, page_base, PG_SW|PG_NOCACHE); |
/* If we get a fault while copying data, then (presumably) our |
* source page isn't available. Return the error and we'll |
* retry in the slow path. |
*/ |
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, |
page_offset, user_data, page_length)) { |
ret = -EFAULT; |
goto out_unpin; |
} |
|
memcpy(vaddr+page_offset, user_data, page_length); |
|
remain -= page_length; |
user_data += page_length; |
offset += page_length; |
} |
|
FreeKernelSpace(vaddr); |
|
out_unpin: |
i915_gem_object_unpin(obj); |
out: |
printf("% s ret = %d\n", __FUNCTION__, ret); |
|
return ret; |
} |
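/* Worked example of the per-page split in the loop above (assuming
 * PAGE_SIZE == 4096): offset == 0x2345 and remain == 0x1000 give
 * page_base == 0x2000 and page_offset == 0x345, so page_length is
 * clamped to 0x1000 - 0x345 == 0xcbb and the final 0x345 bytes are
 * written on the next loop iteration.
 */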
|
704,26 → 730,25 |
bool needs_clflush_after) |
{ |
char *vaddr; |
int ret = 0; |
int ret; |
|
if (unlikely(page_do_bit17_swizzling)) |
return -EINVAL; |
|
vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW); |
vaddr = kmap_atomic(page); |
if (needs_clflush_before) |
drm_clflush_virt_range(vaddr + shmem_page_offset, |
page_length); |
memcpy(vaddr + shmem_page_offset, |
ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset, |
user_data, |
page_length); |
if (needs_clflush_after) |
drm_clflush_virt_range(vaddr + shmem_page_offset, |
page_length); |
FreeKernelSpace(vaddr); |
kunmap_atomic(vaddr); |
|
return ret ? -EFAULT : 0; |
} |
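/* Note on the fast path above: kmap_atomic() enters atomic context, so
 * __copy_from_user_inatomic_nocache() must not sleep; if the user page
 * is not resident it returns the number of uncopied bytes instead of
 * faulting. That non-zero return becomes -EFAULT and pushes the caller
 * into shmem_pwrite_slow() below.
 */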
#if 0 |
|
/* Only difference to the fast-path function is that this can handle bit17 |
* and uses non-atomic copy and kmap functions. */ |
758,9 → 783,7 |
|
return ret ? -EFAULT : 0; |
} |
#endif |
|
|
static int |
i915_gem_shmem_pwrite(struct drm_device *dev, |
struct drm_i915_gem_object *obj, |
837,7 → 860,7 |
* overcomplicate things and flush the entire patch. */ |
partial_cacheline_write = needs_clflush_before && |
((shmem_page_offset | page_length) |
& (x86_clflush_size - 1)); |
& (boot_cpu_data.x86_clflush_size - 1)); |
|
page = sg_page(sg); |
page_do_bit17_swizzling = obj_do_bit17_swizzling && |
852,16 → 875,16 |
|
hit_slowpath = 1; |
mutex_unlock(&dev->struct_mutex); |
dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__); |
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, |
user_data, page_do_bit17_swizzling, |
partial_cacheline_write, |
needs_clflush_after); |
|
// ret = shmem_pwrite_slow(page, shmem_page_offset, page_length, |
// user_data, page_do_bit17_swizzling, |
// partial_cacheline_write, |
// needs_clflush_after); |
|
mutex_lock(&dev->struct_mutex); |
|
next_page: |
set_page_dirty(page); |
mark_page_accessed(page); |
|
if (ret) |
goto out; |
908,6 → 931,16 |
if (args->size == 0) |
return 0; |
|
if (!access_ok(VERIFY_READ, |
(char __user *)(uintptr_t)args->data_ptr, |
args->size)) |
return -EFAULT; |
|
ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr, |
args->size); |
if (ret) |
return -EFAULT; |
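	/* Ordering note: both user-memory checks above run before
	 * struct_mutex is taken, so the common path never faults user
	 * pages while holding the lock (a fault could recurse into the
	 * driver through its own mmap path). */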
|
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
942,10 → 975,10 |
* pread/pwrite currently are reading and writing from the CPU |
* perspective, requiring manual detiling by the client. |
*/ |
// if (obj->phys_obj) { |
// ret = i915_gem_phys_pwrite(dev, obj, args, file); |
// goto out; |
// } |
if (obj->phys_obj) { |
ret = i915_gem_phys_pwrite(dev, obj, args, file); |
goto out; |
} |
|
if (obj->cache_level == I915_CACHE_NONE && |
obj->tiling_mode == I915_TILING_NONE && |
966,6 → 999,8 |
return ret; |
} |
|
#endif |
|
int |
i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
bool interruptible) |
1088,7 → 1123,6 |
WARN_ON(end < 0); /* We're not aware of other errors */ |
return 0; |
} |
|
#endif |
|
#define EXIT_COND \ |
1161,116 → 1195,24 |
return 0; |
} |
|
/* A nonblocking variant of the above wait. This is a highly dangerous routine |
* as the object state may change during this call. |
*/ |
static __must_check int |
i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, |
bool readonly) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring = obj->ring; |
u32 seqno; |
int ret; |
|
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
BUG_ON(!dev_priv->mm.interruptible); |
|
seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno; |
if (seqno == 0) |
return 0; |
|
ret = i915_gem_check_wedge(dev_priv, true); |
if (ret) |
return ret; |
|
ret = i915_gem_check_olr(ring, seqno); |
if (ret) |
return ret; |
|
mutex_unlock(&dev->struct_mutex); |
ret = __wait_seqno(ring, seqno, true, NULL); |
mutex_lock(&dev->struct_mutex); |
|
i915_gem_retire_requests_ring(ring); |
|
/* Manually manage the write flush as we may have not yet |
* retired the buffer. |
*/ |
if (obj->last_write_seqno && |
i915_seqno_passed(seqno, obj->last_write_seqno)) { |
obj->last_write_seqno = 0; |
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
} |
|
return ret; |
} |
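/* On the readonly split above: a reader only has to wait for
 * outstanding GPU writes to complete, while a prospective writer must
 * also wait for outstanding GPU reads, hence last_write_seqno in one
 * branch and last_read_seqno in the other.
 */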
|
/** |
* Called when user space prepares to use an object with the CPU, either |
* through the mmap ioctl's mapping or a GTT mapping. |
*/ |
int |
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_set_domain *args = data; |
struct drm_i915_gem_object *obj; |
uint32_t read_domains = args->read_domains; |
uint32_t write_domain = args->write_domain; |
int ret; |
|
/* Only handle setting domains to types used by the CPU. */ |
if (write_domain & I915_GEM_GPU_DOMAINS) |
return -EINVAL; |
|
if (read_domains & I915_GEM_GPU_DOMAINS) |
return -EINVAL; |
|
/* Having something in the write domain implies it's in the read |
* domain, and only that read domain. Enforce that in the request. |
*/ |
if (write_domain != 0 && read_domains != write_domain) |
return -EINVAL; |
|
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
|
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) { |
ret = -ENOENT; |
goto unlock; |
} |
|
/* Try to flush the object off the GPU without holding the lock. |
* We will repeat the flush holding the lock in the normal manner |
* to catch cases where we are gazumped. |
*/ |
ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain); |
if (ret) |
goto unref; |
|
if (read_domains & I915_GEM_DOMAIN_GTT) { |
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); |
|
/* Silently promote "you're not bound, there was nothing to do" |
* to success, since the client was just asking us to |
* make sure everything was done. |
*/ |
if (ret == -EINVAL) |
ret = 0; |
} else { |
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); |
} |
|
unref: |
drm_gem_object_unreference(&obj->base); |
unlock: |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
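/* Userspace-side illustration of the ioctl above (a hypothetical helper
 * assuming <stdint.h>, <sys/ioctl.h> and <drm/i915_drm.h>): make a
 * buffer coherent for GTT access. Note the rule enforced above: a
 * non-zero write_domain must equal read_domains.
 */
static int example_set_to_gtt(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_domain sd = {
		.handle       = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		.write_domain = I915_GEM_DOMAIN_GTT,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}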
|
|
|
1277,47 → 1219,11 |
|
|
|
/** |
* Maps the contents of an object, returning the address it is mapped |
* into. |
* |
* While the mapping holds a reference on the contents of the object, it doesn't |
* imply a ref on the object itself. |
*/ |
int |
i915_gem_mmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_mmap *args = data; |
struct drm_gem_object *obj; |
unsigned long addr = 0; |
|
obj = drm_gem_object_lookup(dev, file, args->handle); |
if (obj == NULL) |
return -ENOENT; |
|
dbgprintf("%s offset %lld size %lld not supported\n", |
args->offset, args->size); |
/* prime objects have no backing filp to GEM mmap |
* pages from. |
*/ |
if (!obj->filp) { |
drm_gem_object_unreference_unlocked(obj); |
return -EINVAL; |
} |
|
// addr = vm_mmap(obj->filp, 0, args->size, |
// PROT_READ | PROT_WRITE, MAP_SHARED, |
// args->offset); |
drm_gem_object_unreference_unlocked(obj); |
// if (IS_ERR((void *)addr)) |
// return addr; |
|
args->addr_ptr = (uint64_t) addr; |
return -EINVAL; |
|
// return 0; |
} |
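/* For reference, the upstream shape of this ioctl from userspace (this
 * port returns -EINVAL instead); hypothetical helper with the same
 * header assumptions as the examples above.
 */
static void *example_mmap_bo(int fd, uint32_t handle, uint64_t size)
{
	struct drm_i915_gem_mmap mm = {
		.handle = handle,
		.size   = size,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mm) != 0)
		return NULL;
	return (void *)(uintptr_t)mm.addr_ptr;
}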
|
|
|
1537,9 → 1443,7 |
static int |
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
int page_count, i; |
struct address_space *mapping; |
struct sg_table *st; |
struct scatterlist *sg; |
struct page *page; |
1569,18 → 1473,16 |
* Fail silently without starting the shrinker |
*/ |
for_each_sg(st->sgl, sg, page_count, i) { |
page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp); |
if (IS_ERR(page)) { |
dbgprintf("%s invalid page %p\n", __FUNCTION__, page); |
page = (struct page *)AllocPage(); // oh-oh |
			if (page == NULL)
goto err_pages; |
|
} |
sg_set_page(sg, page, PAGE_SIZE, 0); |
} |
|
obj->pages = st; |
|
DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
// DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count); |
|
return 0; |
|
2047,6 → 1949,8 |
|
|
|
|
|
/** |
* i915_gem_object_sync - sync an object to a ring. |
* |
2917,68 → 2821,6 |
return 0; |
} |
|
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_caching *args = data; |
struct drm_i915_gem_object *obj; |
int ret; |
|
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
|
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) { |
ret = -ENOENT; |
goto unlock; |
} |
|
args->caching = obj->cache_level != I915_CACHE_NONE; |
|
drm_gem_object_unreference(&obj->base); |
unlock: |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
|
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_caching *args = data; |
struct drm_i915_gem_object *obj; |
enum i915_cache_level level; |
int ret; |
|
switch (args->caching) { |
case I915_CACHING_NONE: |
level = I915_CACHE_NONE; |
break; |
case I915_CACHING_CACHED: |
level = I915_CACHE_LLC; |
break; |
default: |
return -EINVAL; |
} |
|
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
|
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) { |
ret = -ENOENT; |
goto unlock; |
} |
|
ret = i915_gem_object_set_cache_level(obj, level); |
|
drm_gem_object_unreference(&obj->base); |
unlock: |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
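/* Userspace-side illustration for the caching ioctl pair above
 * (hypothetical helper, same header assumptions as earlier examples):
 * request LLC caching, which the switch above maps to I915_CACHE_LLC.
 */
static int example_set_cached(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg = {
		.handle  = handle,
		.caching = I915_CACHING_CACHED,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}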
|
/* |
* Prepare buffer for display plane (scanout, cursors, etc). |
* Can be called from an uninterruptible phase (modesetting) and allows |
3303,8 → 3145,6 |
return ret; |
} |
|
#endif |
|
int |
i915_gem_busy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
3342,7 → 3182,6 |
return ret; |
} |
|
#if 0 |
int |
i915_gem_throttle_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |