Subversion Repositories Kolibri OS

Compare Revisions

Rev 7143 → Rev 7144

/drivers/video/drm/i915/i915_gem.c
41,14 → 41,11
#define RQ_BUG_ON(expr)
 
extern int x86_clflush_size;
#define __copy_to_user_inatomic __copy_to_user
 
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define MAP_SHARED 0x01 /* Share changes */
 
 
 
struct drm_i915_gem_object *get_fb_obj();
 
unsigned long vm_mmap(struct file *file, unsigned long addr,
155,10 → 152,10
 
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
if (vma->pin_count)
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
247,7 → 244,7
int ret;
 
drm_gem_object_reference(&obj->base);
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
if (i915_vma_unbind(vma))
break;
 
653,7 → 650,25
* page faults in the source data
*/
 
static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
 
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force*)vaddr_atomic + page_offset;
unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
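/*
 * __copy_from_user_inatomic_nocache() returns the number of bytes it
 * could not copy, so fast_user_write() yields 0 on a complete copy and
 * a non-zero count when the user page faulted; the caller below treats
 * any non-zero return as -EFAULT and falls back to the slow path.
 */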
 
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
702,11 → 717,16
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
 
MapPage(dev_priv->gtt.mappable,
dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
}
 
memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
 
remain -= page_length;
user_data += page_length;
offset += page_length;
741,9 → 761,8
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
memcpy(vaddr + shmem_page_offset,
user_data,
page_length);
ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
user_data, page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
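/*
 * Unlike the memcpy() it replaces, __copy_from_user_inatomic() can stop
 * part way through if the user page is not resident; it returns the
 * number of bytes left uncopied, so a non-zero ret is expected to make
 * the surrounding fast path give up and retry via the faulting slow path.
 */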
1126,7 → 1145,7
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
wait_queue_t wait;
unsigned long timeout_expire;
s64 before, now;
s64 before = 0; /* Only to silence a compiler warning. */
int ret;
 
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
1146,14 → 1165,17
return -ETIME;
 
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
 
/*
* Record current time in case interrupted by signal, or wedged.
*/
before = ktime_get_raw_ns();
}
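/*
 * 'before' is only consumed when a timeout is being tracked, so it is
 * now sampled inside this block; the "= 0" initializer above exists
 * solely to silence the maybe-uninitialized compiler warning.
 */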
 
if (INTEL_INFO(dev_priv)->gen >= 6)
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
/* Record current time in case interrupted by signal, or wedged */
trace_i915_gem_request_wait_begin(req);
before = ktime_get_raw_ns();
 
/* Optimistic spin for the next jiffie before touching IRQs */
ret = __i915_spin_request(req, state);
1213,11 → 1235,10
DestroyEvent(wait.evnt);
 
out:
now = ktime_get_raw_ns();
trace_i915_gem_request_wait_end(req);
 
if (timeout) {
s64 tres = *timeout - (now - before);
s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
*timeout = tres < 0 ? 0 : tres;
 
2053,7 → 2074,7
list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
list_move_tail(&vma->mm_list, &vma->vm->active_list);
list_move_tail(&vma->vm_link, &vma->vm->active_list);
}
 
static void
2091,9 → 2112,9
list_move_tail(&obj->global_list,
&to_i915(obj->base.dev)->mm.bound_list);
 
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!list_empty(&vma->mm_list))
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!list_empty(&vma->vm_link))
list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}
 
i915_gem_request_assign(&obj->last_fenced_req, NULL);
2250,7 → 2271,7
 
trace_i915_gem_request_add(request);
 
// i915_queue_hangcheck(ring->dev);
i915_queue_hangcheck(ring->dev);
 
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
2316,10 → 2337,8
i915_gem_request_remove_from_client(req);
 
if (ctx) {
if (i915.enable_execlists) {
if (ctx != req->ring->default_context)
intel_lr_context_unpin(req);
}
if (i915.enable_execlists && ctx != req->i915->kernel_context)
intel_lr_context_unpin(ctx, req->ring);
 
i915_gem_context_unreference(ctx);
}
2327,7 → 2346,8
kfree(req);
}
 
int i915_gem_request_alloc(struct intel_engine_cs *ring,
static inline int
__i915_gem_request_alloc(struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
2393,6 → 2413,31
return ret;
}
 
/**
* i915_gem_request_alloc - allocate a request structure
*
* @engine: engine that we wish to issue the request on.
* @ctx: context that the request will be associated with.
* This can be NULL if the request is not directly related to
* any specific user context, in which case this function will
* choose an appropriate context to use.
*
* Returns a pointer to the allocated request if successful,
* or an error code if not.
*/
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx)
{
struct drm_i915_gem_request *req;
int err;
 
if (ctx == NULL)
ctx = to_i915(engine->dev)->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
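/*
 * Callers now receive the request (or an ERR_PTR) directly instead of
 * filling in an out parameter, and may pass a NULL context to get the
 * kernel context, e.g.:
 *
 *	req = i915_gem_request_alloc(ring, NULL);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 */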
 
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
2584,11 → 2629,9
i915_gem_retire_requests_ring(ring);
idle &= list_empty(&ring->request_list);
if (i915.enable_execlists) {
unsigned long flags;
 
spin_lock_irqsave(&ring->execlist_lock, flags);
spin_lock_irq(&ring->execlist_lock);
idle &= list_empty(&ring->execlist_queue);
spin_unlock_irqrestore(&ring->execlist_lock, flags);
spin_unlock_irq(&ring->execlist_lock);
 
intel_execlists_retire_requests(ring);
}
2810,9 → 2853,13
return 0;
 
if (*to_req == NULL) {
ret = i915_gem_request_alloc(to, to->default_context, to_req);
if (ret)
return ret;
struct drm_i915_gem_request *req;
 
req = i915_gem_request_alloc(to, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
 
*to_req = req;
}
 
trace_i915_gem_ring_sync_to(*to_req, from, from_req);
2929,7 → 2976,7
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
 
if (list_empty(&vma->vma_link))
if (list_empty(&vma->obj_link))
return 0;
 
if (!drm_mm_node_allocated(&vma->node)) {
2948,8 → 2995,7
return ret;
}
 
if (i915_is_ggtt(vma->vm) &&
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
i915_gem_object_finish_gtt(obj);
 
/* release the fence reg _after_ flushing */
2963,8 → 3009,8
vma->vm->unbind_vma(vma);
vma->bound = 0;
 
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
list_del_init(&vma->vm_link);
if (vma->is_ggtt) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
obj->map_and_fenceable = false;
} else if (vma->ggtt_view.pages) {
3012,9 → 3058,9
if (!i915.enable_execlists) {
struct drm_i915_gem_request *req;
 
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret)
return ret;
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
 
ret = i915_switch_context(req);
if (ret) {
3210,7 → 3256,7
goto err_remove_node;
 
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &vm->inactive_list);
list_add_tail(&vma->vm_link, &vm->inactive_list);
 
return vma;
 
3375,7 → 3421,7
/* And bump the LRU for this access */
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->mm_list,
list_move_tail(&vma->vm_link,
&to_i915(obj->base.dev)->gtt.base.inactive_list);
 
return 0;
3410,7 → 3456,7
* catch the issue of the CS prefetch crossing page boundaries and
* reading an invalid PTE on older architectures.
*/
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
3473,7 → 3519,7
*/
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
 
3483,7 → 3529,7
}
}
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
vma->node.color = cache_level;
obj->cache_level = cache_level;
 
3957,10 → 4003,20
if (ret)
goto unref;
 
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy = obj->active << 16;
args->busy = 0;
if (obj->active) {
int i;
 
for (i = 0; i < I915_NUM_RINGS; i++) {
struct drm_i915_gem_request *req;
 
req = obj->last_read_req[i];
if (req)
args->busy |= 1 << (16 + req->ring->exec_id);
}
if (obj->last_write_req)
args->busy |= obj->last_write_req->ring->id;
args->busy |= obj->last_write_req->ring->exec_id;
}
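/*
 * The busy flags are now a bitmask: bits 16 and up carry one bit per
 * engine with an outstanding read (1 << (16 + exec_id)), while the low
 * 16 bits hold the exec_id of the last engine that wrote the object.
 */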
 
unref:
drm_gem_object_unreference(&obj->base);
4136,7 → 4192,7
 
trace_i915_gem_object_destroy(obj);
 
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
int ret;
 
vma->pin_count = 0;
4190,7 → 4246,7
struct i915_address_space *vm)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link) {
list_for_each_entry(vma, &obj->vma_list, obj_link) {
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
vma->vm == vm)
return vma;
4207,7 → 4263,7
if (WARN_ONCE(!view, "no view specified"))
return ERR_PTR(-EINVAL);
 
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
4216,7 → 4272,6
 
void i915_gem_vma_destroy(struct i915_vma *vma)
{
struct i915_address_space *vm = NULL;
WARN_ON(vma->node.allocated);
 
/* Keep the vma as a placeholder in the execbuffer reservation lists */
4223,13 → 4278,11
if (!list_empty(&vma->exec_list))
return;
 
vm = vma->vm;
if (!vma->is_ggtt)
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
if (!i915_is_ggtt(vm))
i915_ppgtt_put(i915_vm_to_ppgtt(vm));
list_del(&vma->obj_link);
 
list_del(&vma->vma_link);
 
kfree(vma);
}
 
4450,7 → 4503,7
*/
init_unused_rings(dev);
 
BUG_ON(!dev_priv->ring[RCS].default_context);
BUG_ON(!dev_priv->kernel_context);
 
ret = i915_ppgtt_init_hw(dev);
if (ret) {
4487,10 → 4540,9
for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req;
 
WARN_ON(!ring->default_context);
 
ret = i915_gem_request_alloc(ring, ring->default_context, &req);
if (ret) {
req = i915_gem_request_alloc(ring, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_ringbuffer(dev);
goto out;
}
4595,6 → 4647,14
 
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
 
if (i915.enable_execlists)
/*
* Neither the BIOS, ourselves or any other kernel
* expects the system to be in execlists mode on startup,
* so we need to reset the GPU back to legacy mode.
*/
intel_gpu_reset(dev);
}
 
static void
4605,7 → 4665,7
}
 
void
i915_gem_load(struct drm_device *dev)
i915_gem_load_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
4651,6 → 4711,7
i915_gem_restore_fences(dev);
 
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
 
dev_priv->mm.interruptible = true;
 
4703,6 → 4764,8
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
 
file_priv->bsd_ring = -1;
 
ret = i915_gem_context_open(dev, file);
if (ret)
kfree(file_priv);
4745,8 → 4808,8
 
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
4764,7 → 4827,7
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
4778,8 → 4841,8
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4795,7 → 4858,7
struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (vma->vm == ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
4808,7 → 4871,7
{
struct i915_vma *vma;
 
list_for_each_entry(vma, &o->vma_list, vma_link)
list_for_each_entry(vma, &o->vma_list, obj_link)
if (drm_mm_node_allocated(&vma->node))
return true;
 
4825,8 → 4888,8
 
BUG_ON(list_empty(&o->vma_list));
 
list_for_each_entry(vma, &o->vma_list, vma_link) {
if (i915_is_ggtt(vma->vm) &&
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
continue;
if (vma->vm == vm)
4838,7 → 4901,7
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
list_for_each_entry(vma, &obj->vma_list, vma_link)
list_for_each_entry(vma, &obj->vma_list, obj_link)
if (vma->pin_count > 0)
return true;