@@ -41,11 +41,14 @@
#define RQ_BUG_ON(expr) |
|
extern int x86_clflush_size; |
#define __copy_to_user_inatomic __copy_to_user |
|
#define PROT_READ 0x1 /* page can be read */ |
#define PROT_WRITE 0x2 /* page can be written */ |
#define MAP_SHARED 0x01 /* Share changes */ |
|
|
|
struct drm_i915_gem_object *get_fb_obj(); |
|
unsigned long vm_mmap(struct file *file, unsigned long addr, |
@@ -152,10 +155,10 @@
|
pinned = 0; |
mutex_lock(&dev->struct_mutex); |
list_for_each_entry(vma, &ggtt->base.active_list, vm_link) |
list_for_each_entry(vma, &ggtt->base.active_list, mm_list) |
if (vma->pin_count) |
pinned += vma->node.size; |
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) |
list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list) |
if (vma->pin_count) |
pinned += vma->node.size; |
mutex_unlock(&dev->struct_mutex); |
@@ -244,7 +247,7 @@
int ret; |
|
drm_gem_object_reference(&obj->base); |
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) |
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) |
if (i915_vma_unbind(vma)) |
break; |
|
@@ -650,25 +653,7 @@
* page faults in the source data |
*/ |
|
static inline int |
fast_user_write(struct io_mapping *mapping, |
loff_t page_base, int page_offset, |
char __user *user_data, |
int length) |
{ |
void __iomem *vaddr_atomic; |
void *vaddr; |
unsigned long unwritten; |
|
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); |
/* We can use the cpu mem copy function because this is X86. */ |
vaddr = (void __force*)vaddr_atomic + page_offset; |
unwritten = __copy_from_user_inatomic_nocache(vaddr, |
user_data, length); |
io_mapping_unmap_atomic(vaddr_atomic); |
return unwritten; |
} |
|
/** |
* This is the fast pwrite path, where we copy the data directly from the |
* user into the GTT, uncached. |
@@ -717,16 +702,11 @@
if ((page_offset + remain) > PAGE_SIZE) |
page_length = PAGE_SIZE - page_offset; |
|
/* If we get a fault while copying data, then (presumably) our |
* source page isn't available. Return the error and we'll |
* retry in the slow path. |
*/ |
if (fast_user_write(dev_priv->gtt.mappable, page_base, |
page_offset, user_data, page_length)) { |
ret = -EFAULT; |
goto out_flush; |
} |
MapPage(dev_priv->gtt.mappable, |
dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW); |
|
memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length); |
|
remain -= page_length; |
user_data += page_length; |
offset += page_length; |
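/*
 * Illustrative sketch only, not part of the patch above: the general shape of
 * the per-page copy loop used by the pwrite path.  Each chunk is clamped so
 * it never crosses a page boundary, which is what the
 * "(page_offset + remain) > PAGE_SIZE" test above guarantees.  The helper
 * name and parameters below are generic placeholders, not the driver's.
 */
static void copy_in_page_chunks(char *dst, const char *src,
				unsigned long offset, unsigned long remain)
{
	while (remain > 0) {
		unsigned long page_offset = offset & (PAGE_SIZE - 1);
		unsigned long page_length = remain;

		/* clamp so this chunk stays within the current page */
		if (page_offset + remain > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		memcpy(dst + offset, src, page_length);

		remain -= page_length;
		src    += page_length;
		offset += page_length;
	}
}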
@@ -761,8 +741,9 @@
if (needs_clflush_before) |
drm_clflush_virt_range(vaddr + shmem_page_offset, |
page_length); |
ret = __copy_from_user_inatomic(vaddr + shmem_page_offset, |
user_data, page_length); |
memcpy(vaddr + shmem_page_offset, |
user_data, |
page_length); |
if (needs_clflush_after) |
drm_clflush_virt_range(vaddr + shmem_page_offset, |
page_length); |
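/*
 * Illustrative sketch only: upstream uses a non-faulting user copy at this
 * point and falls back to a slower, faultable path when it cannot finish.
 * The memcpy() in the hunk above assumes the source buffer is already
 * directly addressable, which is an assumption of this port, not of
 * upstream.  Roughly, upstream's pattern is the following hypothetical
 * helper:
 */
static int try_copy_from_user_atomic(void *dst, const void __user *src,
				     unsigned long len)
{
	/* __copy_from_user_inatomic() returns the number of bytes NOT copied */
	if (__copy_from_user_inatomic(dst, src, len))
		return -EFAULT;	/* caller retries via the faultable slow path */
	return 0;
}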
@@ -1145,7 +1126,7 @@
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; |
wait_queue_t wait; |
unsigned long timeout_expire; |
s64 before = 0; /* Only to silence a compiler warning. */ |
s64 before, now; |
int ret; |
|
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); |
@@ -1165,17 +1146,14 @@
return -ETIME; |
|
timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); |
|
/* |
* Record current time in case interrupted by signal, or wedged. |
*/ |
before = ktime_get_raw_ns(); |
} |
|
if (INTEL_INFO(dev_priv)->gen >= 6) |
gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); |
|
/* Record current time in case interrupted by signal, or wedged */ |
trace_i915_gem_request_wait_begin(req); |
before = ktime_get_raw_ns(); |
|
/* Optimistic spin for the next jiffie before touching IRQs */ |
ret = __i915_spin_request(req, state); |
@@ -1235,10 +1213,11 @@
DestroyEvent(wait.evnt); |
|
out: |
now = ktime_get_raw_ns(); |
trace_i915_gem_request_wait_end(req); |
|
if (timeout) { |
s64 tres = *timeout - (ktime_get_raw_ns() - before); |
s64 tres = *timeout - (now - before); |
|
*timeout = tres < 0 ? 0 : tres; |
|
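/*
 * Illustrative sketch only: the timeout bookkeeping above.  The elapsed raw
 * time is subtracted from the caller's budget and clamped at zero, so an
 * interrupted or wedged wait never reports a negative remainder back to
 * userspace.
 */
static s64 remaining_timeout_ns(s64 timeout_ns, u64 before_ns, u64 now_ns)
{
	s64 tres = timeout_ns - (s64)(now_ns - before_ns);

	return tres < 0 ? 0 : tres;
}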
@@ -2074,7 +2053,7 @@
list_move_tail(&obj->ring_list[ring->id], &ring->active_list); |
i915_gem_request_assign(&obj->last_read_req[ring->id], req); |
|
list_move_tail(&vma->vm_link, &vma->vm->active_list); |
list_move_tail(&vma->mm_list, &vma->vm->active_list); |
} |
|
static void |
@@ -2112,9 +2091,9 @@
list_move_tail(&obj->global_list, |
&to_i915(obj->base.dev)->mm.bound_list); |
|
list_for_each_entry(vma, &obj->vma_list, obj_link) { |
if (!list_empty(&vma->vm_link)) |
list_move_tail(&vma->vm_link, &vma->vm->inactive_list); |
list_for_each_entry(vma, &obj->vma_list, vma_link) { |
if (!list_empty(&vma->mm_list)) |
list_move_tail(&vma->mm_list, &vma->vm->inactive_list); |
} |
|
i915_gem_request_assign(&obj->last_fenced_req, NULL); |
@@ -2271,7 +2250,7 @@
|
trace_i915_gem_request_add(request); |
|
i915_queue_hangcheck(ring->dev); |
// i915_queue_hangcheck(ring->dev); |
|
queue_delayed_work(dev_priv->wq, |
&dev_priv->mm.retire_work, |
@@ -2337,8 +2316,10 @@
i915_gem_request_remove_from_client(req); |
|
if (ctx) { |
if (i915.enable_execlists && ctx != req->i915->kernel_context) |
intel_lr_context_unpin(ctx, req->ring); |
if (i915.enable_execlists) { |
if (ctx != req->ring->default_context) |
intel_lr_context_unpin(req); |
} |
|
i915_gem_context_unreference(ctx); |
} |
@@ -2346,8 +2327,7 @@
kfree(req); |
} |
|
static inline int |
__i915_gem_request_alloc(struct intel_engine_cs *ring, |
int i915_gem_request_alloc(struct intel_engine_cs *ring, |
struct intel_context *ctx, |
struct drm_i915_gem_request **req_out) |
{ |
@@ -2413,31 +2393,6 @@
return ret; |
} |
|
/** |
* i915_gem_request_alloc - allocate a request structure |
* |
* @engine: engine that we wish to issue the request on. |
* @ctx: context that the request will be associated with. |
* This can be NULL if the request is not directly related to |
* any specific user context, in which case this function will |
* choose an appropriate context to use. |
* |
* Returns a pointer to the allocated request if successful, |
* or an error code if not. |
*/ |
struct drm_i915_gem_request * |
i915_gem_request_alloc(struct intel_engine_cs *engine, |
struct intel_context *ctx) |
{ |
struct drm_i915_gem_request *req; |
int err; |
|
if (ctx == NULL) |
ctx = to_i915(engine->dev)->kernel_context; |
err = __i915_gem_request_alloc(engine, ctx, &req); |
return err ? ERR_PTR(err) : req; |
} |
|
void i915_gem_request_cancel(struct drm_i915_gem_request *req) |
{ |
intel_ring_reserved_space_cancel(req->ringbuf); |
@@ -2629,9 +2584,11 @@
i915_gem_retire_requests_ring(ring); |
idle &= list_empty(&ring->request_list); |
if (i915.enable_execlists) { |
spin_lock_irq(&ring->execlist_lock); |
unsigned long flags; |
|
spin_lock_irqsave(&ring->execlist_lock, flags); |
idle &= list_empty(&ring->execlist_queue); |
spin_unlock_irq(&ring->execlist_lock); |
spin_unlock_irqrestore(&ring->execlist_lock, flags); |
|
intel_execlists_retire_requests(ring); |
} |
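/*
 * Illustrative sketch only: spin_lock_irqsave(), as used in the hunk above,
 * saves the current interrupt state in "flags" and restores exactly that
 * state on unlock, so the section is safe whether or not the caller already
 * runs with IRQs disabled; spin_lock_irq()/spin_unlock_irq() instead
 * re-enable interrupts unconditionally on exit.  Generic names below.
 */
static bool queue_is_idle(spinlock_t *lock, struct list_head *queue)
{
	unsigned long flags;
	bool idle;

	spin_lock_irqsave(lock, flags);
	idle = list_empty(queue);
	spin_unlock_irqrestore(lock, flags);

	return idle;
}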
@@ -2853,13 +2810,9 @@
return 0; |
|
if (*to_req == NULL) { |
struct drm_i915_gem_request *req; |
|
req = i915_gem_request_alloc(to, NULL); |
if (IS_ERR(req)) |
return PTR_ERR(req); |
|
*to_req = req; |
ret = i915_gem_request_alloc(to, to->default_context, to_req); |
if (ret) |
return ret; |
} |
|
trace_i915_gem_ring_sync_to(*to_req, from, from_req); |
@@ -2976,7 +2929,7 @@
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
int ret; |
|
if (list_empty(&vma->obj_link)) |
if (list_empty(&vma->vma_link)) |
return 0; |
|
if (!drm_mm_node_allocated(&vma->node)) { |
@@ -2995,7 +2948,8 @@
return ret; |
} |
|
if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
if (i915_is_ggtt(vma->vm) && |
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
i915_gem_object_finish_gtt(obj); |
|
/* release the fence reg _after_ flushing */ |
@@ -3009,8 +2963,8 @@
vma->vm->unbind_vma(vma); |
vma->bound = 0; |
|
list_del_init(&vma->vm_link); |
if (vma->is_ggtt) { |
list_del_init(&vma->mm_list); |
if (i915_is_ggtt(vma->vm)) { |
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
obj->map_and_fenceable = false; |
} else if (vma->ggtt_view.pages) { |
@@ -3058,9 +3012,9 @@
if (!i915.enable_execlists) { |
struct drm_i915_gem_request *req; |
|
req = i915_gem_request_alloc(ring, NULL); |
if (IS_ERR(req)) |
return PTR_ERR(req); |
ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
if (ret) |
return ret; |
|
ret = i915_switch_context(req); |
if (ret) { |
@@ -3256,7 +3210,7 @@
goto err_remove_node; |
|
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
list_add_tail(&vma->vm_link, &vm->inactive_list); |
list_add_tail(&vma->mm_list, &vm->inactive_list); |
|
return vma; |
|
@@ -3421,7 +3375,7 @@
/* And bump the LRU for this access */ |
vma = i915_gem_obj_to_ggtt(obj); |
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) |
list_move_tail(&vma->vm_link, |
list_move_tail(&vma->mm_list, |
&to_i915(obj->base.dev)->gtt.base.inactive_list); |
|
return 0; |
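/*
 * Illustrative sketch only: the LRU bump above relies on list_move_tail(),
 * which unlinks an entry and re-links it at the tail, so the head of the
 * inactive list always holds the least recently used VMAs for eviction to
 * scan first.  The structure below is a generic stand-in, not the driver's.
 */
struct lru_entry {
	struct list_head link;
	/* payload */
};

static void lru_touch(struct list_head *lru, struct lru_entry *e)
{
	/* most recently used entries live at the tail */
	list_move_tail(&e->link, lru);
}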
@@ -3456,7 +3410,7 @@
* catch the issue of the CS prefetch crossing page boundaries and |
* reading an invalid PTE on older architectures. |
*/ |
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { |
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
if (!drm_mm_node_allocated(&vma->node)) |
continue; |
|
@@ -3519,7 +3473,7 @@
*/ |
} |
|
list_for_each_entry(vma, &obj->vma_list, obj_link) { |
list_for_each_entry(vma, &obj->vma_list, vma_link) { |
if (!drm_mm_node_allocated(&vma->node)) |
continue; |
|
@@ -3529,7 +3483,7 @@
} |
} |
|
list_for_each_entry(vma, &obj->vma_list, obj_link) |
list_for_each_entry(vma, &obj->vma_list, vma_link) |
vma->node.color = cache_level; |
obj->cache_level = cache_level; |
|
@@ -4003,20 +3957,10 @@
if (ret) |
goto unref; |
|
args->busy = 0; |
if (obj->active) { |
int i; |
|
for (i = 0; i < I915_NUM_RINGS; i++) { |
struct drm_i915_gem_request *req; |
|
req = obj->last_read_req[i]; |
if (req) |
args->busy |= 1 << (16 + req->ring->exec_id); |
} |
BUILD_BUG_ON(I915_NUM_RINGS > 16); |
args->busy = obj->active << 16; |
if (obj->last_write_req) |
args->busy |= obj->last_write_req->ring->exec_id; |
} |
args->busy |= obj->last_write_req->ring->id; |
|
unref: |
drm_gem_object_unreference(&obj->base); |
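/*
 * Illustrative sketch only: both versions in the hunk above pack the busy
 * ioctl result the same way.  Read activity is reported in the upper 16 bits
 * (per-engine bits in the newer code, a single "active" flag in the older
 * one) and the identifier of the last engine that wrote the object goes in
 * the low bits.  The helper below is hypothetical, just to show the encoding.
 */
static u32 encode_busy(u16 read_engines, u16 last_write_engine)
{
	return ((u32)read_engines << 16) | last_write_engine;
}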
@@ -4192,7 +4136,7 @@
|
trace_i915_gem_object_destroy(obj); |
|
list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) { |
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
int ret; |
|
vma->pin_count = 0; |
@@ -4246,7 +4190,7 @@
struct i915_address_space *vm) |
{ |
struct i915_vma *vma; |
list_for_each_entry(vma, &obj->vma_list, obj_link) { |
list_for_each_entry(vma, &obj->vma_list, vma_link) { |
if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL && |
vma->vm == vm) |
return vma; |
@@ -4263,7 +4207,7 @@
if (WARN_ONCE(!view, "no view specified")) |
return ERR_PTR(-EINVAL); |
|
list_for_each_entry(vma, &obj->vma_list, obj_link) |
list_for_each_entry(vma, &obj->vma_list, vma_link) |
if (vma->vm == ggtt && |
i915_ggtt_view_equal(&vma->ggtt_view, view)) |
return vma; |
@@ -4272,6 +4216,7 @@
|
void i915_gem_vma_destroy(struct i915_vma *vma) |
{ |
struct i915_address_space *vm = NULL; |
WARN_ON(vma->node.allocated); |
|
/* Keep the vma as a placeholder in the execbuffer reservation lists */ |
@@ -4278,11 +4223,13 @@
if (!list_empty(&vma->exec_list)) |
return; |
|
if (!vma->is_ggtt) |
i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm)); |
vm = vma->vm; |
|
list_del(&vma->obj_link); |
if (!i915_is_ggtt(vm)) |
i915_ppgtt_put(i915_vm_to_ppgtt(vm)); |
|
list_del(&vma->vma_link); |
|
kfree(vma); |
} |
|
@@ -4503,7 +4450,7 @@
*/ |
init_unused_rings(dev); |
|
BUG_ON(!dev_priv->kernel_context); |
BUG_ON(!dev_priv->ring[RCS].default_context); |
|
ret = i915_ppgtt_init_hw(dev); |
if (ret) { |
@@ -4540,9 +4487,10 @@
for_each_ring(ring, dev_priv, i) { |
struct drm_i915_gem_request *req; |
|
req = i915_gem_request_alloc(ring, NULL); |
if (IS_ERR(req)) { |
ret = PTR_ERR(req); |
WARN_ON(!ring->default_context); |
|
ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
if (ret) { |
i915_gem_cleanup_ringbuffer(dev); |
goto out; |
} |
@@ -4647,14 +4595,6 @@
|
for_each_ring(ring, dev_priv, i) |
dev_priv->gt.cleanup_ring(ring); |
|
if (i915.enable_execlists) |
/* |
* Neither the BIOS, ourselves or any other kernel |
* expects the system to be in execlists mode on startup, |
* so we need to reset the GPU back to legacy mode. |
*/ |
intel_gpu_reset(dev); |
} |
|
static void |
@@ -4665,7 +4605,7 @@
} |
|
void |
i915_gem_load_init(struct drm_device *dev) |
i915_gem_load(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int i; |
@@ -4711,7 +4651,6 @@
i915_gem_restore_fences(dev); |
|
i915_gem_detect_bit_6_swizzle(dev); |
init_waitqueue_head(&dev_priv->pending_flip_queue); |
|
dev_priv->mm.interruptible = true; |
|
@@ -4764,8 +4703,6 @@
spin_lock_init(&file_priv->mm.lock); |
INIT_LIST_HEAD(&file_priv->mm.request_list); |
|
file_priv->bsd_ring = -1; |
|
ret = i915_gem_context_open(dev, file); |
if (ret) |
kfree(file_priv); |
@@ -4808,8 +4745,8 @@
|
WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); |
|
list_for_each_entry(vma, &o->vma_list, obj_link) { |
if (vma->is_ggtt && |
list_for_each_entry(vma, &o->vma_list, vma_link) { |
if (i915_is_ggtt(vma->vm) && |
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) |
continue; |
if (vma->vm == vm) |
@@ -4827,7 +4764,7 @@
struct i915_address_space *ggtt = i915_obj_to_ggtt(o); |
struct i915_vma *vma; |
|
list_for_each_entry(vma, &o->vma_list, obj_link) |
list_for_each_entry(vma, &o->vma_list, vma_link) |
if (vma->vm == ggtt && |
i915_ggtt_view_equal(&vma->ggtt_view, view)) |
return vma->node.start; |
@@ -4841,8 +4778,8 @@
{ |
struct i915_vma *vma; |
|
list_for_each_entry(vma, &o->vma_list, obj_link) { |
if (vma->is_ggtt && |
list_for_each_entry(vma, &o->vma_list, vma_link) { |
if (i915_is_ggtt(vma->vm) && |
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) |
continue; |
if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) |
@@ -4858,7 +4795,7 @@
struct i915_address_space *ggtt = i915_obj_to_ggtt(o); |
struct i915_vma *vma; |
|
list_for_each_entry(vma, &o->vma_list, obj_link) |
list_for_each_entry(vma, &o->vma_list, vma_link) |
if (vma->vm == ggtt && |
i915_ggtt_view_equal(&vma->ggtt_view, view) && |
drm_mm_node_allocated(&vma->node)) |
@@ -4871,7 +4808,7 @@
{ |
struct i915_vma *vma; |
|
list_for_each_entry(vma, &o->vma_list, obj_link) |
list_for_each_entry(vma, &o->vma_list, vma_link) |
if (drm_mm_node_allocated(&vma->node)) |
return true; |
|
@@ -4888,8 +4825,8 @@
|
BUG_ON(list_empty(&o->vma_list)); |
|
list_for_each_entry(vma, &o->vma_list, obj_link) { |
if (vma->is_ggtt && |
list_for_each_entry(vma, &o->vma_list, vma_link) { |
if (i915_is_ggtt(vma->vm) && |
vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) |
continue; |
if (vma->vm == vm) |
@@ -4901,7 +4838,7 @@
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) |
{ |
struct i915_vma *vma; |
list_for_each_entry(vma, &obj->vma_list, obj_link) |
list_for_each_entry(vma, &obj->vma_list, vma_link) |
if (vma->pin_count > 0) |
return true; |
|