Subversion Repositories Kolibri OS

Compare Revisions

Rev 7143 → Rev 7144

/drivers/video/drm/i915/i915_gem_execbuffer.c
193,13 → 193,10
 		return eb->lut[handle];
 	} else {
 		struct hlist_head *head;
-		struct hlist_node *node;
+		struct i915_vma *vma;
 
 		head = &eb->buckets[handle & eb->and];
-		hlist_for_each(node, head) {
-			struct i915_vma *vma;
-
-			vma = hlist_entry(node, struct i915_vma, exec_node);
+		hlist_for_each_entry(vma, head, exec_node) {
 			if (vma->exec_handle == handle)
 				return vma;
 		}
333,13 → 330,26
 	/* Map the page containing the relocation we're going to perform. */
 	offset = i915_gem_obj_ggtt_offset(obj);
 	offset += reloc->offset;
-	MapPage(dev_priv->gtt.mappable,dev_priv->gtt.mappable_base +
-		(offset & PAGE_MASK), PG_SW);
-	reloc_page = dev_priv->gtt.mappable;
+	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
-//	io_mapping_unmap_atomic(reloc_page);
+	if (INTEL_INFO(dev)->gen >= 8) {
+		offset += sizeof(uint32_t);
+
+		if (offset_in_page(offset) == 0) {
+			io_mapping_unmap_atomic(reloc_page);
+			reloc_page =
+				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+							 offset);
+		}
+
+		iowrite32(upper_32_bits(delta),
+			  reloc_page + offset_in_page(offset));
+	}
+
+	io_mapping_unmap_atomic(reloc_page);
 
 	return 0;
 }
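Note: the rewritten relocation path above replaces the KolibriOS MapPage() remap of a fixed mappable window with the upstream io_mapping_map_atomic_wc()/io_mapping_unmap_atomic() pair, and adds the gen8 case where the upper 32 bits of the relocation may fall on the following page. Below is a minimal standalone sketch of that pattern; write_reloc_64() and its parameters are illustrative, not driver code:

/* Write a 64-bit relocation value as two 32-bit stores through an atomic
 * write-combining mapping.  The second store may cross into the next page,
 * in which case the mapping must be dropped and re-established. */
static void write_reloc_64(struct io_mapping *mappable, u64 offset, u64 delta)
{
	void __iomem *reloc_page;

	reloc_page = io_mapping_map_atomic_wc(mappable, offset & PAGE_MASK);
	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

	offset += sizeof(u32);
	if (offset_in_page(offset) == 0) {
		/* Crossed a page boundary: remap before the upper half. */
		io_mapping_unmap_atomic(reloc_page);
		reloc_page = io_mapping_map_atomic_wc(mappable, offset);
	}
	iowrite32(upper_32_bits(delta), reloc_page + offset_in_page(offset));

	io_mapping_unmap_atomic(reloc_page);
}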
 
512,7 → 522,8
 		count = ARRAY_SIZE(stack_reloc);
 		remain -= count;
 
-		memcpy(r, user_relocs, count*sizeof(r[0]));
+		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
+			return -EFAULT;
 
 		do {
 			u64 offset = r->presumed_offset;
521,11 → 532,11
 			if (ret)
 				return ret;
 
-			if (r->presumed_offset != offset)
-			{
-				memcpy(&user_relocs->presumed_offset,
+			if (r->presumed_offset != offset &&
+			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
 						    &r->presumed_offset,
-						    sizeof(r->presumed_offset));
+						    sizeof(r->presumed_offset))) {
+				return -EFAULT;
 			}
 
 			user_relocs++;
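Note: both hunks above swap plain memcpy() for the __copy_*_inatomic() user-copy helpers, which never sleep and are therefore safe in this non-blocking path; they return the number of bytes left uncopied, so any non-zero result becomes -EFAULT. A minimal sketch of that contract, with a hypothetical fetch_user_reloc() helper:

/* Fetch one relocation entry from user memory without sleeping; a non-zero
 * return from the inatomic copy means some bytes faulted. */
static int fetch_user_reloc(struct drm_i915_gem_relocation_entry *r,
			    const struct drm_i915_gem_relocation_entry __user *urel)
{
	if (__copy_from_user_inatomic(r, urel, sizeof(*r)))
		return -EFAULT;

	return 0;
}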
655,7 → 666,7
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!i915_is_ggtt(vma->vm))
+	if (!vma->is_ggtt)
 		return false;
 
 	/* See also use_cpu_reloc() */
674,8 → 685,7
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
-		!i915_is_ggtt(vma->vm));
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
1286,6 → 1296,9
 	exec_start = params->batch_obj_vm_offset +
 		     params->args_batch_start_offset;
 
+	if (exec_len == 0)
+		exec_len = params->batch_obj->base.size;
+
 	ret = ring->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
1302,34 → 1315,24
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
-				  struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
-	/* Check whether the file_priv is using one ring */
-	if (file_priv->bsd_ring)
-		return file_priv->bsd_ring->id;
-	else {
-		/* If no, use the ping-pong mechanism to select one ring */
-		int ring_id;
-
-		mutex_lock(&dev->struct_mutex);
-		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
-			ring_id = VCS;
-			dev_priv->mm.bsd_ring_dispatch_index = 1;
-		} else {
-			ring_id = VCS2;
-			dev_priv->mm.bsd_ring_dispatch_index = 0;
-		}
-		file_priv->bsd_ring = &dev_priv->ring[ring_id];
-		mutex_unlock(&dev->struct_mutex);
-		return ring_id;
+	/* Check whether the file_priv has already selected one ring. */
+	if ((int)file_priv->bsd_ring < 0) {
+		/* If not, use the ping-pong mechanism to select one. */
+		mutex_lock(&dev_priv->dev->struct_mutex);
+		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+		mutex_unlock(&dev_priv->dev->struct_mutex);
 	}
+
+	return file_priv->bsd_ring;
 }
 
 static struct drm_i915_gem_object *
 eb_get_batch(struct eb_vmas *eb)
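Note: gen8_dispatch_bsd_ring() now treats file_priv->bsd_ring as a plain ring index (negative until first use) rather than a ring pointer, and hands out the two BSD rings to new clients alternately. A minimal sketch of that ping-pong selection, with hypothetical types:

/* Alternate new clients between the two BSD engines, then keep each
 * client pinned to its first choice. */
struct bsd_client {
	int bsd_index;			/* < 0 until the first dispatch */
};

static unsigned int select_bsd_index(struct bsd_client *c,
				     unsigned int *next_index)
{
	if (c->bsd_index < 0) {
		c->bsd_index = *next_index;	/* take the current slot */
		*next_index ^= 1;		/* flip it for the next client */
	}

	return c->bsd_index;
}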
1351,7 → 1354,65
 	return vma->obj;
 }
 
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+	[I915_EXEC_DEFAULT]	= RCS,
+	[I915_EXEC_RENDER]	= RCS,
+	[I915_EXEC_BLT]		= BCS,
+	[I915_EXEC_BSD]		= VCS,
+	[I915_EXEC_VEBOX]	= VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+	       struct drm_file *file,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct intel_engine_cs **ring)
+{
+	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+	if (user_ring_id > I915_USER_RINGS) {
+		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	if ((user_ring_id != I915_EXEC_BSD) &&
+	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+		DRM_DEBUG("execbuf with non bsd ring but with invalid "
+			  "bsd dispatch flags: %d\n", (int)(args->flags));
+		return -EINVAL;
+	}
+
+	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+			   bsd_idx <= I915_EXEC_BSD_RING2) {
+			bsd_idx >>= I915_EXEC_BSD_SHIFT;
+			bsd_idx--;
+		} else {
+			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+				  bsd_idx);
+			return -EINVAL;
+		}
+
+		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+	} else {
+		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+	}
+
+	if (!intel_ring_initialized(*ring)) {
+		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
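Note: eb_select_ring() centralises the ring lookup that i915_gem_do_execbuffer() previously open-coded (removed in a later hunk). Assuming the upstream flag encodings, where I915_EXEC_BSD_RING1 and I915_EXEC_BSD_RING2 are 1 and 2 shifted left by I915_EXEC_BSD_SHIFT, the explicit-ring path decodes to a 0-based VCS index like this illustrative fragment:

/* e.g. args->flags carries I915_EXEC_BSD | I915_EXEC_BSD_RING2 */
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;	/* 2 << shift */

bsd_idx >>= I915_EXEC_BSD_SHIFT;	/* -> 2 */
bsd_idx--;				/* -> 1, so _VCS(1) picks VCS2 */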
1358,6 → 1419,7
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
1386,52 → 1448,10
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
-		DRM_DEBUG("execbuf with unknown ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
+	ret = eb_select_ring(dev_priv, file, args, &ring);
+	if (ret)
+		return ret;
 
-	if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
-	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
-		DRM_DEBUG("execbuf with non bsd ring but with invalid "
-			  "bsd dispatch flags: %d\n", (int)(args->flags));
-		return -EINVAL;
-	}
-
-	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-		ring = &dev_priv->ring[RCS];
-	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
-		if (HAS_BSD2(dev)) {
-			int ring_id;
-
-			switch (args->flags & I915_EXEC_BSD_MASK) {
-			case I915_EXEC_BSD_DEFAULT:
-				ring_id = gen8_dispatch_bsd_ring(dev, file);
-				ring = &dev_priv->ring[ring_id];
-				break;
-			case I915_EXEC_BSD_RING1:
-				ring = &dev_priv->ring[VCS];
-				break;
-			case I915_EXEC_BSD_RING2:
-				ring = &dev_priv->ring[VCS2];
-				break;
-			default:
-				DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
-					  (int)(args->flags & I915_EXEC_BSD_MASK));
-				return -EINVAL;
-			}
-		} else
-			ring = &dev_priv->ring[VCS];
-	} else
-		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
-	if (!intel_ring_initialized(ring)) {
-		DRM_DEBUG("execbuf with invalid ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
-
 	if (args->buffer_count < 1) {
 		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
 		return -EINVAL;
1580,11 → 1600,13
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &params->request);
-	if (ret)
+	req = i915_gem_request_alloc(ring, ctx);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
 		goto err_batch_unpin;
+	}
 
-	ret = i915_gem_request_add_to_client(params->request, file);
+	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
 		goto err_batch_unpin;
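Note: i915_gem_request_alloc() now returns the request itself, with failure encoded via the kernel's ERR_PTR idiom instead of an int plus out-parameter. A minimal sketch of that idiom, with a hypothetical make_thing():

#include <linux/err.h>
#include <linux/slab.h>

struct thing { int payload; };

static struct thing *make_thing(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */

	return t;
}

/* Caller side: IS_ERR() distinguishes an encoded errno from a valid pointer,
 * and PTR_ERR() recovers the errno. */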
 
1600,6 → 1622,7
 	params->dispatch_flags = dispatch_flags;
 	params->batch_obj = batch_obj;
 	params->ctx = ctx;
+	params->request = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
1623,8 → 1646,8
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && params->request)
-		i915_gem_request_cancel(params->request);
+	if (ret && !IS_ERR_OR_NULL(req))
+		i915_gem_request_cancel(req);
 
 	mutex_unlock(&dev->struct_mutex);