252,7 → 252,6 |
{ |
return (HAS_LLC(obj->base.dev) || |
obj->base.write_domain == I915_GEM_DOMAIN_CPU || |
!obj->map_and_fenceable || |
obj->cache_level != I915_CACHE_NONE); |
} |
|
320,8 → 319,53 |
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
					      offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); |
|
	io_mapping_unmap_atomic(reloc_page);
|
return 0; |
} |
|
static void |
clflush_write32(void *addr, uint32_t value) |
{ |
/* This is not a fast path, so KISS. */ |
drm_clflush_virt_range(addr, sizeof(uint32_t)); |
*(uint32_t *)addr = value; |
drm_clflush_virt_range(addr, sizeof(uint32_t)); |
} |
|
static int |
relocate_entry_clflush(struct drm_i915_gem_object *obj, |
struct drm_i915_gem_relocation_entry *reloc, |
uint64_t target_offset) |
{ |
struct drm_device *dev = obj->base.dev; |
uint32_t page_offset = offset_in_page(reloc->offset); |
uint64_t delta = (int)reloc->delta + target_offset; |
char *vaddr; |
int ret; |
|
ret = i915_gem_object_set_to_gtt_domain(obj, true); |
if (ret) |
return ret; |
|
vaddr = kmap_atomic(i915_gem_object_get_page(obj, |
reloc->offset >> PAGE_SHIFT)); |
clflush_write32(vaddr + page_offset, lower_32_bits(delta)); |
|
if (INTEL_INFO(dev)->gen >= 8) { |
page_offset = offset_in_page(page_offset + sizeof(uint32_t)); |
|
if (page_offset == 0) { |
kunmap_atomic(vaddr); |
vaddr = kmap_atomic(i915_gem_object_get_page(obj, |
(reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); |
} |
|
clflush_write32(vaddr + page_offset, upper_32_bits(delta)); |
} |
|
kunmap_atomic(vaddr); |
|
return 0; |
} |
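
/*
 * Illustrative sketch (userspace, not part of the patch): models how the
 * clflush path above writes a 64-bit relocation as two 32-bit halves and
 * hops to the next page when the upper half crosses a page boundary, the
 * same dance relocate_entry_clflush() does with kmap_atomic(). The page
 * size, buffer and function names are demo assumptions only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SIM 4096u

static void write_reloc64(uint8_t *pages, size_t reloc_offset, uint64_t delta)
{
	size_t page = reloc_offset / PAGE_SIZE_SIM;
	size_t page_offset = reloc_offset % PAGE_SIZE_SIM;
	uint32_t lo = (uint32_t)delta, hi = (uint32_t)(delta >> 32);

	/* the low half always fits: relocation offsets are 4-byte aligned */
	memcpy(pages + page * PAGE_SIZE_SIM + page_offset, &lo, sizeof(lo));

	/* the high half may start at offset 0 of the following page */
	page_offset = (page_offset + sizeof(uint32_t)) % PAGE_SIZE_SIM;
	if (page_offset == 0)
		page++;		/* "kunmap the old page, kmap the next" */
	memcpy(pages + page * PAGE_SIZE_SIM + page_offset, &hi, sizeof(hi));
}

int main(void)
{
	static uint8_t pages[2 * PAGE_SIZE_SIM];

	write_reloc64(pages, PAGE_SIZE_SIM - 4, 0x1122334455667788ull);
	printf("%02x %02x\n", pages[PAGE_SIZE_SIM - 4], pages[PAGE_SIZE_SIM]);
	return 0;
}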
|
350,10 → 394,12 |
* pipe_control writes because the gpu doesn't properly redirect them |
* through the ppgtt for non_secure batchbuffers. */ |
if (unlikely(IS_GEN6(dev) && |
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && |
!(target_vma->bound & GLOBAL_BIND))) |
target_vma->bind_vma(target_vma, target_i915_obj->cache_level, |
GLOBAL_BIND); |
reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) { |
ret = i915_vma_bind(target_vma, target_i915_obj->cache_level, |
PIN_GLOBAL); |
if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!")) |
return ret; |
} |
|
/* Validate that the target is in a valid r/w GPU domain */ |
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { |
409,8 → 455,14 |
|
if (use_cpu_reloc(obj)) |
ret = relocate_entry_cpu(obj, reloc, target_offset); |
else |
else if (obj->map_and_fenceable) |
ret = relocate_entry_gtt(obj, reloc, target_offset); |
	else if (cpu_has_clflush)
ret = relocate_entry_clflush(obj, reloc, target_offset); |
else { |
WARN_ONCE(1, "Impossible case in relocation handling\n"); |
ret = -ENODEV; |
} |
|
if (ret) |
return ret; |
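
/*
 * Illustrative sketch (userspace): the three-way relocation-path choice made
 * just above, in one place. The booleans stand in for use_cpu_reloc(),
 * obj->map_and_fenceable and cpu_has_clflush; the enum and function names
 * are assumptions for this demo only.
 */
#include <stdbool.h>
#include <stdio.h>

enum reloc_path { RELOC_CPU, RELOC_GTT, RELOC_CLFLUSH, RELOC_NONE };

static enum reloc_path pick_reloc_path(bool cpu_reloc_ok, bool map_and_fenceable,
					bool have_clflush)
{
	if (cpu_reloc_ok)
		return RELOC_CPU;	/* kmap + plain store, already coherent */
	if (map_and_fenceable)
		return RELOC_GTT;	/* iowrite through the mappable aperture */
	if (have_clflush)
		return RELOC_CLFLUSH;	/* kmap + explicit cache flushes */
	return RELOC_NONE;		/* would WARN and fail with -ENODEV */
}

int main(void)
{
	printf("%d\n", pick_reloc_path(false, false, true));	/* 2: clflush path */
	return 0;
}
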
507,6 → 559,12 |
return ret; |
} |
|
static bool only_mappable_for_reloc(unsigned int flags) |
{ |
return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) == |
__EXEC_OBJECT_NEEDS_MAP; |
} |
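
/*
 * Illustrative sketch (userspace): only_mappable_for_reloc() above is true
 * exactly when __EXEC_OBJECT_NEEDS_MAP is set and EXEC_OBJECT_NEEDS_FENCE is
 * not, i.e. the aperture requirement came purely from the relocation path.
 * The _SIM flag values are arbitrary stand-ins, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXEC_OBJECT_NEEDS_FENCE_SIM	(1u << 0)
#define __EXEC_OBJECT_NEEDS_MAP_SIM	(1u << 1)

static bool only_mappable_for_reloc_sim(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE_SIM | __EXEC_OBJECT_NEEDS_MAP_SIM)) ==
		__EXEC_OBJECT_NEEDS_MAP_SIM;
}

int main(void)
{
	printf("%d\n", only_mappable_for_reloc_sim(__EXEC_OBJECT_NEEDS_MAP_SIM));	/* 1 */
	printf("%d\n", only_mappable_for_reloc_sim(__EXEC_OBJECT_NEEDS_MAP_SIM |
						   EXEC_OBJECT_NEEDS_FENCE_SIM));	/* 0 */
	printf("%d\n", only_mappable_for_reloc_sim(0));					/* 0 */
	return 0;
}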
|
static int |
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, |
struct intel_engine_cs *ring, |
517,15 → 575,30 |
uint64_t flags; |
int ret; |
|
flags = 0; |
flags = PIN_USER; |
if (entry->flags & EXEC_OBJECT_NEEDS_GTT) |
flags |= PIN_GLOBAL; |
|
if (!drm_mm_node_allocated(&vma->node)) { |
/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, |
* limit address to the first 4GBs for unflagged objects. |
*/ |
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0) |
flags |= PIN_ZONE_4G; |
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) |
flags |= PIN_GLOBAL | PIN_MAPPABLE; |
if (entry->flags & EXEC_OBJECT_NEEDS_GTT) |
flags |= PIN_GLOBAL; |
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) |
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; |
if ((flags & PIN_MAPPABLE) == 0) |
flags |= PIN_HIGH; |
} |
|
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); |
if ((ret == -ENOSPC || ret == -E2BIG) && |
only_mappable_for_reloc(entry->flags)) |
ret = i915_gem_object_pin(obj, vma->vm, |
entry->alignment, |
flags & ~PIN_MAPPABLE); |
if (ret) |
return ret; |
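
/*
 * Illustrative sketch (userspace): a simplified model of how the pin flags
 * above are accumulated for a VMA that is not yet bound, and why the
 * -ENOSPC/-E2BIG retry can safely drop PIN_MAPPABLE when only the relocation
 * code asked for the aperture. Flag values, names and the omission of the
 * offset-bias handling are assumptions of this demo.
 */
#include <stdbool.h>
#include <stdio.h>

#define PIN_USER_SIM		(1u << 0)
#define PIN_GLOBAL_SIM		(1u << 1)
#define PIN_MAPPABLE_SIM	(1u << 2)
#define PIN_ZONE_4G_SIM		(1u << 3)
#define PIN_HIGH_SIM		(1u << 4)

#define ENTRY_NEEDS_GTT		(1u << 0)
#define ENTRY_NEEDS_MAP		(1u << 1)
#define ENTRY_SUPPORTS_48B	(1u << 2)

static unsigned int build_pin_flags(unsigned int entry_flags, bool node_allocated)
{
	unsigned int flags = PIN_USER_SIM;

	if (entry_flags & ENTRY_NEEDS_GTT)
		flags |= PIN_GLOBAL_SIM;

	if (!node_allocated) {
		if (!(entry_flags & ENTRY_SUPPORTS_48B))
			flags |= PIN_ZONE_4G_SIM;	/* keep below 4GiB */
		if (entry_flags & ENTRY_NEEDS_MAP)
			flags |= PIN_GLOBAL_SIM | PIN_MAPPABLE_SIM;
		if (!(flags & PIN_MAPPABLE_SIM))
			flags |= PIN_HIGH_SIM;		/* prefer the top of the GTT */
	}
	return flags;
}

int main(void)
{
	printf("0x%x\n", build_pin_flags(ENTRY_NEEDS_MAP, false));	/* 0xf: USER|GLOBAL|MAPPABLE|ZONE_4G */
	return 0;
}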
|
587,13 → 660,18 |
vma->node.start & (entry->alignment - 1)) |
return true; |
|
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) |
return true; |
|
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && |
vma->node.start < BATCH_OFFSET_BIAS) |
return true; |
|
/* avoid costly ping-pong once a batch bo ended up non-mappable */ |
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) |
return !only_mappable_for_reloc(entry->flags); |
|
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && |
(vma->node.start + vma->node.size - 1) >> 32) |
return true; |
|
return false; |
} |
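
/*
 * Illustrative sketch (userspace): the two placement checks above as plain
 * arithmetic. An object lacking the 48-bit-address capability is misplaced
 * if any byte of it lies at or above 4GiB, and an alignment request must be
 * honoured on a power-of-two boundary. Inputs are demo values only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool misplaced_sim(uint64_t start, uint64_t size, uint64_t alignment,
			  bool supports_48b)
{
	if (alignment && (start & (alignment - 1)))
		return true;
	if (!supports_48b && ((start + size - 1) >> 32))
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", misplaced_sim(0xfffff000ull, 0x2000, 0, false));	/* 1: crosses 4GiB */
	printf("%d\n", misplaced_sim(0x00100000ull, 0x2000, 0, false));	/* 0 */
	return 0;
}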
|
600,6 → 678,7 |
static int |
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, |
struct list_head *vmas, |
struct intel_context *ctx, |
bool *need_relocs) |
{ |
struct drm_i915_gem_object *obj; |
622,6 → 701,9 |
obj = vma->obj; |
entry = vma->exec_entry; |
|
if (ctx->flags & CONTEXT_NO_ZEROMAP) |
entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; |
|
if (!has_fenced_gpu_access) |
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE; |
need_fence = |
699,7 → 781,8 |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct eb_vmas *eb, |
struct drm_i915_gem_exec_object2 *exec) |
struct drm_i915_gem_exec_object2 *exec, |
struct intel_context *ctx) |
{ |
struct drm_i915_gem_relocation_entry *reloc; |
struct i915_address_space *vm; |
725,8 → 808,8 |
for (i = 0; i < count; i++) |
total += exec[i].relocation_count; |
|
	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
if (reloc == NULL || reloc_offset == NULL) { |
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
785,7 → 868,7 |
goto err; |
|
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); |
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); |
if (ret) |
goto err; |
|
810,9 → 893,10 |
} |
|
static int |
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring, |
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, |
struct list_head *vmas) |
{ |
const unsigned other_rings = ~intel_ring_flag(req->ring); |
struct i915_vma *vma; |
uint32_t flush_domains = 0; |
bool flush_chipset = false; |
820,9 → 904,12 |
|
list_for_each_entry(vma, vmas, exec_list) { |
struct drm_i915_gem_object *obj = vma->obj; |
ret = i915_gem_object_sync(obj, ring); |
|
if (obj->active & other_rings) { |
ret = i915_gem_object_sync(obj, req->ring, &req); |
if (ret) |
return ret; |
} |
|
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) |
flush_chipset |= i915_gem_clflush_object(obj, false); |
831,7 → 918,7 |
} |
|
if (flush_chipset) |
i915_gem_chipset_flush(ring->dev); |
i915_gem_chipset_flush(req->ring->dev); |
|
if (flush_domains & I915_GEM_DOMAIN_GTT) |
wmb(); |
839,7 → 926,7 |
/* Unconditionally invalidate gpu caches and ensure that we do flush |
* any residual writes from the previous batch. |
*/ |
return intel_ring_invalidate_all_caches(ring); |
return intel_ring_invalidate_all_caches(req); |
} |
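
/*
 * Illustrative sketch (userspace): the "other_rings" test used above in
 * isolation. An object only needs an explicit sync if it is still active on
 * a ring other than the one this request will run on. Ring numbering and
 * the mask layout are stand-ins for intel_ring_flag() and obj->active.
 */
#include <stdbool.h>
#include <stdio.h>

static bool needs_sync(unsigned int active_ring_mask, unsigned int target_ring)
{
	unsigned int other_rings = ~(1u << target_ring);

	return (active_ring_mask & other_rings) != 0;
}

int main(void)
{
	printf("%d\n", needs_sync(1u << 2, 2));	/* 0: only active on the target ring */
	printf("%d\n", needs_sync(1u << 0, 2));	/* 1: active elsewhere, must sync */
	return 0;
}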
|
static bool |
848,9 → 935,23 |
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) |
return false; |
|
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
/* Kernel clipping was a DRI1 misfeature */ |
if (exec->num_cliprects || exec->cliprects_ptr) |
return false; |
|
if (exec->DR4 == 0xffffffff) { |
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); |
exec->DR4 = 0; |
} |
if (exec->DR1 || exec->DR4) |
return false; |
|
if ((exec->batch_start_offset | exec->batch_len) & 0x7) |
return false; |
|
return true; |
} |
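
/*
 * Illustrative sketch (userspace): the execbuffer2 sanity checks above in
 * one function. Cliprects are a DRI1 leftover and rejected outright, an
 * all-ones DR4 from old userspace is quietly zeroed, and the batch offset
 * and length must be 8-byte aligned. The struct is a cut-down stand-in for
 * struct drm_i915_gem_execbuffer2.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct exec_args_sim {
	uint32_t num_cliprects;
	uint64_t cliprects_ptr;
	uint32_t DR1, DR4;
	uint32_t batch_start_offset, batch_len;
};

static bool exec_args_ok(struct exec_args_sim *exec)
{
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;		/* kernel clipping was a DRI1 misfeature */
	if (exec->DR4 == 0xffffffff)
		exec->DR4 = 0;		/* known-broken userspace, fix up */
	if (exec->DR1 || exec->DR4)
		return false;
	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;
	return true;
}

int main(void)
{
	struct exec_args_sim a = { 0, 0, 0, 0xffffffff, 0, 64 };

	printf("%d\n", exec_args_ok(&a));	/* 1: DR4 fixed up, batch aligned */
	return 0;
}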
|
static int |
validate_exec_list(struct drm_device *dev, |
struct drm_i915_gem_exec_object2 *exec, |
872,6 → 973,9 |
if (exec[i].flags & invalid_flags) |
return -EINVAL; |
|
if (exec[i].alignment && !is_power_of_2(exec[i].alignment)) |
return -EINVAL; |
|
/* First check for malicious input causing overflow in |
* the worst case where we need to allocate the entire |
* relocation tree as a single array. |
913,7 → 1017,7 |
} |
|
if (i915.enable_execlists && !ctx->engine[ring->id].state) { |
int ret = intel_lr_context_deferred_create(ctx, ring); |
int ret = intel_lr_context_deferred_alloc(ctx, ring); |
if (ret) { |
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); |
return ERR_PTR(ret); |
925,9 → 1029,9 |
|
void |
i915_gem_execbuffer_move_to_active(struct list_head *vmas, |
struct intel_engine_cs *ring) |
struct drm_i915_gem_request *req) |
{ |
u32 seqno = intel_ring_get_seqno(ring); |
struct intel_engine_cs *ring = i915_gem_request_get_ring(req); |
struct i915_vma *vma; |
|
list_for_each_entry(vma, vmas, exec_list) { |
936,23 → 1040,23 |
u32 old_read = obj->base.read_domains; |
u32 old_write = obj->base.write_domain; |
|
obj->dirty = 1; /* be paranoid */ |
obj->base.write_domain = obj->base.pending_write_domain; |
if (obj->base.write_domain == 0) |
obj->base.pending_read_domains |= obj->base.read_domains; |
obj->base.read_domains = obj->base.pending_read_domains; |
|
i915_vma_move_to_active(vma, ring); |
i915_vma_move_to_active(vma, req); |
if (obj->base.write_domain) { |
obj->dirty = 1; |
obj->last_write_seqno = seqno; |
i915_gem_request_assign(&obj->last_write_req, req); |
|
intel_fb_obj_invalidate(obj, ring); |
intel_fb_obj_invalidate(obj, ORIGIN_CS); |
|
/* update for the implicit flush after a batch */ |
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
} |
if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { |
obj->last_fenced_seqno = seqno; |
i915_gem_request_assign(&obj->last_fenced_req, req); |
if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { |
struct drm_i915_private *dev_priv = to_i915(ring->dev); |
list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, |
965,22 → 1069,20 |
} |
|
void |
i915_gem_execbuffer_retire_commands(struct drm_device *dev, |
struct drm_file *file, |
struct intel_engine_cs *ring, |
struct drm_i915_gem_object *obj) |
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) |
{ |
/* Unconditionally force add_request to emit a full flush. */ |
ring->gpu_caches_dirty = true; |
params->ring->gpu_caches_dirty = true; |
|
/* Add a breadcrumb for the completion of the batch buffer */ |
(void)__i915_add_request(ring, file, obj, NULL); |
__i915_add_request(params->request, params->batch_obj, true); |
} |
|
static int |
i915_reset_gen7_sol_offsets(struct drm_device *dev, |
struct intel_engine_cs *ring) |
struct drm_i915_gem_request *req) |
{ |
struct intel_engine_cs *ring = req->ring; |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret, i; |
|
989,7 → 1091,7 |
return -EINVAL; |
} |
|
ret = intel_ring_begin(ring, 4 * 3); |
ret = intel_ring_begin(req, 4 * 3); |
if (ret) |
return ret; |
|
1004,114 → 1106,83 |
return 0; |
} |
|
static int |
i915_emit_box(struct intel_engine_cs *ring, |
struct drm_clip_rect *box, |
int DR1, int DR4) |
static struct drm_i915_gem_object* |
i915_gem_execbuffer_parse(struct intel_engine_cs *ring, |
struct drm_i915_gem_exec_object2 *shadow_exec_entry, |
struct eb_vmas *eb, |
struct drm_i915_gem_object *batch_obj, |
u32 batch_start_offset, |
u32 batch_len, |
bool is_master) |
{ |
struct drm_i915_gem_object *shadow_batch_obj; |
struct i915_vma *vma; |
int ret; |
|
if (box->y2 <= box->y1 || box->x2 <= box->x1 || |
box->y2 <= 0 || box->x2 <= 0) { |
DRM_ERROR("Bad box %d,%d..%d,%d\n", |
box->x1, box->y1, box->x2, box->y2); |
return -EINVAL; |
} |
shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool, |
PAGE_ALIGN(batch_len)); |
if (IS_ERR(shadow_batch_obj)) |
return shadow_batch_obj; |
|
if (INTEL_INFO(ring->dev)->gen >= 4) { |
ret = intel_ring_begin(ring, 4); |
ret = i915_parse_cmds(ring, |
batch_obj, |
shadow_batch_obj, |
batch_start_offset, |
batch_len, |
is_master); |
if (ret) |
return ret; |
goto err; |
|
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965); |
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); |
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); |
intel_ring_emit(ring, DR4); |
} else { |
ret = intel_ring_begin(ring, 6); |
ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0); |
if (ret) |
return ret; |
goto err; |
|
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO); |
intel_ring_emit(ring, DR1); |
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); |
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); |
intel_ring_emit(ring, DR4); |
intel_ring_emit(ring, 0); |
} |
intel_ring_advance(ring); |
i915_gem_object_unpin_pages(shadow_batch_obj); |
|
return 0; |
memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); |
|
vma = i915_gem_obj_to_ggtt(shadow_batch_obj); |
vma->exec_entry = shadow_exec_entry; |
vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN; |
drm_gem_object_reference(&shadow_batch_obj->base); |
list_add_tail(&vma->exec_list, &eb->vmas); |
|
shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND; |
|
return shadow_batch_obj; |
|
err: |
i915_gem_object_unpin_pages(shadow_batch_obj); |
if (ret == -EACCES) /* unhandled chained batch */ |
return batch_obj; |
else |
return ERR_PTR(ret); |
} |
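
/*
 * Illustrative sketch (userspace): the shadow-batch control flow of
 * i915_gem_execbuffer_parse() above. parse_batch_sim() stands in for
 * i915_parse_cmds(); -EACCES models the "unhandled chained batch" case in
 * which the original batch is used unparsed and therefore never promoted to
 * a secure dispatch. All names here are demo-only assumptions.
 */
#include <errno.h>
#include <stdio.h>

struct batch_sim { const char *name; int secure; };

static int parse_batch_sim(const struct batch_sim *b)
{
	(void)b;
	return -EACCES;		/* pretend the parser hit a chained batch */
}

static const struct batch_sim *
pick_batch(const struct batch_sim *orig, struct batch_sim *shadow)
{
	int ret = parse_batch_sim(orig);

	if (ret == 0) {
		shadow->secure = 1;	/* parsed and accepted: dispatch the shadow as secure */
		return shadow;
	}
	if (ret == -EACCES)
		return orig;		/* accept, but keep it non-secure */
	return NULL;			/* any other error rejects the execbuf */
}

int main(void)
{
	struct batch_sim orig = { "user batch", 0 }, shadow = { "shadow copy", 0 };
	const struct batch_sim *b = pick_batch(&orig, &shadow);

	printf("%s secure=%d\n", b ? b->name : "rejected", b ? b->secure : -1);
	return 0;
}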
|
|
int |
i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, |
struct intel_engine_cs *ring, |
struct intel_context *ctx, |
i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, |
struct drm_i915_gem_execbuffer2 *args, |
struct list_head *vmas, |
struct drm_i915_gem_object *batch_obj, |
u64 exec_start, u32 flags) |
struct list_head *vmas) |
{ |
struct drm_clip_rect *cliprects = NULL; |
struct drm_device *dev = params->dev; |
struct intel_engine_cs *ring = params->ring; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u64 exec_len; |
u64 exec_start, exec_len; |
int instp_mode; |
u32 instp_mask; |
int i, ret = 0; |
int ret; |
|
if (args->num_cliprects != 0) { |
if (ring != &dev_priv->ring[RCS]) { |
DRM_DEBUG("clip rectangles are only valid with the render ring\n"); |
return -EINVAL; |
} |
|
if (INTEL_INFO(dev)->gen >= 5) { |
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); |
return -EINVAL; |
} |
|
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { |
DRM_DEBUG("execbuf with %u cliprects\n", |
args->num_cliprects); |
return -EINVAL; |
} |
|
cliprects = kcalloc(args->num_cliprects, |
sizeof(*cliprects), |
GFP_KERNEL); |
if (cliprects == NULL) { |
ret = -ENOMEM; |
goto error; |
} |
|
if (copy_from_user(cliprects, |
to_user_ptr(args->cliprects_ptr), |
sizeof(*cliprects)*args->num_cliprects)) { |
ret = -EFAULT; |
goto error; |
} |
} else { |
if (args->DR4 == 0xffffffff) { |
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); |
args->DR4 = 0; |
} |
|
if (args->DR1 || args->DR4 || args->cliprects_ptr) { |
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); |
return -EINVAL; |
} |
} |
|
ret = i915_gem_execbuffer_move_to_gpu(ring, vmas); |
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); |
if (ret) |
goto error; |
return ret; |
|
ret = i915_switch_context(ring, ctx); |
ret = i915_switch_context(params->request); |
if (ret) |
goto error; |
return ret; |
|
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id), |
"%s didn't clear reload\n", ring->name); |
|
instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; |
instp_mask = I915_EXEC_CONSTANTS_MASK; |
switch (instp_mode) { |
1120,22 → 1191,19 |
case I915_EXEC_CONSTANTS_REL_SURFACE: |
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { |
DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); |
ret = -EINVAL; |
goto error; |
return -EINVAL; |
} |
|
if (instp_mode != dev_priv->relative_constants_mode) { |
if (INTEL_INFO(dev)->gen < 4) { |
DRM_DEBUG("no rel constants on pre-gen4\n"); |
ret = -EINVAL; |
goto error; |
return -EINVAL; |
} |
|
if (INTEL_INFO(dev)->gen > 5 && |
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { |
DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); |
ret = -EINVAL; |
goto error; |
return -EINVAL; |
} |
|
/* The HW changed the meaning on this bit on gen6 */ |
1145,15 → 1213,14 |
break; |
default: |
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); |
ret = -EINVAL; |
goto error; |
return -EINVAL; |
} |
|
if (ring == &dev_priv->ring[RCS] && |
instp_mode != dev_priv->relative_constants_mode) { |
ret = intel_ring_begin(ring, 4); |
ret = intel_ring_begin(params->request, 4); |
if (ret) |
goto error; |
return ret; |
|
intel_ring_emit(ring, MI_NOOP); |
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); |
1165,41 → 1232,27 |
} |
|
if (args->flags & I915_EXEC_GEN7_SOL_RESET) { |
ret = i915_reset_gen7_sol_offsets(dev, ring); |
ret = i915_reset_gen7_sol_offsets(dev, params->request); |
if (ret) |
goto error; |
return ret; |
} |
|
exec_len = args->batch_len; |
if (cliprects) { |
for (i = 0; i < args->num_cliprects; i++) { |
ret = i915_emit_box(ring, &cliprects[i], |
args->DR1, args->DR4); |
if (ret) |
goto error; |
exec_start = params->batch_obj_vm_offset + |
params->args_batch_start_offset; |
|
ret = ring->dispatch_execbuffer(ring, |
ret = ring->dispatch_execbuffer(params->request, |
exec_start, exec_len, |
flags); |
params->dispatch_flags); |
if (ret) |
goto error; |
} |
} else { |
ret = ring->dispatch_execbuffer(ring, |
exec_start, exec_len, |
flags); |
if (ret) |
return ret; |
} |
|
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); |
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); |
|
i915_gem_execbuffer_move_to_active(vmas, ring); |
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); |
i915_gem_execbuffer_move_to_active(vmas, params->request); |
i915_gem_execbuffer_retire_commands(params); |
|
error: |
kfree(cliprects); |
return ret; |
return 0; |
} |
|
/** |
1261,12 → 1314,14 |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct eb_vmas *eb; |
struct drm_i915_gem_object *batch_obj; |
struct drm_i915_gem_exec_object2 shadow_exec_entry; |
struct intel_engine_cs *ring; |
struct intel_context *ctx; |
struct i915_address_space *vm; |
struct i915_execbuffer_params params_master; /* XXX: will be removed later */ |
struct i915_execbuffer_params *params = ¶ms_master; |
const u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
u64 exec_start = args->batch_start_offset; |
u32 flags; |
u32 dispatch_flags; |
int ret; |
bool need_relocs; |
|
1277,13 → 1332,13 |
if (ret) |
return ret; |
|
flags = 0; |
dispatch_flags = 0; |
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
		dispatch_flags |= I915_DISPATCH_SECURE;
} |
if (args->flags & I915_EXEC_IS_PINNED) |
flags |= I915_DISPATCH_PINNED; |
dispatch_flags |= I915_DISPATCH_PINNED; |
|
if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) { |
DRM_DEBUG("execbuf with unknown ring: %d\n", |
1291,13 → 1346,35 |
return -EINVAL; |
} |
|
if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) && |
((args->flags & I915_EXEC_BSD_MASK) != 0)) { |
DRM_DEBUG("execbuf with non bsd ring but with invalid " |
"bsd dispatch flags: %d\n", (int)(args->flags)); |
return -EINVAL; |
} |
|
if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT) |
ring = &dev_priv->ring[RCS]; |
else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) { |
if (HAS_BSD2(dev)) { |
int ring_id; |
|
switch (args->flags & I915_EXEC_BSD_MASK) { |
case I915_EXEC_BSD_DEFAULT: |
ring_id = gen8_dispatch_bsd_ring(dev, file); |
ring = &dev_priv->ring[ring_id]; |
break; |
case I915_EXEC_BSD_RING1: |
ring = &dev_priv->ring[VCS]; |
break; |
case I915_EXEC_BSD_RING2: |
ring = &dev_priv->ring[VCS2]; |
break; |
default: |
DRM_DEBUG("execbuf with unknown bsd ring: %d\n", |
(int)(args->flags & I915_EXEC_BSD_MASK)); |
return -EINVAL; |
} |
} else |
ring = &dev_priv->ring[VCS]; |
} else |
1314,6 → 1391,20 |
return -EINVAL; |
} |
|
if (args->flags & I915_EXEC_RESOURCE_STREAMER) { |
if (!HAS_RESOURCE_STREAMER(dev)) { |
DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); |
return -EINVAL; |
} |
if (ring->id != RCS) { |
DRM_DEBUG("RS is not available on %s\n", |
ring->name); |
return -EINVAL; |
} |
|
dispatch_flags |= I915_DISPATCH_RS; |
} |
|
intel_runtime_pm_get(dev_priv); |
|
ret = i915_mutex_lock_interruptible(dev); |
1334,6 → 1425,8 |
else |
vm = &dev_priv->gtt.base; |
|
memset(¶ms_master, 0x00, sizeof(params_master)); |
|
eb = eb_create(args); |
if (eb == NULL) { |
i915_gem_context_unreference(ctx); |
1352,7 → 1445,7 |
|
/* Move the objects en-masse into the GTT, evicting if necessary. */ |
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); |
ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); |
if (ret) |
goto err; |
|
1362,7 → 1455,7 |
if (ret) { |
if (ret == -EFAULT) { |
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
eb, exec); |
eb, exec, ctx); |
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
} |
if (ret) |
1375,33 → 1468,53 |
ret = -EINVAL; |
goto err; |
} |
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
|
params->args_batch_start_offset = args->batch_start_offset; |
|
if (i915_needs_cmd_parser(ring)) { |
ret = i915_parse_cmds(ring, |
if (i915_needs_cmd_parser(ring) && args->batch_len) { |
struct drm_i915_gem_object *parsed_batch_obj; |
|
parsed_batch_obj = i915_gem_execbuffer_parse(ring, |
&shadow_exec_entry, |
eb, |
batch_obj, |
args->batch_start_offset, |
args->batch_len, |
file->is_master); |
if (ret) { |
if (ret != -EACCES) |
if (IS_ERR(parsed_batch_obj)) { |
ret = PTR_ERR(parsed_batch_obj); |
goto err; |
} else { |
} |
|
/* |
* XXX: Actually do this when enabling batch copy... |
* parsed_batch_obj == batch_obj means batch not fully parsed: |
* Accept, but don't promote to secure. |
*/ |
|
if (parsed_batch_obj != batch_obj) { |
/* |
* Batch parsed and accepted: |
* |
* Set the DISPATCH_SECURE bit to remove the NON_SECURE bit |
* from MI_BATCH_BUFFER_START commands issued in the |
* dispatch_execbuffer implementations. We specifically don't |
* want that set when the command parser is enabled. |
* Set the DISPATCH_SECURE bit to remove the NON_SECURE |
* bit from MI_BATCH_BUFFER_START commands issued in |
* the dispatch_execbuffer implementations. We |
* specifically don't want that set on batches the |
* command parser has accepted. |
*/ |
dispatch_flags |= I915_DISPATCH_SECURE; |
params->args_batch_start_offset = 0; |
batch_obj = parsed_batch_obj; |
} |
	}

batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
|
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure |
* batch" bit. Hence we need to pin secure batches into the global gtt. |
* hsw should have this fixed, but bdw mucks it up again. */ |
if (flags & I915_DISPATCH_SECURE) { |
if (dispatch_flags & I915_DISPATCH_SECURE) { |
/* |
* So on first glance it looks freaky that we pin the batch here |
* outside of the reservation loop. But: |
1408,7 → 1521,7 |
* - The batch is already pinned into the relevant ppgtt, so we |
* already have the backing storage fully allocated. |
* - No other BO uses the global gtt (well contexts, but meh), |
* so we don't really have issues with mutliple objects not |
* so we don't really have issues with multiple objects not |
* fitting due to fragmentation. |
* So this is actually safe. |
*/ |
1416,26 → 1529,57 |
if (ret) |
goto err; |
|
exec_start += i915_gem_obj_ggtt_offset(batch_obj); |
params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj); |
} else |
exec_start += i915_gem_obj_offset(batch_obj, vm); |
params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); |
|
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args, |
&eb->vmas, batch_obj, exec_start, flags); |
/* Allocate a request for this batch buffer nice and early. */ |
ret = i915_gem_request_alloc(ring, ctx, ¶ms->request); |
if (ret) |
goto err_batch_unpin; |
|
ret = i915_gem_request_add_to_client(params->request, file); |
if (ret) |
goto err_batch_unpin; |
|
/* |
* Save assorted stuff away to pass through to *_submission(). |
* NB: This data should be 'persistent' and not local as it will |
* kept around beyond the duration of the IOCTL once the GPU |
* scheduler arrives. |
*/ |
params->dev = dev; |
params->file = file; |
params->ring = ring; |
params->dispatch_flags = dispatch_flags; |
params->batch_obj = batch_obj; |
params->ctx = ctx; |
|
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); |
|
err_batch_unpin: |
/* |
* FIXME: We crucially rely upon the active tracking for the (ppgtt) |
* batch vma for correctness. For less ugly and less fragility this |
* needs to be adjusted to also track the ggtt batch vma properly as |
* active. |
*/ |
if (flags & I915_DISPATCH_SECURE) |
if (dispatch_flags & I915_DISPATCH_SECURE) |
i915_gem_object_ggtt_unpin(batch_obj); |
|
err: |
/* the request owns the ref now */ |
i915_gem_context_unreference(ctx); |
eb_destroy(eb); |
|
/* |
* If the request was created but not successfully submitted then it |
* must be freed again. If it was submitted then it is being tracked |
* on the active request list and no clean up is required here. |
*/ |
if (ret && params->request) |
i915_gem_request_cancel(params->request); |
|
mutex_unlock(&dev->struct_mutex); |
|
pre_mutex_err: |