56,11 → 56,6

			 unsigned long flag, unsigned long offset);


#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
1973,6 → 1968,9
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
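		/* 0x00100000 pfns * 4 KiB per page == the 4 GiB boundary,
		 * i.e. the limit __GFP_DMA32 is supposed to enforce. */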
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
2439,6 → 2437,8
static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
					struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *buffer;

	while (!list_empty(&ring->active_list)) {
		struct drm_i915_gem_object *obj;

2454,18 → 2454,16
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */
	while (!list_empty(&ring->execlist_queue)) {
		struct drm_i915_gem_request *submit_req;

		submit_req = list_first_entry(&ring->execlist_queue,
					      struct drm_i915_gem_request,
					      execlist_link);
		list_del(&submit_req->execlist_link);
	if (i915.enable_execlists) {
		spin_lock_irq(&ring->execlist_lock);

		if (submit_req->ctx != ring->default_context)
			intel_lr_context_unpin(submit_req);
		/* list_splice_tail_init checks for empty lists */
		list_splice_tail_init(&ring->execlist_queue,
				      &ring->execlist_retired_req_list);

		i915_gem_request_unreference(submit_req);
		spin_unlock_irq(&ring->execlist_lock);
		intel_execlists_retire_requests(ring);
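		/* The splice must run under execlist_lock because the queue
		 * is also manipulated from the execlists interrupt handler;
		 * the unpin/unreference work is then done by
		 * intel_execlists_retire_requests() outside the lock. */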
	}

	/*
2484,7 → 2482,19

		i915_gem_request_retire(request);
	}

	/* Having flushed all requests from all queues, we know that all
	 * ringbuffers must now be empty. However, since we do not reclaim
	 * all space when retiring the request (to prevent HEADs colliding
	 * with rapid ringbuffer wraparound) the amount of available space
	 * upon reset is less than when we start. Do one more pass over
	 * all the ringbuffers to reset last_retired_head.
	 */
	list_for_each_entry(buffer, &ring->buffers, link) {
		buffer->last_retired_head = buffer->tail;
		intel_ring_update_space(buffer);
	}
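	/* With last_retired_head == tail, intel_ring_update_space() sees an
	 * empty ring and reports the whole buffer as free, matching the
	 * freshly reset hardware state. */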
}

void i915_gem_reset(struct drm_device *dev)
{
2584,10 → 2594,10
		}
	}

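	/* Once all rings have drained, arm the idle handler to fire in
	 * 100ms; calling mod_delayed_work() again before it runs simply
	 * pushes the deadline back. */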
	if (idle)
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->mm.idle_work,
				 msecs_to_jiffies(100));

	return idle;
}
2624,6 → 2634,10
		if (!list_empty(&ring->request_list))
			return;

	/* we probably should sync with hangcheck here, using cancel_work_sync.
	 * Also locking seems to be fubar here, ring->request_list is protected
	 * by dev->struct_mutex. */

	intel_mark_idle(dev);

	if (mutex_trylock(&dev->struct_mutex)) {
2748,7 → 2762,7
		if (ret == 0)
			ret = __i915_wait_request(req[i], reset_counter, true,
						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
						  file->driver_priv);
						  to_rps_client(file));
		i915_gem_request_unreference__unlocked(req[i]);
	}
	return ret;
3114,7 → 3128,7
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, dev_priv->gtt.mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32));
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
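		/* Keeping the top page below 4 GiB unused ensures no object
		 * ends exactly at the 4 GiB boundary, where a 32-bit end
		 * address would wrap to zero (assumed rationale for the
		 * -PAGE_SIZE adjustment). */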

	if (alignment == 0)
		alignment = flags & PIN_MAPPABLE ? fence_alignment :
3151,6 → 3165,20
	if (IS_ERR(vma))
		goto err_unpin;

	if (flags & PIN_OFFSET_FIXED) {
		uint64_t offset = flags & PIN_OFFSET_MASK;

		if (offset & (alignment - 1) || offset + size > end) {
			ret = -EINVAL;
			goto err_free_vma;
		}
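		/* Claim the exact range [offset, offset + size); unlike
		 * drm_mm_insert_node(), drm_mm_reserve_node() does not
		 * search and fails with -ENOSPC if the range is occupied. */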
		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
		if (ret)
			goto err_free_vma;
	} else {
		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
3170,6 → 3198,7

			goto err_free_vma;
		}
	}
	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
		ret = -EINVAL;
		goto err_remove_node;
3522,7 → 3551,7
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
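		/* IS_BXT_REVID() takes an inclusive range, so [0, A1] below
		 * covers exactly the steppings before B0. */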
		if (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			return -ENODEV;

		level = I915_CACHE_LLC;
3565,17 → 3594,11
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined,
				     struct drm_i915_gem_request **pipelined_request,
				     const struct i915_ggtt_view *view)
{
	u32 old_read_domains, old_write_domain;
	int ret;

	ret = i915_gem_object_sync(obj, pipelined, pipelined_request);
	if (ret)
		return ret;

	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
3765,6 → 3788,10
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

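	/* A fixed-address pin can only be satisfied by that exact address;
	 * anything else is misplaced and must be rebound at the requested
	 * offset. */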
	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}

4030,6 → 4057,7
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};
4163,10 → 4191,8
{
	struct i915_vma *vma;
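	/* Only the default view can be looked up by vm alone: ppgtt vmas
	 * always have ggtt_view.type == I915_GGTT_VIEW_NORMAL (it is
	 * zero-initialised), while partial/rotated GGTT views go through
	 * i915_gem_obj_to_ggtt_view() instead. */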
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (i915_is_ggtt(vma->vm) &&
		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
			continue;
		if (vma->vm == vm)
		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
		    vma->vm == vm)
			return vma;
	}
	return NULL;
4257,7 → 4283,6
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
	int i, ret;

4273,10 → 4298,10
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
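	/* GEN7_L3LOG_SIZE is in bytes; each MI_LOAD_REGISTER_IMM below
	 * writes one 32-bit register, hence the / 4 loop bound. */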
	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
	for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg_base + i);
		intel_ring_emit(ring, remap_info[i/4]);
		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
		intel_ring_emit(ring, remap_info[i]);
	}

	intel_ring_advance(ring);
4444,17 → 4469,8
	if (HAS_GUC_UCODE(dev)) {
		ret = intel_guc_ucode_load(dev);
		if (ret) {
			/*
			 * If we got an error and GuC submission is enabled, map
			 * the error to -EIO so the GPU will be declared wedged.
			 * OTOH, if we didn't intend to use the GuC anyway, just
			 * discard the error and carry on.
			 */
			DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
				  i915.enable_guc_submission ? "" :
				  " (ignored)");
			ret = i915.enable_guc_submission ? -EIO : 0;
			DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
			ret = -EIO;
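			/* -EIO makes the caller declare the GPU wedged:
			 * driver load continues, but GPU submission is
			 * refused from then on. */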
			goto out;
		}
	}
4518,14 → 4534,6

	mutex_lock(&dev->struct_mutex);

	if (IS_VALLEYVIEW(dev)) {
		/* VLVA0 (potential hack), BIOS isn't actually waking us */
		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
			      VLV_GTLC_ALLOWWAKEACK), 10))
			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
	}

	if (!i915.enable_execlists) {
		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
		dev_priv->gt.init_rings = i915_gem_init_rings;
4619,7 → 4627,7

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

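	/* Valleyview and Cherryview have only 16 fence registers despite
	 * being gen7+, so they fall through to the second branch. */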
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
		dev_priv->num_fence_regs = 16;
4837,6 → 4845,21
	return false;
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
	struct page *page;

	/* Only default objects have per-page dirty tracking */
	if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
		return NULL;
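	/* Objects not backed by shmemfs pages (e.g. stolen memory or
	 * dma-buf imports) have no struct page to dirty, hence the flag
	 * check above. */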

	page = i915_gem_object_get_page(obj, n);
	set_page_dirty(page);
	return page;
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
4862,6 → 4885,7
	i915_gem_object_pin_pages(obj);
	sg = obj->pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->dirty = 1;		/* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {