71,8 → 71,11 |
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ |
|
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj); |
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, |
bool force); |
static __must_check int |
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm, |
unsigned alignment, |
bool map_and_fenceable, |
bool nonblocking); |
88,9 → 91,23 |
bool enable); |
|
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target); |
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv); |
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); |
|
static bool cpu_cache_is_coherent(struct drm_device *dev, |
enum i915_cache_level level) |
{ |
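	/* With an LLC the CPU cache is coherent with the GPU for every cache
	 * level; without one, only snooped (non-NONE) levels are coherent. */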
return HAS_LLC(dev) || level != I915_CACHE_NONE; |
} |
|
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) |
{ |
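	/* A CPU write needs a manual clflush when the cache is not coherent
	 * with the GPU, or when the object is pinned for scanout, since the
	 * display engine does not snoop the LLC. */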
if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) |
return true; |
|
return obj->pin_display; |
} |
|
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj) |
{ |
if (obj->tiling_mode) |
107,15 → 124,19 |
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, |
size_t size) |
{ |
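	/* object_stat_lock serialises the object count/memory bookkeeping. */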
spin_lock(&dev_priv->mm.object_stat_lock); |
dev_priv->mm.object_count++; |
dev_priv->mm.object_memory += size; |
spin_unlock(&dev_priv->mm.object_stat_lock); |
} |
|
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv, |
size_t size) |
{ |
spin_lock(&dev_priv->mm.object_stat_lock); |
dev_priv->mm.object_count--; |
dev_priv->mm.object_memory -= size; |
spin_unlock(&dev_priv->mm.object_stat_lock); |
} |
|
static int |
168,7 → 189,7 |
static inline bool |
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) |
{ |
return i915_gem_obj_bound_any(obj) && !obj->active; |
} |
|
|
213,12 → 234,12 |
|
pinned = 0; |
mutex_lock(&dev->struct_mutex); |
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) |
if (obj->pin_count) |
pinned += i915_gem_obj_ggtt_size(obj); |
mutex_unlock(&dev->struct_mutex); |
|
args->aper_size = dev_priv->gtt.base.total; |
args->aper_available_size = args->aper_size - pinned; |
|
return 0; |
256,17 → 277,11 |
return -ENOMEM; |
|
ret = drm_gem_handle_create(file, &obj->base, &handle); |
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

trace_i915_gem_object_create(obj); |
|
*handle_p = handle; |
return 0; |
} |
283,13 → 298,6 |
args->size, &args->handle); |
} |
|
/** |
* Creates a new mm object and returns a handle to it. |
*/ |
460,9 → 468,8 |
* read domain and manually flush cachelines (if required). This |
* optimizes for the case when the gpu will dirty the data |
* anyway again before the next pread happens. */ |
needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); |
if (i915_gem_obj_bound_any(obj)) { |
ret = i915_gem_object_set_to_gtt_domain(obj, false); |
if (ret) |
return ret; |
505,7 → 512,7 |
|
mutex_unlock(&dev->struct_mutex); |
|
if (likely(!i915_prefault_disable) && !prefaulted) { |
ret = fault_in_multipages_writeable(user_data, remain); |
/* Userspace is tricking us, but we've already clobbered |
* its pages with the prefault and promised to write the |
637,7 → 644,7 |
int page_offset, page_length, ret; |
char *vaddr; |
|
ret = i915_gem_obj_ggtt_pin(obj, 0, true, true); |
if (ret) |
goto out; |
|
659,7 → 666,7 |
user_data = (char __user *) (uintptr_t) args->data_ptr; |
remain = args->size; |
|
offset = i915_gem_obj_ggtt_offset(obj) + args->offset; |
|
while (remain > 0) { |
/* Operation in this page |
786,19 → 793,18 |
* write domain and manually flush cachelines (if required). This |
* optimizes for the case when the gpu will use the data |
* right away and we therefore have to clflush anyway. */ |
needs_clflush_after = cpu_write_needs_clflush(obj); |
if (i915_gem_obj_bound_any(obj)) { |
ret = i915_gem_object_set_to_gtt_domain(obj, true); |
if (ret) |
return ret; |
} |
} |
/* Same trick applies to invalidate partially written cachelines read |
* before writing. */ |
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) |
needs_clflush_before = |
!cpu_cache_is_coherent(dev, obj->cache_level); |
|
ret = i915_gem_object_get_pages(obj); |
if (ret) |
877,7 → 883,7 |
*/ |
if (!needs_clflush_after && |
obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
		if (i915_gem_clflush_object(obj, obj->pin_display))
			i915_gem_chipset_flush(dev);
} |
} |
901,6 → 907,9 |
struct drm_i915_gem_object *obj; |
int ret; |
|
if (args->size == 0) |
return 0; |
|
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
907,9 → 916,6 |
return 0; |
} |
|
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
949,9 → 955,9 |
// goto out; |
// } |
|
if (obj->tiling_mode == I915_TILING_NONE && |
obj->base.write_domain != I915_GEM_DOMAIN_CPU && |
cpu_write_needs_clflush(obj)) { |
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); |
/* Note that the gtt paths might fail with non-page-backed user |
* pointers (e.g. gtt mappings when moving data between |
1001,7 → 1007,7 |
|
ret = 0; |
if (seqno == ring->outstanding_lazy_request) |
ret = i915_add_request(ring, NULL); |
|
return ret; |
} |
1035,6 → 1041,8 |
bool wait_forever = true; |
int ret; |
|
WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n"); |
|
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno)) |
return 0; |
|
1045,7 → 1053,7 |
wait_forever = false; |
} |
|
timeout_jiffies = timespec_to_jiffies_timeout(&wait_time); |
|
if (WARN_ON(!ring->irq_get(ring))) |
return -ENODEV; |
1130,6 → 1138,25 |
interruptible, NULL); |
} |
|
static int |
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *ring) |
{ |
i915_gem_retire_requests_ring(ring); |
|
/* Manually manage the write flush as we may have not yet |
* retired the buffer. |
* |
* Note that the last_write_seqno is always the earlier of |
	 * the two (read/write) seqno, so if we have successfully waited,
* we know we have passed the last write. |
*/ |
obj->last_write_seqno = 0; |
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
|
return 0; |
} |
|
/** |
* Ensures that all rendering to the object has completed and the object is |
* safe to unbind from the GTT or access from the CPU. |
1150,20 → 1177,9 |
if (ret) |
return ret; |
|
return i915_gem_object_wait_rendering__tail(obj, ring); |
} |
|
/* A nonblocking variant of the above wait. This is a highly dangerous routine |
* as the object state may change during this call. |
*/ |
1197,21 → 1213,12 |
mutex_unlock(&dev->struct_mutex); |
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
mutex_lock(&dev->struct_mutex); |
if (ret) |
return ret; |
|
return i915_gem_object_wait_rendering__tail(obj, ring); |
} |
|
/** |
* Called when user space prepares to use an object with the CPU, either |
* through the mmap ioctl's mapping or a GTT mapping. |
1368,11 → 1375,7 |
if (!obj->fault_mappable) |
return; |
|
// drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping); |
obj->fault_mappable = false; |
} |
|
1485,7 → 1488,7 |
goto out; |
} |
/* Now bind it into the GTT if needed */ |
ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); |
if (ret) |
goto out; |
|
1499,7 → 1502,7 |
|
obj->fault_mappable = true; |
|
pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj); |
|
/* Finally, remap it using the new GTT offset */ |
|
1517,7 → 1520,7 |
i915_gem_object_unpin(obj); |
|
|
*offset = mem; |
|
out: |
drm_gem_object_unreference(&obj->base); |
1592,7 → 1595,7 |
* hope for the best. |
*/ |
WARN_ON(ret != -EIO); |
i915_gem_clflush_object(obj, true); |
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU; |
} |
|
1620,15 → 1623,15 |
if (obj->pages == NULL) |
return 0; |
|
if (obj->pages_pin_count) |
return -EBUSY; |
|
BUG_ON(i915_gem_obj_bound_any(obj)); |
|
/* ->put_pages might need to allocate memory for the bit17 swizzle |
* array, hence protect them from being reaped by removing them from gtt |
* lists early. */ |
list_del(&obj->global_list); |
|
ops->put_pages(obj); |
obj->pages = NULL; |
1671,7 → 1674,6 |
|
page_count = obj->base.size / PAGE_SIZE; |
if (sg_alloc_table(st, page_count, GFP_KERNEL)) { |
kfree(st); |
FAIL(); |
return -ENOMEM; |
1741,7 → 1743,7 |
if (ret) |
return ret; |
|
list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
return 0; |
} |
|
1754,6 → 1756,10 |
u32 seqno = intel_ring_get_seqno(ring); |
|
BUG_ON(ring == NULL); |
if (obj->ring != ring && obj->last_write_seqno) { |
/* Keep the seqno relative to the current ring */ |
obj->last_write_seqno = seqno; |
} |
obj->ring = ring; |
|
/* Add a reference if we're newly entering the active list. */ |
1762,8 → 1768,6 |
obj->active = 1; |
} |
|
list_move_tail(&obj->ring_list, &ring->active_list); |
|
obj->last_read_seqno = seqno; |
1785,13 → 1789,14 |
static void |
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; |
struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); |
|
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
BUG_ON(!obj->active); |
|
list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); |
|
list_del_init(&obj->ring_list); |
obj->ring = NULL; |
1879,17 → 1884,18 |
return 0; |
} |
|
int __i915_add_request(struct intel_ring_buffer *ring, |
struct drm_file *file, |
struct drm_i915_gem_object *obj, |
u32 *out_seqno) |
{ |
drm_i915_private_t *dev_priv = ring->dev->dev_private; |
struct drm_i915_gem_request *request; |
u32 request_ring_position, request_start; |
int was_empty; |
int ret; |
|
request_start = intel_ring_get_tail(ring); |
/* |
* Emit any outstanding flushes - execbuf can fail to emit the flush |
* after having emitted the batchbuffer command. Hence we need to fix |
1921,7 → 1927,21 |
|
request->seqno = intel_ring_get_seqno(ring); |
request->ring = ring; |
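	/* Record the [head, tail) span of ring contents that belongs to this
	 * request; hang analysis uses it to judge where the GPU stopped. */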
request->head = request_start; |
request->tail = request_ring_position; |
request->ctx = ring->last_context; |
request->batch_obj = obj; |
|
/* Whilst this request exists, batch_obj will be on the |
* active_list, and so will hold the active reference. Only when this |
	 * request is retired will the batch_obj be moved onto the
* inactive_list and lose its active reference. Hence we do not need |
* to explicitly hold another reference here. |
*/ |
|
if (request->ctx) |
i915_gem_context_reference(request->ctx); |
|
request->emitted_jiffies = GetTimerTicks(); |
was_empty = list_empty(&ring->request_list); |
list_add_tail(&request->list, &ring->request_list); |
1940,12 → 1960,9 |
trace_i915_gem_request_add(ring, request->seqno); |
ring->outstanding_lazy_request = 0; |
|
if (!dev_priv->ums.mm_suspended) { |
// i915_queue_hangcheck(ring->dev); |
|
if (was_empty) { |
queue_delayed_work(dev_priv->wq, |
&dev_priv->mm.retire_work, |
1975,9 → 1992,130 |
spin_unlock(&file_priv->mm.lock); |
} |
|
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
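	/* True when the sampled GPU head address falls inside this object's
	 * mapping in the given address space. */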
if (acthd >= i915_gem_obj_offset(obj, vm) && |
acthd < i915_gem_obj_offset(obj, vm) + obj->base.size) |
return true; |
|
return false; |
} |
|
static bool i915_head_inside_request(const u32 acthd_unmasked, |
const u32 request_start, |
const u32 request_end) |
{ |
const u32 acthd = acthd_unmasked & HEAD_ADDR; |
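	/* The request's commands may wrap past the end of the ring; when
	 * request_start > request_end the valid range is [request_start, end)
	 * plus [0, request_end). */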
|
if (request_start < request_end) { |
if (acthd >= request_start && acthd < request_end) |
return true; |
} else if (request_start > request_end) { |
if (acthd >= request_start || acthd < request_end) |
return true; |
} |
|
return false; |
} |
|
static struct i915_address_space * |
request_to_vm(struct drm_i915_gem_request *request) |
{ |
struct drm_i915_private *dev_priv = request->ring->dev->dev_private; |
struct i915_address_space *vm; |
|
vm = &dev_priv->gtt.base; |
|
return vm; |
} |
|
static bool i915_request_guilty(struct drm_i915_gem_request *request, |
const u32 acthd, bool *inside) |
{ |
/* There is a possibility that unmasked head address |
* pointing inside the ring, matches the batch_obj address range. |
* However this is extremely unlikely. |
*/ |
if (request->batch_obj) { |
if (i915_head_inside_object(acthd, request->batch_obj, |
request_to_vm(request))) { |
*inside = true; |
return true; |
} |
} |
|
if (i915_head_inside_request(acthd, request->head, request->tail)) { |
*inside = false; |
return true; |
} |
|
return false; |
} |
|
static void i915_set_reset_status(struct intel_ring_buffer *ring, |
struct drm_i915_gem_request *request, |
u32 acthd) |
{ |
struct i915_ctx_hang_stats *hs = NULL; |
bool inside, guilty; |
unsigned long offset = 0; |
|
/* Innocent until proven guilty */ |
guilty = false; |
|
if (request->batch_obj) |
offset = i915_gem_obj_offset(request->batch_obj, |
request_to_vm(request)); |
|
if (ring->hangcheck.action != HANGCHECK_WAIT && |
i915_request_guilty(request, acthd, &inside)) { |
DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n", |
ring->name, |
inside ? "inside" : "flushing", |
offset, |
request->ctx ? request->ctx->id : 0, |
acthd); |
|
guilty = true; |
} |
|
/* If contexts are disabled or this is the default context, use |
* file_priv->reset_state |
*/ |
if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID) |
hs = &request->ctx->hang_stats; |
else if (request->file_priv) |
hs = &request->file_priv->hang_stats; |
|
if (hs) { |
if (guilty) |
hs->batch_active++; |
else |
hs->batch_pending++; |
} |
} |
|
static void i915_gem_free_request(struct drm_i915_gem_request *request) |
{ |
list_del(&request->list); |
i915_gem_request_remove_from_client(request); |
|
if (request->ctx) |
i915_gem_context_unreference(request->ctx); |
|
kfree(request); |
} |
|
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, |
struct intel_ring_buffer *ring) |
{ |
u32 completed_seqno; |
u32 acthd; |
|
acthd = intel_ring_get_active_head(ring); |
completed_seqno = ring->get_seqno(ring, false); |
|
while (!list_empty(&ring->request_list)) { |
struct drm_i915_gem_request *request; |
|
1985,9 → 2123,10 |
struct drm_i915_gem_request, |
list); |
|
if (request->seqno > completed_seqno) |
i915_set_reset_status(ring, request, acthd); |
|
i915_gem_free_request(request); |
} |
|
while (!list_empty(&ring->active_list)) { |
2008,14 → 2147,23 |
|
for (i = 0; i < dev_priv->num_fence_regs; i++) { |
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; |
|
/* |
* Commit delayed tiling changes if we have an object still |
* attached to the fence, otherwise just clear the fence. |
*/ |
if (reg->obj) { |
i915_gem_object_update_fence(reg->obj, reg, |
reg->obj->tiling_mode); |
} else { |
i915_gem_write_fence(dev, i, NULL); |
} |
} |
} |
|
void i915_gem_reset(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring; |
int i; |
|
2022,16 → 2170,6 |
for_each_ring(ring, dev_priv, i) |
i915_gem_reset_ring_lists(dev_priv, ring); |
|
i915_gem_restore_fences(dev); |
} |
|
2068,9 → 2206,7 |
*/ |
ring->last_retired_head = request->tail; |
|
i915_gem_free_request(request); |
} |
|
/* Move any buffers on the active list that are no longer referenced |
2137,12 → 2273,12 |
idle = true; |
for_each_ring(ring, dev_priv, i) { |
if (ring->gpu_caches_dirty) |
i915_add_request(ring, NULL); |
|
idle &= list_empty(&ring->request_list); |
} |
|
if (!dev_priv->ums.mm_suspended && !idle) |
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, |
round_jiffies_up_relative(HZ)); |
if (idle) |
2277,12 → 2413,9 |
old_write_domain); |
} |
|
int i915_vma_unbind(struct i915_vma *vma) |
{ |
struct drm_i915_gem_object *obj = vma->obj; |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
int ret; |
|
2289,9 → 2422,12 |
if(obj == get_fb_obj()) |
return 0; |
|
if (list_empty(&vma->vma_link)) |
return 0; |
|
if (!drm_mm_node_allocated(&vma->node)) |
goto destroy; |
|
if (obj->pin_count) |
return -EBUSY; |
|
2312,7 → 2448,7 |
if (ret) |
return ret; |
|
trace_i915_vma_unbind(vma); |
|
if (obj->has_global_gtt_mapping) |
i915_gem_gtt_unbind_object(obj); |
2321,19 → 2457,48 |
obj->has_aliasing_ppgtt_mapping = 0; |
} |
i915_gem_gtt_finish_object(obj); |
i915_gem_object_unpin_pages(obj); |
|
list_del(&vma->mm_list); |
/* Avoid an unnecessary call to unbind on rebind. */ |
if (i915_is_ggtt(vma->vm)) |
obj->map_and_fenceable = true; |
|
drm_mm_remove_node(&vma->node); |
|
destroy: |
i915_gem_vma_destroy(vma); |
|
/* Since the unbound list is global, only move to that list if |
* no more VMAs exist. |
* NB: Until we have real VMAs there will only ever be one */ |
WARN_ON(!list_empty(&obj->vma_list)); |
if (list_empty(&obj->vma_list)) |
list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); |
|
return 0; |
} |
|
/** |
* Unbinds an object from the global GTT aperture. |
*/ |
int |
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
struct i915_address_space *ggtt = &dev_priv->gtt.base; |
|
if (!i915_gem_obj_ggtt_bound(obj)) |
return 0; |
|
if (obj->pin_count) |
return -EBUSY; |
|
BUG_ON(obj->pages == NULL); |
|
return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt)); |
} |
|
int i915_gpu_idle(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
2360,7 → 2525,6 |
drm_i915_private_t *dev_priv = dev->dev_private; |
int fence_reg; |
int fence_pitch_shift; |
|
if (INTEL_INFO(dev)->gen >= 6) { |
fence_reg = FENCE_REG_SANDYBRIDGE_0; |
2370,23 → 2534,42 |
fence_pitch_shift = I965_FENCE_PITCH_SHIFT; |
} |
|
fence_reg += reg * 8; |
|
/* To w/a incoherency with non-atomic 64-bit register updates, |
* we split the 64-bit update into two 32-bit writes. In order |
* for a partial fence not to be evaluated between writes, we |
* precede the update with write to turn off the fence register, |
* and only enable the fence as the last step. |
* |
* For extra levels of paranoia, we make sure each step lands |
* before applying the next step. |
*/ |
I915_WRITE(fence_reg, 0); |
POSTING_READ(fence_reg); |
|
if (obj) { |
u32 size = i915_gem_obj_ggtt_size(obj); |
uint64_t val; |
|
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & |
0xfffff000) << 32; |
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; |
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; |
if (obj->tiling_mode == I915_TILING_Y) |
val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
val |= I965_FENCE_REG_VALID; |
|
I915_WRITE(fence_reg + 4, val >> 32); |
POSTING_READ(fence_reg + 4); |
|
I915_WRITE(fence_reg + 0, val); |
		POSTING_READ(fence_reg);
} else { |
I915_WRITE(fence_reg + 4, 0); |
POSTING_READ(fence_reg + 4); |
} |
} |
|
static void i915_write_fence_reg(struct drm_device *dev, int reg, |
struct drm_i915_gem_object *obj) |
2395,15 → 2578,15 |
u32 val; |
|
if (obj) { |
u32 size = i915_gem_obj_ggtt_size(obj); |
int pitch_val; |
int tile_width; |
|
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
		     i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
|
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) |
tile_width = 128; |
2414,7 → 2597,7 |
pitch_val = obj->stride / tile_width; |
pitch_val = ffs(pitch_val) - 1; |
|
val = i915_gem_obj_ggtt_offset(obj); |
if (obj->tiling_mode == I915_TILING_Y) |
val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
val |= I915_FENCE_SIZE_BITS(size); |
2439,19 → 2622,19 |
uint32_t val; |
|
if (obj) { |
u32 size = i915_gem_obj_ggtt_size(obj); |
uint32_t pitch_val; |
|
		WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
		     (size & -size) != size ||
		     (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
		     "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
		     i915_gem_obj_ggtt_offset(obj), size);
|
pitch_val = obj->stride / 128; |
pitch_val = ffs(pitch_val) - 1; |
|
val = i915_gem_obj_ggtt_offset(obj); |
if (obj->tiling_mode == I915_TILING_Y) |
val |= 1 << I830_FENCE_TILING_Y_SHIFT; |
val |= I830_FENCE_SIZE_BITS(size); |
2480,6 → 2663,10 |
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
mb(); |
|
WARN(obj && (!obj->stride || !obj->tiling_mode), |
"bogus fence setup with stride: 0x%x, tiling mode: %i\n", |
obj->stride, obj->tiling_mode); |
|
switch (INTEL_INFO(dev)->gen) { |
case 7: |
case 6: |
2503,36 → 2690,17 |
return fence - dev_priv->fence_regs; |
} |
|
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, |
struct drm_i915_fence_reg *fence, |
bool enable) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
int reg = fence_number(dev_priv, fence); |
|
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); |
|
if (enable) { |
obj->fence_reg = reg; |
fence->obj = obj; |
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list); |
} else { |
2540,6 → 2708,7 |
fence->obj = NULL; |
list_del_init(&fence->lru_list); |
} |
obj->fence_dirty = false; |
} |
|
static int |
2669,7 → 2838,6 |
return 0; |
|
i915_gem_object_update_fence(obj, reg, enable); |
|
return 0; |
} |
2687,7 → 2855,7 |
if (HAS_LLC(dev)) |
return true; |
|
if (!drm_mm_node_allocated(gtt_space)) |
return true; |
|
	if (list_empty(&gtt_space->node_list))
2711,7 → 2879,7 |
struct drm_i915_gem_object *obj; |
int err = 0; |
|
list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { |
if (obj->gtt_space == NULL) { |
printk(KERN_ERR "object found on GTT list with no space reserved\n"); |
err++; |
2720,8 → 2888,8 |
|
if (obj->cache_level != obj->gtt_space->color) { |
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", |
i915_gem_obj_ggtt_offset(obj), |
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), |
obj->cache_level, |
obj->gtt_space->color); |
err++; |
2732,8 → 2900,8 |
obj->gtt_space, |
obj->cache_level)) { |
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", |
i915_gem_obj_ggtt_offset(obj), |
i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj), |
obj->cache_level); |
err++; |
continue; |
2748,7 → 2916,8 |
* Finds free space in the GTT aperture and binds the object there. |
*/ |
static int |
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm, |
unsigned alignment, |
bool map_and_fenceable, |
bool nonblocking) |
2755,9 → 2924,10 |
{ |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
	u32 size, fence_size, fence_alignment, unfenced_alignment;
size_t gtt_max = |
map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total; |
struct i915_vma *vma; |
int ret; |
|
fence_size = i915_gem_get_gtt_size(dev, |
2784,10 → 2954,11 |
/* If the object is bigger than the entire aperture, reject it early |
* before evicting everything in a vain attempt to find space. |
*/ |
if (obj->base.size > gtt_max) { |
DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", |
obj->base.size, |
map_and_fenceable ? "mappable" : "total", |
gtt_max); |
return -E2BIG; |
} |
|
2797,62 → 2968,69 |
|
i915_gem_object_pin_pages(obj); |
|
BUG_ON(!i915_is_ggtt(vm)); |
|
vma = i915_gem_obj_lookup_or_create_vma(obj, vm); |
if (IS_ERR(vma)) { |
ret = PTR_ERR(vma); |
goto err_unpin; |
} |
|
/* For now we only ever use 1 vma per object */ |
WARN_ON(!list_is_singular(&obj->vma_list)); |
|
search_free: |
if (map_and_fenceable) |
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
size, alignment, obj->cache_level, |
0, dev_priv->gtt.mappable_end); |
else |
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
size, alignment, obj->cache_level); |
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, |
size, alignment, |
obj->cache_level, 0, gtt_max, |
DRM_MM_SEARCH_DEFAULT); |
if (ret) { |
|
i915_gem_object_unpin_pages(obj); |
kfree(node); |
return ret; |
goto err_free_vma; |
} |
if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) { |
i915_gem_object_unpin_pages(obj); |
drm_mm_put_block(node); |
return -EINVAL; |
if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node, |
obj->cache_level))) { |
ret = -EINVAL; |
goto err_remove_node; |
} |
|
ret = i915_gem_gtt_prepare_object(obj); |
if (ret) |
goto err_remove_node; |
|
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
list_add_tail(&vma->mm_list, &vm->inactive_list); |
|
if (i915_is_ggtt(vm)) { |
bool mappable, fenceable; |
|
		fenceable = (vma->node.size == fence_size &&
			     (vma->node.start & (fence_alignment - 1)) == 0);

		mappable = (vma->node.start + obj->base.size <=
			    dev_priv->gtt.mappable_end);

		obj->map_and_fenceable = mappable && fenceable;
} |
|
WARN_ON(map_and_fenceable && !obj->map_and_fenceable); |
|
trace_i915_vma_bind(vma, map_and_fenceable); |
i915_gem_verify_gtt(dev); |
return 0; |
|
err_remove_node: |
drm_mm_remove_node(&vma->node); |
err_free_vma: |
i915_gem_vma_destroy(vma); |
err_unpin: |
i915_gem_object_unpin_pages(obj); |
return ret; |
} |
|
bool |
i915_gem_clflush_object(struct drm_i915_gem_object *obj, |
bool force) |
{ |
/* If we don't have a page list set up, then we're not pinned |
* to GPU, and we can ignore the cache flush because it'll happen |
2859,7 → 3037,7 |
* again at bind time. |
*/ |
if (obj->pages == NULL) |
return false; |
|
/* |
* Stolen memory is always coherent with the GPU as it is explicitly |
2866,7 → 3044,7 |
* marked as wc by the system, or the system is cache-coherent. |
*/ |
if (obj->stolen) |
return false; |
|
/* If the GPU is snooping the contents of the CPU cache, |
* we do not need to manually clear the CPU cache lines. However, |
2876,8 → 3054,8 |
* snooping behaviour occurs naturally as the result of our domain |
* tracking. |
*/ |
if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) |
return false; |
#if 0 |
if(obj->mapped != NULL) |
{ |
2927,6 → 3105,7 |
} |
#endif |
|
return true; |
} |
|
/** Flushes the GTT write domain for the object if it's dirty. */ |
2958,7 → 3137,8 |
|
/** Flushes the CPU write domain for the object if it's dirty. */ |
static void |
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, |
bool force) |
{ |
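	/* force=true also clflushes objects whose cache level would otherwise
	 * be considered coherent (see i915_gem_clflush_object). */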
uint32_t old_write_domain; |
|
2965,8 → 3145,9 |
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) |
return; |
|
	if (i915_gem_clflush_object(obj, force))
		i915_gem_chipset_flush(obj->base.dev);
|
old_write_domain = obj->base.write_domain; |
obj->base.write_domain = 0; |
|
2989,7 → 3170,7 |
int ret; |
|
/* Not valid to be called on unbound objects. */ |
if (!i915_gem_obj_bound_any(obj)) |
return -EINVAL; |
|
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) |
2999,7 → 3180,7 |
if (ret) |
return ret; |
|
i915_gem_object_flush_cpu_write_domain(obj, false); |
|
/* Serialise direct access to this object with the barriers for |
* coherent writes from the GPU, by effectively invalidating the |
3027,9 → 3208,15 |
old_write_domain); |
|
/* And bump the LRU for this access */ |
if (i915_gem_object_is_inactive(obj)) { |
struct i915_vma *vma = i915_gem_obj_to_vma(obj, |
&dev_priv->gtt.base); |
if (vma) |
list_move_tail(&vma->mm_list, |
&dev_priv->gtt.base.inactive_list); |
|
} |
|
return 0; |
} |
|
3038,6 → 3225,7 |
{ |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct i915_vma *vma; |
int ret; |
|
if (obj->cache_level == cache_level) |
3048,13 → 3236,17 |
return -EBUSY; |
} |
|
	list_for_each_entry(vma, &obj->vma_list, vma_link) {
		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
			ret = i915_vma_unbind(vma);
			if (ret)
				return ret;
|
break; |
} |
} |
|
if (i915_gem_obj_bound_any(obj)) { |
ret = i915_gem_object_finish_gpu(obj); |
if (ret) |
return ret; |
3076,11 → 3268,13 |
if (obj->has_aliasing_ppgtt_mapping) |
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt, |
obj, cache_level); |
|
} |
|
list_for_each_entry(vma, &obj->vma_list, vma_link) |
vma->node.color = cache_level; |
obj->cache_level = cache_level; |
|
if (cpu_write_needs_clflush(obj)) { |
u32 old_read_domains, old_write_domain; |
|
/* If we're coming from LLC cached, then we haven't |
3090,7 → 3284,6 |
* Just set it to the CPU cache for now. |
*/ |
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); |
|
old_read_domains = obj->base.read_domains; |
old_write_domain = obj->base.write_domain; |
3103,7 → 3296,6 |
old_write_domain); |
} |
|
i915_gem_verify_gtt(dev); |
return 0; |
} |
3131,8 → 3323,21 |
goto unlock; |
} |
|
switch (obj->cache_level) { |
case I915_CACHE_LLC: |
case I915_CACHE_L3_LLC: |
args->caching = I915_CACHING_CACHED; |
break; |
|
case I915_CACHE_WT: |
args->caching = I915_CACHING_DISPLAY; |
break; |
|
default: |
args->caching = I915_CACHING_NONE; |
break; |
} |
|
drm_gem_object_unreference(&obj->base); |
unlock: |
mutex_unlock(&dev->struct_mutex); |
3160,6 → 3365,9 |
case I915_CACHING_CACHED: |
level = I915_CACHE_LLC; |
break; |
case I915_CACHING_DISPLAY: |
level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE; |
break; |
default: |
return -EINVAL; |
} |
3182,6 → 3390,22 |
return ret; |
} |
|
static bool is_pin_display(struct drm_i915_gem_object *obj) |
{ |
/* There are 3 sources that pin objects: |
* 1. The display engine (scanouts, sprites, cursors); |
* 2. Reservations for execbuffer; |
* 3. The user. |
* |
* We can ignore reservations as we hold the struct_mutex and |
* are only called outside of the reservation path. The user |
* can only increment pin_count once, and so if after |
* subtracting the potential reference by the user, any pin_count |
* remains, it must be due to another use by the display engine. |
*/ |
return obj->pin_count - !!obj->user_pin_count; |
} |
|
/* |
* Prepare buffer for display plane (scanout, cursors, etc). |
* Can be called from an uninterruptible phase (modesetting) and allows |
3201,6 → 3425,11 |
return ret; |
} |
|
/* Mark the pin_display early so that we account for the |
* display coherency whilst setting up the cache domains. |
*/ |
obj->pin_display = true; |
|
/* The display engine is not coherent with the LLC cache on gen6. As |
* a result, we make sure that the pinning that is about to occur is |
* done with uncached PTEs. This is lowest common denominator for all |
3210,19 → 3439,20 |
* of uncaching, which would allow us to flush all the LLC-cached data |
* with that bit in the PTE to main memory with just one PIPE_CONTROL. |
*/ |
ret = i915_gem_object_set_cache_level(obj, |
HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE); |
if (ret) |
goto err_unpin_display; |
|
/* As the user may map the buffer once pinned in the display plane |
* (e.g. libkms for the bootup splash), we have to ensure that we |
* always use map_and_fenceable for all scanout buffers. |
*/ |
ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false); |
if (ret) |
goto err_unpin_display; |
|
i915_gem_object_flush_cpu_write_domain(obj, true); |
|
old_write_domain = obj->base.write_domain; |
old_read_domains = obj->base.read_domains; |
3238,8 → 3468,19 |
old_write_domain); |
|
return 0; |
|
err_unpin_display: |
obj->pin_display = is_pin_display(obj); |
return ret; |
} |
|
void |
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) |
{ |
i915_gem_object_unpin(obj); |
obj->pin_display = is_pin_display(obj); |
} |
|
int |
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) |
{ |
3283,7 → 3524,7 |
|
/* Flush the CPU cache if it's still invalid. */ |
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { |
i915_gem_clflush_object(obj, false); |
|
obj->base.read_domains |= I915_GEM_DOMAIN_CPU; |
} |
3361,35 → 3602,42 |
|
int |
i915_gem_object_pin(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm, |
uint32_t alignment, |
bool map_and_fenceable, |
bool nonblocking) |
{ |
struct i915_vma *vma; |
int ret; |
|
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) |
return -EBUSY; |
|
WARN_ON(map_and_fenceable && !i915_is_ggtt(vm)); |
|
vma = i915_gem_obj_to_vma(obj, vm); |
|
if (vma) { |
if ((alignment && |
vma->node.start & (alignment - 1)) || |
(map_and_fenceable && !obj->map_and_fenceable)) { |
			WARN(obj->pin_count,
			     "bo is already pinned with incorrect alignment:"
			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
			     " obj->map_and_fenceable=%d\n",
			     i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable, |
obj->map_and_fenceable); |
ret = i915_vma_unbind(vma); |
if (ret) |
return ret; |
} |
} |
|
if (!i915_gem_obj_bound(obj, vm)) { |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
|
ret = i915_gem_object_bind_to_vm(obj, vm, alignment, |
map_and_fenceable, |
nonblocking); |
if (ret) |
3412,7 → 3660,7 |
i915_gem_object_unpin(struct drm_i915_gem_object *obj) |
{ |
BUG_ON(obj->pin_count == 0); |
BUG_ON(!i915_gem_obj_bound_any(obj)); |
|
if (--obj->pin_count == 0) |
obj->pin_mappable = false; |
3456,7 → 3704,7 |
} |
|
if (obj->user_pin_count == 0) { |
ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false); |
if (ret) |
goto out; |
} |
3464,11 → 3712,7 |
obj->user_pin_count++; |
obj->pin_filp = file; |
|
args->offset = i915_gem_obj_ggtt_offset(obj); |
out: |
drm_gem_object_unreference(&obj->base); |
unlock: |
3620,10 → 3864,11 |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
const struct drm_i915_gem_object_ops *ops) |
{ |
INIT_LIST_HEAD(&obj->global_list); |
INIT_LIST_HEAD(&obj->ring_list); |
INIT_LIST_HEAD(&obj->exec_list); |
INIT_LIST_HEAD(&obj->obj_exec_link); |
INIT_LIST_HEAD(&obj->vma_list); |
|
obj->ops = ops; |
|
3648,8 → 3893,6 |
gfp_t mask; |
|
obj = i915_gem_object_alloc(dev); |
|
if (obj == NULL) |
{ |
FAIL(); |
3657,8 → 3900,7 |
}; |
|
if (drm_gem_object_init(dev, &obj->base, size) != 0) { |
i915_gem_object_free(obj); |
return NULL; |
} |
|
3700,33 → 3942,42 |
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); |
struct drm_device *dev = obj->base.dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct i915_vma *vma, *next; |
|
trace_i915_gem_object_destroy(obj); |
|
// if (obj->phys_obj) |
// i915_gem_detach_phys_object(dev, obj); |
// printf("%s obj %p\n", __FUNCTION__, obj); |
|
obj->pin_count = 0; |
/* NB: 0 or 1 elements */ |
WARN_ON(!list_empty(&obj->vma_list) && |
!list_is_singular(&obj->vma_list)); |
list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { |
int ret = i915_vma_unbind(vma); |
if (WARN_ON(ret == -ERESTARTSYS)) { |
bool was_interruptible; |
|
was_interruptible = dev_priv->mm.interruptible; |
dev_priv->mm.interruptible = false; |
|
WARN_ON(i915_vma_unbind(vma)); |
|
dev_priv->mm.interruptible = was_interruptible; |
} |
} |
|
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up |
* before progressing. */ |
if (obj->stolen) |
i915_gem_object_unpin_pages(obj); |
|
if (WARN_ON(obj->pages_pin_count)) |
obj->pages_pin_count = 0; |
i915_gem_object_put_pages(obj); |
// i915_gem_object_free_mmap_offset(obj); |
i915_gem_object_release_stolen(obj); |
|
BUG_ON(obj->pages); |
|
// if (obj->base.import_attach) |
// drm_prime_gem_destroy(&obj->base, NULL); |
|
if(obj->base.filp != NULL) |
{ |
3738,9 → 3989,38 |
i915_gem_info_remove_obj(dev_priv, obj->base.size); |
|
kfree(obj->bit_17); |
i915_gem_object_free(obj); |
} |
|
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); |
if (vma == NULL) |
return ERR_PTR(-ENOMEM); |
|
INIT_LIST_HEAD(&vma->vma_link); |
INIT_LIST_HEAD(&vma->mm_list); |
INIT_LIST_HEAD(&vma->exec_list); |
vma->vm = vm; |
vma->obj = obj; |
|
/* Keep GGTT vmas first to make debug easier */ |
if (i915_is_ggtt(vm)) |
list_add(&vma->vma_link, &obj->vma_list); |
else |
list_add_tail(&vma->vma_link, &obj->vma_list); |
|
return vma; |
} |
|
void i915_gem_vma_destroy(struct i915_vma *vma) |
{ |
WARN_ON(vma->node.allocated); |
list_del(&vma->vma_link); |
kfree(vma); |
} |
|
#if 0 |
int |
i915_gem_idle(struct drm_device *dev) |
3748,9 → 4028,7 |
drm_i915_private_t *dev_priv = dev->dev_private; |
int ret; |
|
if (dev_priv->ums.mm_suspended) { |
mutex_unlock(&dev->struct_mutex); |
return 0; |
} |
3766,18 → 4044,11 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
i915_gem_evict_everything(dev); |
|
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
|
i915_kernel_lost_context(dev); |
i915_gem_cleanup_ringbuffer(dev); |
|
/* Cancel the retire work handler, which should be idle now. */ |
cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
|
3877,12 → 4148,21 |
goto cleanup_bsd_ring; |
} |
|
if (HAS_VEBOX(dev)) { |
ret = intel_init_vebox_ring_buffer(dev); |
if (ret) |
goto cleanup_blt_ring; |
} |
|
|
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); |
if (ret) |
goto cleanup_vebox_ring; |
|
return 0; |
|
cleanup_vebox_ring: |
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); |
cleanup_blt_ring: |
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); |
cleanup_bsd_ring: |
3902,8 → 4182,8 |
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
return -EIO; |
|
if (dev_priv->ellc_size) |
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); |
|
if (HAS_PCH_NOP(dev)) { |
u32 temp = I915_READ(GEN7_MSG_CTL); |
3981,7 → 4261,7 |
i915_gem_entervt_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
3993,7 → 4273,7 |
} |
|
mutex_lock(&dev->struct_mutex); |
dev_priv->ums.mm_suspended = 0; |
|
ret = i915_gem_init_hw(dev); |
if (ret != 0) { |
4001,7 → 4281,7 |
return ret; |
} |
|
BUG_ON(!list_empty(&dev_priv->gtt.base.active_list)); |
mutex_unlock(&dev->struct_mutex); |
|
ret = drm_irq_install(dev); |
4013,7 → 4293,7 |
cleanup_ringbuffer: |
mutex_lock(&dev->struct_mutex); |
i915_gem_cleanup_ringbuffer(dev); |
dev_priv->ums.mm_suspended = 1; |
mutex_unlock(&dev->struct_mutex); |
|
return ret; |
4023,11 → 4303,26 |
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
|
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
return 0; |
|
drm_irq_uninstall(dev); |
|
mutex_lock(&dev->struct_mutex); |
ret = i915_gem_idle(dev); |
|
/* Hack! Don't let anybody do execbuf while we don't control the chip. |
* We need to replace this with a semaphore, or something. |
* And not confound ums.mm_suspended! |
*/ |
if (ret != 0) |
dev_priv->ums.mm_suspended = 1; |
mutex_unlock(&dev->struct_mutex); |
|
return ret; |
} |
|
void |
4038,9 → 4333,11 |
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
return; |
|
mutex_lock(&dev->struct_mutex); |
ret = i915_gem_idle(dev); |
if (ret) |
DRM_ERROR("failed to idle hardware: %d\n", ret); |
mutex_unlock(&dev->struct_mutex); |
} |
#endif |
|
4051,6 → 4348,16 |
INIT_LIST_HEAD(&ring->request_list); |
} |
|
static void i915_init_vm(struct drm_i915_private *dev_priv, |
struct i915_address_space *vm) |
{ |
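	/* Initialise the per-VM object lists and publish the VM on the
	 * device-wide vm_list. */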
vm->dev = dev_priv->dev; |
INIT_LIST_HEAD(&vm->active_list); |
INIT_LIST_HEAD(&vm->inactive_list); |
INIT_LIST_HEAD(&vm->global_link); |
list_add(&vm->global_link, &dev_priv->vm_list); |
} |
|
void |
i915_gem_load(struct drm_device *dev) |
{ |
4057,8 → 4364,9 |
drm_i915_private_t *dev_priv = dev->dev_private; |
int i; |
|
INIT_LIST_HEAD(&dev_priv->vm_list); |
i915_init_vm(dev_priv, &dev_priv->gtt.base); |
|
INIT_LIST_HEAD(&dev_priv->mm.unbound_list); |
INIT_LIST_HEAD(&dev_priv->mm.bound_list); |
INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
4093,9 → 4401,308 |
|
dev_priv->mm.interruptible = true; |
|
// dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink; |
// dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS; |
// register_shrinker(&dev_priv->mm.inactive_shrinker); |
} |
|
#if 0 |
/* |
* Create a physically contiguous memory object for this object |
* e.g. for cursor + overlay regs |
*/ |
static int i915_gem_init_phys_object(struct drm_device *dev, |
int id, int size, int align) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_gem_phys_object *phys_obj; |
int ret; |
|
if (dev_priv->mm.phys_objs[id - 1] || !size) |
return 0; |
|
phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL); |
if (!phys_obj) |
return -ENOMEM; |
|
phys_obj->id = id; |
|
phys_obj->handle = drm_pci_alloc(dev, size, align); |
if (!phys_obj->handle) { |
ret = -ENOMEM; |
goto kfree_obj; |
} |
#ifdef CONFIG_X86 |
set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); |
#endif |
|
dev_priv->mm.phys_objs[id - 1] = phys_obj; |
|
return 0; |
kfree_obj: |
kfree(phys_obj); |
return ret; |
} |
|
static void i915_gem_free_phys_object(struct drm_device *dev, int id) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_i915_gem_phys_object *phys_obj; |
|
if (!dev_priv->mm.phys_objs[id - 1]) |
return; |
|
phys_obj = dev_priv->mm.phys_objs[id - 1]; |
if (phys_obj->cur_obj) { |
i915_gem_detach_phys_object(dev, phys_obj->cur_obj); |
} |
|
#ifdef CONFIG_X86 |
set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); |
#endif |
drm_pci_free(dev, phys_obj->handle); |
kfree(phys_obj); |
dev_priv->mm.phys_objs[id - 1] = NULL; |
} |
|
void i915_gem_free_all_phys_object(struct drm_device *dev) |
{ |
int i; |
|
for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) |
i915_gem_free_phys_object(dev, i); |
} |
|
void i915_gem_detach_phys_object(struct drm_device *dev, |
struct drm_i915_gem_object *obj) |
{ |
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; |
char *vaddr; |
int i; |
int page_count; |
|
if (!obj->phys_obj) |
return; |
vaddr = obj->phys_obj->handle->vaddr; |
|
page_count = obj->base.size / PAGE_SIZE; |
for (i = 0; i < page_count; i++) { |
struct page *page = shmem_read_mapping_page(mapping, i); |
if (!IS_ERR(page)) { |
char *dst = kmap_atomic(page); |
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); |
kunmap_atomic(dst); |
|
drm_clflush_pages(&page, 1); |
|
set_page_dirty(page); |
mark_page_accessed(page); |
page_cache_release(page); |
} |
} |
i915_gem_chipset_flush(dev); |
|
obj->phys_obj->cur_obj = NULL; |
obj->phys_obj = NULL; |
} |
|
int |
i915_gem_attach_phys_object(struct drm_device *dev, |
struct drm_i915_gem_object *obj, |
int id, |
int align) |
{ |
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; |
drm_i915_private_t *dev_priv = dev->dev_private; |
int ret = 0; |
int page_count; |
int i; |
|
if (id > I915_MAX_PHYS_OBJECT) |
return -EINVAL; |
|
if (obj->phys_obj) { |
if (obj->phys_obj->id == id) |
return 0; |
i915_gem_detach_phys_object(dev, obj); |
} |
|
/* create a new object */ |
if (!dev_priv->mm.phys_objs[id - 1]) { |
ret = i915_gem_init_phys_object(dev, id, |
obj->base.size, align); |
if (ret) { |
DRM_ERROR("failed to init phys object %d size: %zu\n", |
id, obj->base.size); |
return ret; |
} |
} |
|
/* bind to the object */ |
obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; |
obj->phys_obj->cur_obj = obj; |
|
page_count = obj->base.size / PAGE_SIZE; |
|
for (i = 0; i < page_count; i++) { |
struct page *page; |
char *dst, *src; |
|
page = shmem_read_mapping_page(mapping, i); |
if (IS_ERR(page)) |
return PTR_ERR(page); |
|
src = kmap_atomic(page); |
dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); |
memcpy(dst, src, PAGE_SIZE); |
kunmap_atomic(src); |
|
mark_page_accessed(page); |
page_cache_release(page); |
} |
|
return 0; |
} |
|
static int |
i915_gem_phys_pwrite(struct drm_device *dev, |
struct drm_i915_gem_object *obj, |
struct drm_i915_gem_pwrite *args, |
struct drm_file *file_priv) |
{ |
void *vaddr = obj->phys_obj->handle->vaddr + args->offset; |
char __user *user_data = to_user_ptr(args->data_ptr); |
|
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { |
unsigned long unwritten; |
|
/* The physical object once assigned is fixed for the lifetime |
* of the obj, so we can safely drop the lock and continue |
* to access vaddr. |
*/ |
mutex_unlock(&dev->struct_mutex); |
unwritten = copy_from_user(vaddr, user_data, args->size); |
mutex_lock(&dev->struct_mutex); |
if (unwritten) |
return -EFAULT; |
} |
|
i915_gem_chipset_flush(dev); |
return 0; |
} |
|
void i915_gem_release(struct drm_device *dev, struct drm_file *file) |
{ |
struct drm_i915_file_private *file_priv = file->driver_priv; |
|
/* Clean up our request list when the client is going away, so that |
* later retire_requests won't dereference our soon-to-be-gone |
* file_priv. |
*/ |
spin_lock(&file_priv->mm.lock); |
while (!list_empty(&file_priv->mm.request_list)) { |
struct drm_i915_gem_request *request; |
|
request = list_first_entry(&file_priv->mm.request_list, |
struct drm_i915_gem_request, |
client_list); |
list_del(&request->client_list); |
request->file_priv = NULL; |
} |
spin_unlock(&file_priv->mm.lock); |
} |
#endif |
|
static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) |
{ |
if (!mutex_is_locked(mutex)) |
return false; |
|
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) |
return mutex->owner == task; |
#else |
/* Since UP may be pre-empted, we cannot assume that we own the lock */ |
return false; |
#endif |
} |
|
/* All the new VM stuff */ |
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, |
struct i915_address_space *vm) |
{ |
struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
struct i915_vma *vma; |
|
if (vm == &dev_priv->mm.aliasing_ppgtt->base) |
vm = &dev_priv->gtt.base; |
|
BUG_ON(list_empty(&o->vma_list)); |
list_for_each_entry(vma, &o->vma_list, vma_link) { |
if (vma->vm == vm) |
return vma->node.start; |
|
} |
return 0; //-1; |
} |
|
bool i915_gem_obj_bound(struct drm_i915_gem_object *o, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma; |
|
list_for_each_entry(vma, &o->vma_list, vma_link) |
if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) |
return true; |
|
return false; |
} |
|
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) |
{ |
struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
struct i915_address_space *vm; |
|
list_for_each_entry(vm, &dev_priv->vm_list, global_link) |
if (i915_gem_obj_bound(o, vm)) |
return true; |
|
return false; |
} |
|
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, |
struct i915_address_space *vm) |
{ |
struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
struct i915_vma *vma; |
|
if (vm == &dev_priv->mm.aliasing_ppgtt->base) |
vm = &dev_priv->gtt.base; |
|
BUG_ON(list_empty(&o->vma_list)); |
|
list_for_each_entry(vma, &o->vma_list, vma_link) |
if (vma->vm == vm) |
return vma->node.size; |
|
return 0; |
} |
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma; |
list_for_each_entry(vma, &obj->vma_list, vma_link) |
if (vma->vm == vm) |
return vma; |
|
return NULL; |
} |
|
struct i915_vma * |
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, |
struct i915_address_space *vm) |
{ |
struct i915_vma *vma; |
|
vma = i915_gem_obj_to_vma(obj, vm); |
if (!vma) |
vma = i915_gem_vma_create(obj, vm); |
|
return vma; |
} |