Diff: Rev 6937 → Rev 7144
@@ -39,10 +39,9 @@
 #include
 
 #define RQ_BUG_ON(expr)
 
 extern int x86_clflush_size;
-#define __copy_to_user_inatomic __copy_to_user
 
 #define PROT_READ	0x1	/* page can be read */
 #define PROT_WRITE	0x2	/* page can be written */
 #define MAP_SHARED	0x01	/* Share changes */
@@ -153,12 +150,12 @@
 	struct i915_vma *vma;
 	size_t pinned;
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
-	list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
 		if (vma->pin_count)
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
@@ -245,9 +242,9 @@
 {
 	struct i915_vma *vma, *next;
 	int ret;
 
 	drm_gem_object_reference(&obj->base);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
 		if (i915_vma_unbind(vma))
 			break;
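Note on the renames running through this revision: vma_link becomes obj_link and mm_list becomes vm_link because each VMA is threaded onto two intrusive lists at once, one per object and one per address space, and the old names did not say which list a member belonged to. A minimal userspace sketch of the same pattern (all names here are hypothetical, not the driver's):

/* Sketch only: a struct threaded onto two intrusive lists at once,
 * mirroring i915_vma's obj_link (per-object) and vm_link (per-VM). */
#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vma {
	int id;
	struct list_node obj_link; /* links all VMAs of one object */
	struct list_node vm_link;  /* links all VMAs in one address space */
};

static void list_init(struct list_node *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

int main(void)
{
	struct list_node obj_list, vm_list;
	struct vma a = { .id = 1 }, b = { .id = 2 };
	struct list_node *it;

	list_init(&obj_list); list_init(&vm_list);
	list_add_tail(&a.obj_link, &obj_list);
	list_add_tail(&a.vm_link, &vm_list);
	list_add_tail(&b.obj_link, &obj_list);

	/* Iterating by the wrong member would walk the wrong list; the
	 * rename makes the member say which list it belongs to. */
	for (it = obj_list.next; it != &obj_list; it = it->next)
		printf("obj list: vma %d\n", container_of(it, struct vma, obj_link)->id);
	for (it = vm_list.next; it != &vm_list; it = it->next)
		printf("vm list: vma %d\n", container_of(it, struct vma, vm_link)->id);
	return 0;
}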
Line 651... | Line 648... | ||
651 | 648 | ||
652 | /* This is the fast write path which cannot handle |
649 | /* This is the fast write path which cannot handle |
653 | * page faults in the source data |
650 | * page faults in the source data |
Line -... | Line 651... | ||
- | 651 | */ |
|
- | 652 | ||
- | 653 | static inline int |
|
- | 654 | fast_user_write(struct io_mapping *mapping, |
|
- | 655 | loff_t page_base, int page_offset, |
|
- | 656 | char __user *user_data, |
|
- | 657 | int length) |
|
- | 658 | { |
|
- | 659 | void __iomem *vaddr_atomic; |
|
- | 660 | void *vaddr; |
|
- | 661 | unsigned long unwritten; |
|
- | 662 | ||
- | 663 | vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); |
|
- | 664 | /* We can use the cpu mem copy function because this is X86. */ |
|
- | 665 | vaddr = (void __force*)vaddr_atomic + page_offset; |
|
- | 666 | unwritten = __copy_from_user_inatomic_nocache(vaddr, |
|
- | 667 | user_data, length); |
|
- | 668 | io_mapping_unmap_atomic(vaddr_atomic); |
|
Line 654... | Line 669... | ||
654 | */ |
669 | return unwritten; |
655 | 670 | } |
|
656 | 671 | ||
657 | /** |
672 | /** |
@@ -700,12 +715,17 @@
 		page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		MapPage(dev_priv->gtt.mappable,
-			dev_priv->gtt.mappable_base+page_base, PG_WRITEC|PG_SW);
-
-		memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available. Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (fast_user_write(dev_priv->gtt.mappable, page_base,
+				    page_offset, user_data, page_length)) {
+			ret = -EFAULT;
+			goto out_flush;
+		}
 
 		remain -= page_length;
 		user_data += page_length;
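The restored GTT write path above attempts a no-fault copy into a temporarily mapped page and falls back to a slower, fault-tolerant path when the source page is not resident. A schematic userspace analog of that fast-path/fallback structure (hypothetical code, not the driver's API):

/* The "fast" copy reports how many bytes were NOT written (0 on
 * success), like __copy_from_user_inatomic_nocache(); here it fails
 * artificially after 8 bytes to exercise the fallback. */
#include <stdio.h>
#include <string.h>

static size_t fast_copy(char *dst, const char *src, size_t len)
{
	size_t done = len < 8 ? len : 8;
	memcpy(dst, src, done);
	return len - done; /* bytes left uncopied */
}

static void slow_copy(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len); /* the fault-tolerant path in the real driver */
}

int main(void)
{
	const char src[] = "0123456789abcdef";
	char dst[sizeof(src)] = { 0 };
	size_t unwritten = fast_copy(dst, src, sizeof(src));

	if (unwritten) /* mirrors the -EFAULT + slow-path retry in pwrite */
		slow_copy(dst + sizeof(src) - unwritten,
			  src + sizeof(src) - unwritten, unwritten);
	printf("%s\n", dst);
	return 0;
}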
@@ -739,11 +759,10 @@
 
 	vaddr = kmap_atomic(page);
 	if (needs_clflush_before)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
-	memcpy(vaddr + shmem_page_offset,
-	       user_data,
-	       page_length);
+	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
+					user_data, page_length);
 	if (needs_clflush_after)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
@@ -1124,9 +1143,9 @@
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	wait_queue_t wait;
 	unsigned long timeout_expire;
-	s64 before, now;
+	s64 before = 0; /* Only to silence a compiler warning. */
 	int ret;
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1144,13 +1163,16 @@
 
 		if (*timeout == 0)
 			return -ETIME;
 
 		timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+		/*
+		 * Record current time in case interrupted by signal, or wedged.
+		 */
+		before = ktime_get_raw_ns();
 	}
 
 	if (INTEL_INFO(dev_priv)->gen >= 6)
 		gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
-	/* Record current time in case interrupted by signal, or wedged */
 	trace_i915_gem_request_wait_begin(req);
-	before = ktime_get_raw_ns();
@@ -1211,10 +1233,9 @@
 		ring->irq_put(ring);
 
 	DestroyEvent(wait.evnt);
 
 out:
-	now = ktime_get_raw_ns();
 	trace_i915_gem_request_wait_end(req);
 
 	if (timeout) {
-		s64 tres = *timeout - (now - before);
+		s64 tres = *timeout - (ktime_get_raw_ns() - before);
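The timing change above samples `before` only when the caller actually passed a timeout, and folds the old `now` variable into the single expression computing the remaining budget. A sketch of the same bookkeeping in userspace, using CLOCK_MONOTONIC as a stand-in for ktime_get_raw_ns() (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t mono_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

int main(void)
{
	int64_t timeout = 50 * 1000 * 1000; /* 50 ms budget */
	int64_t before = mono_ns();
	struct timespec pause = { 0, 10 * 1000 * 1000 }; /* the "wait": 10 ms */

	nanosleep(&pause, NULL);

	/* Same shape as: tres = *timeout - (ktime_get_raw_ns() - before);
	 * clamp so the caller never sees a negative remainder. */
	int64_t tres = timeout - (mono_ns() - before);
	if (tres < 0)
		tres = 0;
	printf("remaining: %lld ns\n", (long long)tres);
	return 0;
}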
@@ -2051,7 +2072,7 @@
 	obj->active |= intel_ring_flag(ring);
 
 	list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
 	i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
-	list_move_tail(&vma->mm_list, &vma->vm->active_list);
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
@@ -2089,11 +2110,11 @@
 	 * (unless we are forced to ofc!)
 	 */
 	list_move_tail(&obj->global_list,
 		       &to_i915(obj->base.dev)->mm.bound_list);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (!list_empty(&vma->mm_list))
-			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!list_empty(&vma->vm_link))
+			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
 	i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2248,8 +2269,8 @@
 	ring->last_submitted_seqno = request->seqno;
 	list_add_tail(&request->list, &ring->request_list);
 
 	trace_i915_gem_request_add(request);
 
-//	i915_queue_hangcheck(ring->dev);
+	i915_queue_hangcheck(ring->dev);
 
 	queue_delayed_work(dev_priv->wq,
@@ -2314,17 +2335,16 @@
 
 	if (req->file_priv)
 		i915_gem_request_remove_from_client(req);
 
 	if (ctx) {
-		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
-				intel_lr_context_unpin(req);
-		}
+		if (i915.enable_execlists && ctx != req->i915->kernel_context)
+			intel_lr_context_unpin(ctx, req->ring);
 
 		i915_gem_context_unreference(ctx);
 	}
 
 	kfree(req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
@@ -2391,8 +2411,33 @@
 err:
 	kfree(req);
 	return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx)
+{
+	struct drm_i915_gem_request *req;
+	int err;
+
+	if (ctx == NULL)
+		ctx = to_i915(engine->dev)->kernel_context;
+	err = __i915_gem_request_alloc(engine, ctx, &req);
+	return err ? ERR_PTR(err) : req;
+}
 
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ringbuf);
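The new wrapper changes the allocation contract: instead of an int return plus an out-parameter, the request pointer itself carries the error, which is why call sites below switch from `if (ret)` to IS_ERR()/PTR_ERR(). A self-contained rendition of the kernel's ERR_PTR convention (a userspace re-implementation, for illustration only):

/* Errno values live in the top page of the address space, so a single
 * pointer return carries either a valid object or an error code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct request { int id; };

static struct request *request_alloc(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	struct request *req = malloc(sizeof(*req));
	return req ? req : ERR_PTR(-ENOMEM);
}

int main(void)
{
	struct request *req = request_alloc(1);

	if (IS_ERR(req)) { /* same pattern as the new call sites */
		printf("alloc failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	free(req);
	return 0;
}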
@@ -2582,12 +2627,10 @@
 
 	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
 		if (i915.enable_execlists) {
-			unsigned long flags;
-
-			spin_lock_irqsave(&ring->execlist_lock, flags);
+			spin_lock_irq(&ring->execlist_lock);
 			idle &= list_empty(&ring->execlist_queue);
-			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+			spin_unlock_irq(&ring->execlist_lock);
 
 			intel_execlists_retire_requests(ring);
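spin_lock_irqsave() saves and restores the caller's interrupt state, while spin_lock_irq() unconditionally disables interrupts and re-enables them on unlock, so the flags variable disappears. The substitution is valid only because this retire path always runs in process context with interrupts enabled. A kernel-style fragment of the pattern (demo_* names are hypothetical; this builds only inside a kernel tree):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_queue);

static bool demo_queue_idle(void)
{
	bool idle;

	spin_lock_irq(&demo_lock);	/* disables IRQs... */
	idle = list_empty(&demo_queue);
	spin_unlock_irq(&demo_lock);	/* ...and unconditionally re-enables them */

	return idle;
}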
@@ -2808,10 +2851,14 @@
 
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
 
 	if (*to_req == NULL) {
-		ret = i915_gem_request_alloc(to, to->default_context, to_req);
-		if (ret)
-			return ret;
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_request_alloc(to, NULL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		*to_req = req;
 	}
2927 | { |
2974 | { |
2928 | struct drm_i915_gem_object *obj = vma->obj; |
2975 | struct drm_i915_gem_object *obj = vma->obj; |
2929 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2976 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
2930 | int ret; |
2977 | int ret; |
Line 2931... | Line 2978... | ||
2931 | 2978 | ||
2932 | if (list_empty(&vma->vma_link)) |
2979 | if (list_empty(&vma->obj_link)) |
Line 2933... | Line 2980... | ||
2933 | return 0; |
2980 | return 0; |
2934 | 2981 | ||
2935 | if (!drm_mm_node_allocated(&vma->node)) { |
2982 | if (!drm_mm_node_allocated(&vma->node)) { |
Line 2946... | Line 2993... | ||
2946 | ret = i915_gem_object_wait_rendering(obj, false); |
2993 | ret = i915_gem_object_wait_rendering(obj, false); |
2947 | if (ret) |
2994 | if (ret) |
2948 | return ret; |
2995 | return ret; |
2949 | } |
2996 | } |
Line 2950... | Line -... | ||
2950 | - | ||
2951 | if (i915_is_ggtt(vma->vm) && |
2997 | |
2952 | vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
2998 | if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
Line 2953... | Line 2999... | ||
2953 | i915_gem_object_finish_gtt(obj); |
2999 | i915_gem_object_finish_gtt(obj); |
2954 | 3000 | ||
2955 | /* release the fence reg _after_ flushing */ |
3001 | /* release the fence reg _after_ flushing */ |
Line 2961... | Line 3007... | ||
2961 | trace_i915_vma_unbind(vma); |
3007 | trace_i915_vma_unbind(vma); |
Line 2962... | Line 3008... | ||
2962 | 3008 | ||
2963 | vma->vm->unbind_vma(vma); |
3009 | vma->vm->unbind_vma(vma); |
Line 2964... | Line 3010... | ||
2964 | vma->bound = 0; |
3010 | vma->bound = 0; |
2965 | 3011 | ||
2966 | list_del_init(&vma->mm_list); |
3012 | list_del_init(&vma->vm_link); |
2967 | if (i915_is_ggtt(vma->vm)) { |
3013 | if (vma->is_ggtt) { |
2968 | if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
3014 | if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { |
2969 | obj->map_and_fenceable = false; |
3015 | obj->map_and_fenceable = false; |
2970 | } else if (vma->ggtt_view.pages) { |
3016 | } else if (vma->ggtt_view.pages) { |
Line 3010... | Line 3056... | ||
3010 | /* Flush everything onto the inactive list. */ |
3056 | /* Flush everything onto the inactive list. */ |
3011 | for_each_ring(ring, dev_priv, i) { |
3057 | for_each_ring(ring, dev_priv, i) { |
3012 | if (!i915.enable_execlists) { |
3058 | if (!i915.enable_execlists) { |
3013 | struct drm_i915_gem_request *req; |
3059 | struct drm_i915_gem_request *req; |
Line 3014... | Line 3060... | ||
3014 | 3060 | ||
3015 | ret = i915_gem_request_alloc(ring, ring->default_context, &req); |
3061 | req = i915_gem_request_alloc(ring, NULL); |
3016 | if (ret) |
3062 | if (IS_ERR(req)) |
Line 3017... | Line 3063... | ||
3017 | return ret; |
3063 | return PTR_ERR(req); |
3018 | 3064 | ||
3019 | ret = i915_switch_context(req); |
3065 | ret = i915_switch_context(req); |
3020 | if (ret) { |
3066 | if (ret) { |
@@ -3208,8 +3254,8 @@
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
 	if (ret)
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->mm_list, &vm->inactive_list);
+	list_add_tail(&vma->vm_link, &vm->inactive_list);
 
 	return vma;
@@ -3373,8 +3419,8 @@
 					    old_write_domain);
 
 	/* And bump the LRU for this access */
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->mm_list,
+		list_move_tail(&vma->vm_link,
 			       &to_i915(obj->base.dev)->gtt.base.inactive_list);
@@ -3408,10 +3454,10 @@
 	/* Inspect the list of currently bound VMA and unbind any that would
 	 * be invalid given the new cache-level. This is principally to
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
 		if (vma->pin_count) {
3471 | * rewrite it without confusing the GPU or having |
3517 | * rewrite it without confusing the GPU or having |
3472 | * to force userspace to fault back in its mmaps. |
3518 | * to force userspace to fault back in its mmaps. |
3473 | */ |
3519 | */ |
3474 | } |
3520 | } |
Line 3475... | Line 3521... | ||
3475 | 3521 | ||
3476 | list_for_each_entry(vma, &obj->vma_list, vma_link) { |
3522 | list_for_each_entry(vma, &obj->vma_list, obj_link) { |
3477 | if (!drm_mm_node_allocated(&vma->node)) |
3523 | if (!drm_mm_node_allocated(&vma->node)) |
Line 3478... | Line 3524... | ||
3478 | continue; |
3524 | continue; |
3479 | 3525 | ||
3480 | ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); |
3526 | ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); |
3481 | if (ret) |
3527 | if (ret) |
3482 | return ret; |
3528 | return ret; |
Line 3483... | Line 3529... | ||
3483 | } |
3529 | } |
3484 | } |
3530 | } |
3485 | 3531 | ||
Line 3486... | Line 3532... | ||
3486 | list_for_each_entry(vma, &obj->vma_list, vma_link) |
3532 | list_for_each_entry(vma, &obj->vma_list, obj_link) |
3487 | vma->node.color = cache_level; |
3533 | vma->node.color = cache_level; |
@@ -3955,12 +4001,22 @@
 	 */
 	ret = i915_gem_object_flush_active(obj);
 	if (ret)
 		goto unref;
 
-	BUILD_BUG_ON(I915_NUM_RINGS > 16);
-	args->busy = obj->active << 16;
-	if (obj->last_write_req)
-		args->busy |= obj->last_write_req->ring->id;
+	args->busy = 0;
+	if (obj->active) {
+		int i;
+
+		for (i = 0; i < I915_NUM_RINGS; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = obj->last_read_req[i];
+			if (req)
+				args->busy |= 1 << (16 + req->ring->exec_id);
+		}
+		if (obj->last_write_req)
+			args->busy |= obj->last_write_req->ring->exec_id;
+	}
 
 unref:
 	drm_gem_object_unreference(&obj->base);
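The busy word is now assembled per engine: bit (16 + exec_id) is set for every engine with an outstanding read, and the low 16 bits carry the exec_id of the last writer. A small decoder for that layout, assuming the encoding shown above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example: engines 0 and 2 still reading, engine 2 wrote last. */
	uint32_t busy = (1u << (16 + 0)) | (1u << (16 + 2)) | 2u;
	uint16_t write_engine = busy & 0xffff; /* low bits: last writer */
	uint16_t read_mask = busy >> 16;       /* high bits: one per reader */

	if (!busy) {
		puts("object is idle");
		return 0;
	}
	printf("last write by engine %u\n", write_engine);
	for (int i = 0; i < 16; i++)
		if (read_mask & (1u << i))
			printf("still read by engine %u\n", i);
	return 0;
}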
@@ -4134,7 +4190,7 @@
 
 	intel_runtime_pm_get(dev_priv);
 
 	trace_i915_gem_object_destroy(obj);
 
-	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
 		int ret;
@@ -4188,10 +4244,10 @@
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
 		    vma->vm == vm)
 			return vma;
 	}
@@ -4205,24 +4261,21 @@
 	struct i915_vma *vma;
 
 	if (WARN_ONCE(!view, "no view specified"))
 		return ERR_PTR(-EINVAL);
 
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
 }
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
-	struct i915_address_space *vm = NULL;
 	WARN_ON(vma->node.allocated);
 
 	/* Keep the vma as a placeholder in the execbuffer reservation lists */
 	if (!list_empty(&vma->exec_list))
 		return;
 
-	vm = vma->vm;
-
-	if (!i915_is_ggtt(vm))
+	if (!vma->is_ggtt)
4448 | * will prevent c3 entry. Makes sure all unused rings |
4501 | * will prevent c3 entry. Makes sure all unused rings |
4449 | * are totally idle. |
4502 | * are totally idle. |
4450 | */ |
4503 | */ |
4451 | init_unused_rings(dev); |
4504 | init_unused_rings(dev); |
Line 4452... | Line 4505... | ||
4452 | 4505 | ||
Line 4453... | Line 4506... | ||
4453 | BUG_ON(!dev_priv->ring[RCS].default_context); |
4506 | BUG_ON(!dev_priv->kernel_context); |
4454 | 4507 | ||
4455 | ret = i915_ppgtt_init_hw(dev); |
4508 | ret = i915_ppgtt_init_hw(dev); |
4456 | if (ret) { |
4509 | if (ret) { |
@@ -4485,11 +4538,10 @@
 
 	/* Now it is safe to go back round and do everything else: */
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		WARN_ON(!ring->default_context);
-
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret) {
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
4593 | struct intel_engine_cs *ring; |
4645 | struct intel_engine_cs *ring; |
4594 | int i; |
4646 | int i; |
Line 4595... | Line 4647... | ||
4595 | 4647 | ||
4596 | for_each_ring(ring, dev_priv, i) |
4648 | for_each_ring(ring, dev_priv, i) |
- | 4649 | dev_priv->gt.cleanup_ring(ring); |
|
- | 4650 | ||
- | 4651 | if (i915.enable_execlists) |
|
- | 4652 | /* |
|
- | 4653 | * Neither the BIOS, ourselves or any other kernel |
|
- | 4654 | * expects the system to be in execlists mode on startup, |
|
- | 4655 | * so we need to reset the GPU back to legacy mode. |
|
- | 4656 | */ |
|
4597 | dev_priv->gt.cleanup_ring(ring); |
4657 | intel_gpu_reset(dev); |
Line 4598... | Line 4658... | ||
4598 | } |
4658 | } |
4599 | 4659 | ||
4600 | static void |
4660 | static void |
4601 | init_ring_lists(struct intel_engine_cs *ring) |
4661 | init_ring_lists(struct intel_engine_cs *ring) |
4602 | { |
4662 | { |
4603 | INIT_LIST_HEAD(&ring->active_list); |
4663 | INIT_LIST_HEAD(&ring->active_list); |
Line 4604... | Line 4664... | ||
4604 | INIT_LIST_HEAD(&ring->request_list); |
4664 | INIT_LIST_HEAD(&ring->request_list); |
4605 | } |
4665 | } |
4606 | 4666 | ||
4607 | void |
4667 | void |
4608 | i915_gem_load(struct drm_device *dev) |
4668 | i915_gem_load_init(struct drm_device *dev) |
Line 4609... | Line 4669... | ||
4609 | { |
4669 | { |
@@ -4649,7 +4709,8 @@
 	/* Initialize fence registers to zero */
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	i915_gem_restore_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
 
 	dev_priv->mm.interruptible = true;
4701 | INIT_LIST_HEAD(&file_priv->rps.link); |
4762 | INIT_LIST_HEAD(&file_priv->rps.link); |
Line 4702... | Line 4763... | ||
4702 | 4763 | ||
4703 | spin_lock_init(&file_priv->mm.lock); |
4764 | spin_lock_init(&file_priv->mm.lock); |
Line -... | Line 4765... | ||
- | 4765 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
|
- | 4766 | ||
4704 | INIT_LIST_HEAD(&file_priv->mm.request_list); |
4767 | file_priv->bsd_ring = -1; |
4705 | 4768 | ||
4706 | ret = i915_gem_context_open(dev, file); |
4769 | ret = i915_gem_context_open(dev, file); |
Line 4707... | Line 4770... | ||
4707 | if (ret) |
4770 | if (ret) |
Line 4743... | Line 4806... | ||
4743 | struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
4806 | struct drm_i915_private *dev_priv = o->base.dev->dev_private; |
4744 | struct i915_vma *vma; |
4807 | struct i915_vma *vma; |
Line 4745... | Line 4808... | ||
4745 | 4808 | ||
Line 4746... | Line 4809... | ||
4746 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); |
4809 | WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); |
4747 | 4810 | ||
4748 | list_for_each_entry(vma, &o->vma_list, vma_link) { |
4811 | list_for_each_entry(vma, &o->vma_list, obj_link) { |
4749 | if (i915_is_ggtt(vma->vm) && |
4812 | if (vma->is_ggtt && |
4750 | vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) |
4813 | vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) |
4751 | continue; |
4814 | continue; |
4752 | if (vma->vm == vm) |
4815 | if (vma->vm == vm) |
Line 4762... | Line 4825... | ||
4762 | const struct i915_ggtt_view *view) |
4825 | const struct i915_ggtt_view *view) |
4763 | { |
4826 | { |
4764 | struct i915_address_space *ggtt = i915_obj_to_ggtt(o); |
4827 | struct i915_address_space *ggtt = i915_obj_to_ggtt(o); |
4765 | struct i915_vma *vma; |
4828 | struct i915_vma *vma; |
Line 4766... | Line 4829... | ||
4766 | 4829 | ||
4767 | list_for_each_entry(vma, &o->vma_list, vma_link) |
4830 | list_for_each_entry(vma, &o->vma_list, obj_link) |
4768 | if (vma->vm == ggtt && |
4831 | if (vma->vm == ggtt && |
4769 | i915_ggtt_view_equal(&vma->ggtt_view, view)) |
4832 | i915_ggtt_view_equal(&vma->ggtt_view, view)) |
Line 4770... | Line 4833... | ||
4770 | return vma->node.start; |
4833 | return vma->node.start; |
@@ -4776,11 +4839,11 @@
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 			struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
 			return true;
@@ -4793,9 +4856,9 @@
 				  const struct i915_ggtt_view *view)
 {
 	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 	struct i915_vma *vma;
 
-	list_for_each_entry(vma, &o->vma_list, vma_link)
+	list_for_each_entry(vma, &o->vma_list, obj_link)
 		if (vma->vm == ggtt &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
4806 | 4869 | ||
4807 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) |
4870 | bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) |
4808 | { |
4871 | { |
Line 4809... | Line 4872... | ||
4809 | struct i915_vma *vma; |
4872 | struct i915_vma *vma; |
4810 | 4873 | ||
4811 | list_for_each_entry(vma, &o->vma_list, vma_link) |
4874 | list_for_each_entry(vma, &o->vma_list, obj_link) |
Line 4812... | Line 4875... | ||
4812 | if (drm_mm_node_allocated(&vma->node)) |
4875 | if (drm_mm_node_allocated(&vma->node)) |
4813 | return true; |
4876 | return true; |
@@ -4823,9 +4886,9 @@
 
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	BUG_ON(list_empty(&o->vma_list));
 
-	list_for_each_entry(vma, &o->vma_list, vma_link) {
-		if (i915_is_ggtt(vma->vm) &&
+	list_for_each_entry(vma, &o->vma_list, obj_link) {
+		if (vma->is_ggtt &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
@@ -4836,9 +4899,9 @@
 	}
 
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	list_for_each_entry(vma, &obj->vma_list, vma_link)
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count > 0)
 			return true;