--- i915_gem.c	(Rev 5060)
+++ i915_gem.c	(Rev 5354)
@@ -71 +71 @@
 
 unsigned long vm_mmap(struct file *file, unsigned long addr,
 		      unsigned long len, unsigned long prot,
-		      unsigned long flag, unsigned long offset);
-
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
-}
+		      unsigned long flag, unsigned long offset);
+
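The deleted local clflush() wrapper duplicated a helper the kernel already provides; later hunks flush through drm_clflush_virt_range() instead. As a hedged sketch only (the real implementation lives in drm_cache.c and also handles clflushopt and non-x86), a range flush built on the same instruction looks roughly like this, assuming a 64-byte cache line:

	static void clflush_range_sketch(void *addr, unsigned long length)
	{
		const unsigned long cl = 64;	/* assumed cache-line size */
		char *p = (char *)((unsigned long)addr & ~(cl - 1));
		char *end = (char *)addr + length;

		mb();				/* order prior stores */
		for (; p < end; p += cl)
			asm volatile("clflush %0" : "+m" (*(volatile char *)p));
		mb();				/* make the flush globally visible */
	}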
@@ -96 +92 @@
 					    struct drm_i915_gem_object *obj);
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
-
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+
+
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -197 +193 @@
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
 	return i915_gem_obj_bound_any(obj) && !obj->active;
 }
-
-
-#if 0
-
-int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_init *args = data;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (args->gtt_start >= args->gtt_end ||
-	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
-		return -EINVAL;
-
-	/* GEM with user mode setting was never supported on ilk and later. */
-	if (INTEL_INFO(dev)->gen >= 5)
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
-				  args->gtt_end);
-	dev_priv->gtt.mappable_end = args->gtt_end;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-#endif
 
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
@@ -688 +653 @@
 		  bool page_do_bit17_swizzling,
 		  bool needs_clflush_before,
 		  bool needs_clflush_after)
 {
 	char *vaddr;
-	int ret = 0;
+	int ret;
 
 	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
-	vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
+	vaddr = kmap_atomic(page);
 	if (needs_clflush_before)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
 	memcpy(vaddr + shmem_page_offset,
 	       user_data,
 	       page_length);
 	if (needs_clflush_after)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
-	FreeKernelSpace(vaddr);
+	kunmap_atomic(vaddr);
 
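The switch from MapIoMem()/FreeKernelSpace() to kmap_atomic()/kunmap_atomic() buys a cheap, CPU-local mapping at the price of a no-sleep rule. A minimal sketch of the contract, reusing the names from the hunk above:

	/* Everything between map and unmap runs with preemption disabled,
	 * so no copy_from_user() or other sleeping calls are allowed in
	 * the window; the caller copies into a kernel buffer first. */
	char *vaddr = kmap_atomic(page);
	memcpy(vaddr + shmem_page_offset, user_data, page_length);
	kunmap_atomic(vaddr);		/* always pairs with kmap_atomic() */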
@@ -840 +805 @@
 //							partial_cacheline_write,
 //							needs_clflush_after);
 
 		mutex_lock(&dev->struct_mutex);
 
-next_page:
-
 		if (ret)
 			goto out;
 
+next_page:
 		remain -= page_length;
@@ -923 +887 @@
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-//	if (obj->phys_obj) {
-//		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-//		goto out;
-//	}
-
 	if (obj->tiling_mode == I915_TILING_NONE &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
 	    cpu_write_needs_clflush(obj)) {
 		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
 		/* Note that the gtt paths might fail with non-page-backed user
@@ -1005 +964 @@
 
 	return !atomic_xchg(&file_priv->rps_wait_boost, true);
 }
 
 /**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: duh!
  * @reset_counter: reset sequence associated with the given seqno
@@ -1022 +981 @@
  * inserted.
  *
  * Returns 0 if the seqno was found within the alloted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
 			struct drm_i915_file_private *file_priv)
 {
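can_wait_boost() above is the classic claim-once idiom: atomic_xchg() returns the previous value, so exactly one caller observes the old "false" and wins the right to boost. A self-contained sketch of the same idiom, with hypothetical names:

	static atomic_t claimed = ATOMIC_INIT(0);

	static bool try_claim_once(void)
	{
		/* the old value is 0 only for the very first caller */
		return !atomic_xchg(&claimed, 1);
	}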
@@ -1043 +1002 @@
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
+	timeout_expire = timeout ?
+		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
 
 	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
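The move to nsecs_to_jiffies_timeout() matters because plain nsecs_to_jiffies() rounds down, which can truncate a short timeout to zero jiffies. Its assumed shape (hedged; check i915_drv.h of this vintage for the real definition) is:

	static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
	{
		/* round up by one jiffy so the caller waits at least n ns */
		return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies(n) + 1);
	}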
@@ -1120 +1080 @@
 i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible = dev_priv->mm.interruptible;
+	unsigned reset_counter;
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1133 +1094 @@
 
 	ret = i915_gem_check_olr(ring, seqno);
 	if (ret)
 		return ret;
 
-	return __wait_seqno(ring, seqno,
-			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL, NULL);
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+				 NULL, NULL);
 }
 
 static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-				     struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 {
@@ -1177 +1137 @@
 
 	ret = i915_wait_seqno(ring, seqno);
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj);
 }
 
@@ -1212 +1172 @@
 	if (ret)
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+				file_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj);
 }
 
@@ -1324 +1285 @@
  * Maps the contents of an object, returning the address it is mapped
  * into.
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look a this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on, hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		    struct drm_file *file)
 {
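The IMPORTANT note added above recommends the mmap-offset flow instead of a driver-private mmap ioctl. A hedged userspace sketch of that flow (error handling elided; map_gem_gtt is a made-up helper name, the ioctl and struct are the real i915 UAPI):

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <drm/i915_drm.h>

	void *map_gem_gtt(int drm_fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
			return NULL;
		/* arg.offset is a lookup token, not a real file offset */
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    drm_fd, arg.offset);
	}

Because the mapping goes through the DRM fd itself, tools like valgrind can track it like any other mmap.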
@@ -1691 +1662 @@
 		if (IS_ERR(page)) {
 			dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
 			goto err_pages;
 
 		}
-
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
 		if (!i || page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
 			st->nents++;
 			sg_set_page(sg, page, PAGE_SIZE, 0);
 		} else {
 			sg->length += PAGE_SIZE;
 		}
 		last_pfn = page_to_pfn(page);
 	}
-
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
 	sg_mark_end(sg);
 	obj->pages = st;
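The loop above merges physically consecutive pages into one scatterlist segment, except under SWIOTLB, where each page must stay in its own segment so it can bounce independently. A plain-C illustration of the merging rule (hypothetical helper, not driver code):

	/* pfns {100, 101, 102, 200} -> 2 segments (3 pages + 1 page);
	 * with SWIOTLB active the driver instead emits 4 one-page segments. */
	static int count_sg_segments(const unsigned long *pfn, int n)
	{
		int i, segs = 0;

		for (i = 0; i < n; i++)
			if (i == 0 || pfn[i] != pfn[i - 1] + 1)
				segs++;		/* a new run starts here */
		return segs;
	}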
@@ -1754 +1734 @@
 
 static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
@@ -1774 +1752 @@
 	}
 
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
-
-	if (obj->fenced_gpu_access) {
-		obj->last_fenced_seqno = seqno;
-
-		/* Bump MRU to take account of the delayed flush */
-		if (obj->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg;
-
-			reg = &dev_priv->fence_regs[obj->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-	}
 }
 
@@ -1812 +1777 @@
 	vma = i915_gem_obj_to_vma(obj, vm);
 	if (vma && !list_empty(&vma->mm_list))
 		list_move_tail(&vma->mm_list, &vm->inactive_list);
 	}
 
+	intel_fb_obj_flush(obj, true);
+
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
 	obj->last_read_seqno = 0;
 	obj->last_write_seqno = 0;
 	obj->base.write_domain = 0;
 
 	obj->last_fenced_seqno = 0;
-	obj->fenced_gpu_access = false;
@@ -1918 +1884 @@
 		   struct drm_i915_gem_object *obj,
 		   u32 *out_seqno)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf;
 	u32 request_ring_position, request_start;
 	int ret;
 
-	request_start = intel_ring_get_tail(ring->buffer);
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
+		return -ENOMEM;
+
+	if (i915.enable_execlists) {
+		struct intel_context *ctx = request->ctx;
+		ringbuf = ctx->engine[ring->id].ringbuf;
+	} else
+		ringbuf = ring->buffer;
+
+	request_start = intel_ring_get_tail(ringbuf);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
 	 * after having emitted the batchbuffer command. Hence we need to fix
 	 * things up similar to emitting the lazy request. The difference here
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
+	if (i915.enable_execlists) {
+		ret = logical_ring_flush_all_caches(ringbuf);
+		if (ret)
+			return ret;
+	} else {
 		ret = intel_ring_flush_all_caches(ring);
 		if (ret)
 			return ret;
-
-	request = ring->preallocated_lazy_request;
-	if (WARN_ON(request == NULL))
-		return -ENOMEM;
+	}
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request_ring_position = intel_ring_get_tail(ring->buffer);
+	request_ring_position = intel_ring_get_tail(ringbuf);
 
+	if (i915.enable_execlists) {
+		ret = ring->emit_request(ringbuf);
+		if (ret)
+			return ret;
+	} else {
 	ret = ring->add_request(ring);
 	if (ret)
 		return ret;
+	}
 
@@ -1961 +1946 @@
 	 * inactive_list and lose its active reference. Hence we do not need
 	 * to explicitly hold another reference here.
 	 */
 	request->batch_obj = obj;
 
+	if (!i915.enable_execlists) {
 	/* Hold a reference to the current context so that we can inspect
 	 * it later in case a hangcheck error event fires.
 	 */
 	request->ctx = ring->last_context;
 	if (request->ctx)
 		i915_gem_context_reference(request->ctx);
+	}
 
 	request->emitted_jiffies = jiffies;
@@ -1986 +1973 @@
 
 	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_seqno = 0;
 	ring->preallocated_lazy_request = NULL;
 
-	if (!dev_priv->ums.mm_suspended) {
 //	i915_queue_hangcheck(ring->dev);
 
 	queue_delayed_work(dev_priv->wq,
 			   &dev_priv->mm.retire_work,
 			   round_jiffies_up_relative(HZ));
 	intel_mark_busy(dev_priv->dev);
-	}
 
@@ -2060 +2045 @@
 	}
 }
 
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
+	struct intel_context *ctx = request->ctx;
+
 	list_del(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	if (request->ctx)
-		i915_gem_context_unreference(request->ctx);
+	if (ctx) {
+		if (i915.enable_execlists) {
+			struct intel_engine_cs *ring = request->ring;
 
+			if (ctx != ring->default_context)
+				intel_lr_context_unpin(ring, ctx);
+		}
+		i915_gem_context_unreference(ctx);
+	}
 	kfree(request);
2120 | 2113 | ||
2121 | i915_gem_object_move_to_inactive(obj); |
2114 | i915_gem_object_move_to_inactive(obj); |
Line 2122... | Line 2115... | ||
2122 | } |
2115 | } |
- | 2116 | ||
- | 2117 | /* |
|
- | 2118 | * Clear the execlists queue up before freeing the requests, as those |
|
- | 2119 | * are the ones that keep the context and ringbuffer backing objects |
|
- | 2120 | * pinned in place. |
|
- | 2121 | */ |
|
- | 2122 | while (!list_empty(&ring->execlist_queue)) { |
|
- | 2123 | struct intel_ctx_submit_request *submit_req; |
|
- | 2124 | ||
- | 2125 | submit_req = list_first_entry(&ring->execlist_queue, |
|
- | 2126 | struct intel_ctx_submit_request, |
|
- | 2127 | execlist_link); |
|
- | 2128 | list_del(&submit_req->execlist_link); |
|
- | 2129 | intel_runtime_pm_put(dev_priv); |
|
- | 2130 | i915_gem_context_unreference(submit_req->ctx); |
|
- | 2131 | kfree(submit_req); |
|
- | 2132 | } |
|
2123 | 2133 | ||
2124 | /* |
2134 | /* |
2125 | * We must free the requests after all the corresponding objects have |
2135 | * We must free the requests after all the corresponding objects have |
2126 | * been moved off active lists. Which is the same order as the normal |
2136 | * been moved off active lists. Which is the same order as the normal |
2127 | * retire_requests function does. This is important if object hold |
2137 | * retire_requests function does. This is important if object hold |
@@ -2220 +2230 @@
 	}
 
 
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 
 		if (!i915_seqno_passed(seqno, request->seqno))
 			break;
 
 		trace_i915_gem_request_retire(ring, request->seqno);
+
+		/* This is one of the few common intersection points
+		 * between legacy ringbuffer submission and execlists:
+		 * we need to tell them apart in order to find the correct
+		 * ringbuffer to which the request belongs to.
+		 */
+		if (i915.enable_execlists) {
+			struct intel_context *ctx = request->ctx;
+			ringbuf = ctx->engine[ring->id].ringbuf;
+		} else
+			ringbuf = ring->buffer;
+
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
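The "common intersection point" comment above marks where legacy and execlists submission meet. Factored out, the lookup could read as below; this is an illustrative helper only (the driver open-codes it here), using the types from the hunk above:

	static struct intel_ringbuffer *
	request_ringbuf(struct intel_engine_cs *ring,
			struct drm_i915_gem_request *request)
	{
		if (i915.enable_execlists)	/* per-context ringbuffer */
			return request->ctx->engine[ring->id].ringbuf;
		return ring->buffer;		/* single legacy ring */
	}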
@@ -2259 +2282 @@
 	int i;
 
 	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
+		if (i915.enable_execlists) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&ring->execlist_lock, flags);
+			idle &= list_empty(&ring->execlist_queue);
+			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+			intel_execlists_retire_requests(ring);
+		}
 	}
 
 	if (idle)
2265 | 2297 | ||
2266 | if (idle) |
2298 | if (idle) |
Line 2351... | Line 2383... | ||
2351 | struct intel_engine_cs *ring = NULL; |
2383 | struct intel_engine_cs *ring = NULL; |
2352 | unsigned reset_counter; |
2384 | unsigned reset_counter; |
2353 | u32 seqno = 0; |
2385 | u32 seqno = 0; |
2354 | int ret = 0; |
2386 | int ret = 0; |
Line -... | Line 2387... | ||
- | 2387 | ||
- | 2388 | if (args->flags != 0) |
|
- | 2389 | return -EINVAL; |
|
2355 | 2390 | ||
2356 | ret = i915_mutex_lock_interruptible(dev); |
2391 | ret = i915_mutex_lock_interruptible(dev); |
2357 | if (ret) |
2392 | if (ret) |
Line 2358... | Line 2393... | ||
2358 | return ret; |
2393 | return ret; |
Line 2386... | Line 2421... | ||
2386 | 2421 | ||
2387 | drm_gem_object_unreference(&obj->base); |
2422 | drm_gem_object_unreference(&obj->base); |
2388 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
2423 | reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
Line 2389... | Line 2424... | ||
2389 | mutex_unlock(&dev->struct_mutex); |
2424 | mutex_unlock(&dev->struct_mutex); |
2390 | 2425 | ||
Line 2391... | Line 2426... | ||
2391 | return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns, |
2426 | return __i915_wait_seqno(ring, seqno, reset_counter, true, |
2392 | file->driver_priv); |
2427 | &args->timeout_ns, file->driver_priv); |
2393 | 2428 | ||
2394 | out: |
2429 | out: |
Line 2499... | Line 2534... | ||
2499 | /* Continue on if we fail due to EIO, the GPU is hung so we |
2534 | /* Continue on if we fail due to EIO, the GPU is hung so we |
2500 | * should be safe and we need to cleanup or else we might |
2535 | * should be safe and we need to cleanup or else we might |
2501 | * cause memory corruption through use-after-free. |
2536 | * cause memory corruption through use-after-free. |
2502 | */ |
2537 | */ |
Line -... | Line 2538... | ||
- | 2538 | ||
- | 2539 | /* Throw away the active reference before moving to the unbound list */ |
|
- | 2540 | i915_gem_object_retire(obj); |
|
2503 | 2541 | ||
2504 | if (i915_is_ggtt(vma->vm)) { |
2542 | if (i915_is_ggtt(vma->vm)) { |
Line 2505... | Line 2543... | ||
2505 | i915_gem_object_finish_gtt(obj); |
2543 | i915_gem_object_finish_gtt(obj); |
2506 | 2544 | ||
Line 2513... | Line 2551... | ||
2513 | trace_i915_vma_unbind(vma); |
2551 | trace_i915_vma_unbind(vma); |
Line 2514... | Line 2552... | ||
2514 | 2552 | ||
Line 2515... | Line 2553... | ||
2515 | vma->unbind_vma(vma); |
2553 | vma->unbind_vma(vma); |
2516 | - | ||
2517 | list_del_init(&vma->mm_list); |
2554 | |
2518 | /* Avoid an unnecessary call to unbind on rebind. */ |
2555 | list_del_init(&vma->mm_list); |
Line 2519... | Line 2556... | ||
2519 | if (i915_is_ggtt(vma->vm)) |
2556 | if (i915_is_ggtt(vma->vm)) |
2520 | obj->map_and_fenceable = true; |
2557 | obj->map_and_fenceable = false; |
Line 2521... | Line 2558... | ||
2521 | 2558 | ||
Line 2544... | Line 2581... | ||
2544 | struct intel_engine_cs *ring; |
2581 | struct intel_engine_cs *ring; |
2545 | int ret, i; |
2582 | int ret, i; |
Line 2546... | Line 2583... | ||
2546 | 2583 | ||
2547 | /* Flush everything onto the inactive list. */ |
2584 | /* Flush everything onto the inactive list. */ |
- | 2585 | for_each_ring(ring, dev_priv, i) { |
|
2548 | for_each_ring(ring, dev_priv, i) { |
2586 | if (!i915.enable_execlists) { |
2549 | ret = i915_switch_context(ring, ring->default_context); |
2587 | ret = i915_switch_context(ring, ring->default_context); |
2550 | if (ret) |
2588 | if (ret) |
- | 2589 | return ret; |
|
Line 2551... | Line 2590... | ||
2551 | return ret; |
2590 | } |
2552 | 2591 | ||
2553 | ret = intel_ring_idle(ring); |
2592 | ret = intel_ring_idle(ring); |
2554 | if (ret) |
2593 | if (ret) |
@@ -2705 +2744 @@
 	WARN(obj && (!obj->stride || !obj->tiling_mode),
 	     "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
 	     obj->stride, obj->tiling_mode);
 
 	switch (INTEL_INFO(dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
 	case 5:
@@ -2760 +2800 @@
 			return ret;
 
 		obj->last_fenced_seqno = 0;
 	}
 
-	obj->fenced_gpu_access = false;
 	return 0;
 }
@@ -2867 +2906 @@
 			list_move_tail(&reg->lru_list,
 				       &dev_priv->mm.fence_list);
 			return 0;
 		}
 	} else if (enable) {
+		if (WARN_ON(!obj->map_and_fenceable))
+			return -EINVAL;
+
 		reg = i915_find_fence_reg(dev);
 		if (IS_ERR(reg))
 			return PTR_ERR(reg);
@@ -2888 +2930 @@
 	i915_gem_object_update_fence(obj, reg, enable);
 
 	return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-				     struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 				     unsigned long cache_level)
 {
+	struct drm_mm_node *gtt_space = &vma->node;
 	struct drm_mm_node *other;
 
-	/* On non-LLC machines we have to be careful when putting differing
-	 * types of snoopable memory together to avoid the prefetcher
-	 * crossing memory domains and dying.
+	/*
+	 * On some machines we have to be careful when putting differing types
+	 * of snoopable memory together to avoid the prefetcher crossing memory
+	 * domains and dying. During vm initialisation, we decide whether or not
+	 * these constraints apply and set the drm_mm.color_adjust
+	 * appropriately.
 	 */
-	if (HAS_LLC(dev))
+	if (vma->vm->mm.color_adjust == NULL)
@@ -2918 +2963 @@
 		return false;
 
 	return true;
 }
-
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	int err = 0;
-
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-		if (obj->gtt_space == NULL) {
-			printk(KERN_ERR "object found on GTT list with no space reserved\n");
-			err++;
-			continue;
-		}
-
-		if (obj->cache_level != obj->gtt_space->color) {
-			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level,
-			       obj->gtt_space->color);
-			err++;
-			continue;
-		}
-
-		if (!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
-					      obj->cache_level)) {
-			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level);
-			err++;
-			continue;
-		}
-	}
-
-	WARN_ON(err);
-#endif
-}
 
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
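The rewritten comment points at drm_mm.color_adjust: the range allocator calls a driver hook so nodes of different cache "colors" never end up adjacent. A hedged, simplified sketch of such a hook (the real one is set up in i915_gem_gtt.c and also trims *end against the following node):

	static void color_adjust_sketch(struct drm_mm_node *node,
					unsigned long color,
					unsigned long *start,
					unsigned long *end)
	{
		/* keep one guard page between differently-colored nodes */
		if (node->allocated && node->color != color)
			*start += PAGE_SIZE;
	}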
@@ -3030 +3035 @@
 					  DRM_MM_CREATE_DEFAULT);
 	if (ret) {
 
 		goto err_free_vma;
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-					      obj->cache_level))) {
+	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
 		ret = -EINVAL;
 		goto err_remove_node;
 	}
@@ -3043 +3047 @@
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-	if (i915_is_ggtt(vm)) {
-		bool mappable, fenceable;
-
-		fenceable = (vma->node.size == fence_size &&
-			     (vma->node.start & (fence_alignment - 1)) == 0);
-
-		mappable = (vma->node.start + obj->base.size <=
-			    dev_priv->gtt.mappable_end);
-
-		obj->map_and_fenceable = mappable && fenceable;
-	}
-
-	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
 	trace_i915_vma_bind(vma, flags);
 	vma->bind_vma(vma, obj->cache_level,
-		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+		      flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
-	i915_gem_verify_gtt(dev);
 	return vma;
|
3066 | i915_gem_verify_gtt(dev); |
3055 | |
3067 | return vma; |
3056 | return vma; |
Line 3089... | Line 3078... | ||
3089 | 3078 | ||
3090 | /* |
3079 | /* |
3091 | * Stolen memory is always coherent with the GPU as it is explicitly |
3080 | * Stolen memory is always coherent with the GPU as it is explicitly |
3092 | * marked as wc by the system, or the system is cache-coherent. |
3081 | * marked as wc by the system, or the system is cache-coherent. |
3093 | */ |
3082 | */ |
3094 | if (obj->stolen) |
3083 | if (obj->stolen || obj->phys_handle) |
Line 3095... | Line 3084... | ||
3095 | return false; |
3084 | return false; |
3096 | 3085 | ||
3097 | /* If the GPU is snooping the contents of the CPU cache, |
3086 | /* If the GPU is snooping the contents of the CPU cache, |
Line 3131... | Line 3120... | ||
3131 | wmb(); |
3120 | wmb(); |
Line 3132... | Line 3121... | ||
3132 | 3121 | ||
3133 | old_write_domain = obj->base.write_domain; |
3122 | old_write_domain = obj->base.write_domain; |
Line -... | Line 3123... | ||
- | 3123 | obj->base.write_domain = 0; |
|
- | 3124 | ||
3134 | obj->base.write_domain = 0; |
3125 | intel_fb_obj_flush(obj, false); |
3135 | 3126 | ||
3136 | trace_i915_gem_object_change_domain(obj, |
3127 | trace_i915_gem_object_change_domain(obj, |
3137 | obj->base.read_domains, |
3128 | obj->base.read_domains, |
Line 3152... | Line 3143... | ||
3152 | i915_gem_chipset_flush(obj->base.dev); |
3143 | i915_gem_chipset_flush(obj->base.dev); |
Line 3153... | Line 3144... | ||
3153 | 3144 | ||
3154 | old_write_domain = obj->base.write_domain; |
3145 | old_write_domain = obj->base.write_domain; |
Line -... | Line 3146... | ||
- | 3146 | obj->base.write_domain = 0; |
|
- | 3147 | ||
3155 | obj->base.write_domain = 0; |
3148 | intel_fb_obj_flush(obj, false); |
3156 | 3149 | ||
3157 | trace_i915_gem_object_change_domain(obj, |
3150 | trace_i915_gem_object_change_domain(obj, |
3158 | obj->base.read_domains, |
3151 | obj->base.read_domains, |
Line 3167... | Line 3160... | ||
3167 | */ |
3160 | */ |
3168 | int |
3161 | int |
3169 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
3162 | i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) |
3170 | { |
3163 | { |
3171 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
3164 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
- | 3165 | struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); |
|
3172 | uint32_t old_write_domain, old_read_domains; |
3166 | uint32_t old_write_domain, old_read_domains; |
3173 | int ret; |
3167 | int ret; |
Line 3174... | Line 3168... | ||
3174 | 3168 | ||
3175 | /* Not valid to be called on unbound objects. */ |
3169 | /* Not valid to be called on unbound objects. */ |
3176 | if (!i915_gem_obj_bound_any(obj)) |
3170 | if (vma == NULL) |
Line 3177... | Line 3171... | ||
3177 | return -EINVAL; |
3171 | return -EINVAL; |
3178 | 3172 | ||
Line 3205... | Line 3199... | ||
3205 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
3199 | obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
3206 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
3200 | obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
3207 | obj->dirty = 1; |
3201 | obj->dirty = 1; |
3208 | } |
3202 | } |
Line -... | Line 3203... | ||
- | 3203 | ||
- | 3204 | if (write) |
|
- | 3205 | intel_fb_obj_invalidate(obj, NULL); |
|
3209 | 3206 | ||
3210 | trace_i915_gem_object_change_domain(obj, |
3207 | trace_i915_gem_object_change_domain(obj, |
3211 | old_read_domains, |
3208 | old_read_domains, |
Line 3212... | Line 3209... | ||
3212 | old_write_domain); |
3209 | old_write_domain); |
3213 | 3210 | ||
3214 | /* And bump the LRU for this access */ |
- | |
3215 | if (i915_gem_object_is_inactive(obj)) { |
- | |
3216 | struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); |
3211 | /* And bump the LRU for this access */ |
3217 | if (vma) |
3212 | if (i915_gem_object_is_inactive(obj)) |
Line 3218... | Line -... | ||
3218 | list_move_tail(&vma->mm_list, |
- | |
3219 | &dev_priv->gtt.base.inactive_list); |
- | |
3220 | 3213 | list_move_tail(&vma->mm_list, |
|
3221 | } |
3214 | &dev_priv->gtt.base.inactive_list); |
Line 3222... | Line 3215... | ||
3222 | 3215 | ||
3223 | return 0; |
3216 | return 0; |
@@ -3237 +3230 @@
 		DRM_DEBUG("can not change the cache level of pinned objects\n");
 		return -EBUSY;
 	}
 
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
 		}
@@ -3264 +3257 @@
 	}
 
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
 			if (drm_mm_node_allocated(&vma->node))
 				vma->bind_vma(vma, cache_level,
-					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+					      vma->bound & GLOBAL_BIND);
 	}
 
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3294 +3287 @@
 		trace_i915_gem_object_change_domain(obj,
 						    old_read_domains,
 						    old_write_domain);
 	}
 
-	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
@@ -3380 +3372 @@
 
 static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	if (list_empty(&obj->vma_list))
-		return false;
-
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (!vma)
@@ -3541 +3530 @@
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
+	if (write)
+		intel_fb_obj_invalidate(obj, NULL);
+
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
 					    old_write_domain);
3540 | old_write_domain); |
Line 3592... | Line 3584... | ||
3592 | spin_unlock(&file_priv->mm.lock); |
3584 | spin_unlock(&file_priv->mm.lock); |
Line 3593... | Line 3585... | ||
3593 | 3585 | ||
3594 | if (seqno == 0) |
3586 | if (seqno == 0) |
Line 3595... | Line 3587... | ||
3595 | return 0; |
3587 | return 0; |
3596 | 3588 | ||
3597 | ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL); |
3589 | ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL); |
Line 3598... | Line 3590... | ||
3598 | if (ret == 0) |
3590 | if (ret == 0) |
3599 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
3591 | queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
@@ -3626 +3618 @@
 			    uint32_t alignment,
 			    uint64_t flags)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
+	unsigned bound;
 	int ret;
 
 	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
 		return -ENODEV;
 
 	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
 		return -EINVAL;
 
+	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+		return -EINVAL;
+
 	vma = i915_gem_obj_to_vma(obj, vm);
@@ -3655 +3651 @@
 
 			vma = NULL;
 		}
 	}
 
+	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
 		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}
 
-	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
 		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+
+	if ((bound ^ vma->bound) & GLOBAL_BIND) {
+		bool mappable, fenceable;
+		u32 fence_size, fence_alignment;
+
+		fence_size = i915_gem_get_gtt_size(obj->base.dev,
+						   obj->base.size,
+						   obj->tiling_mode);
+		fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+							     obj->base.size,
+							     obj->tiling_mode,
+							     true);
+
+		fenceable = (vma->node.size == fence_size &&
+			     (vma->node.start & (fence_alignment - 1)) == 0);
+
+		mappable = (vma->node.start + obj->base.size <=
+			    dev_priv->gtt.mappable_end);
+
+		obj->map_and_fenceable = mappable && fenceable;
+	}
+
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
@@ -3718 +3738 @@
 {
 	struct drm_i915_gem_pin *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -3774 +3794 @@
 {
 	struct drm_i915_gem_pin *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -3853 +3876 @@
 
 int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
Line 3860... | Line 3884... | ||
3860 | int ret; |
3884 | int ret; |
Line 3880... | Line 3904... | ||
3880 | if (i915_gem_obj_is_pinned(obj)) { |
3904 | if (i915_gem_obj_is_pinned(obj)) { |
3881 | ret = -EINVAL; |
3905 | ret = -EINVAL; |
3882 | goto out; |
3906 | goto out; |
3883 | } |
3907 | } |
Line -... | Line 3908... | ||
- | 3908 | ||
- | 3909 | if (obj->pages && |
|
- | 3910 | obj->tiling_mode != I915_TILING_NONE && |
|
- | 3911 | dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) { |
|
- | 3912 | if (obj->madv == I915_MADV_WILLNEED) |
|
- | 3913 | i915_gem_object_unpin_pages(obj); |
|
- | 3914 | if (args->madv == I915_MADV_WILLNEED) |
|
- | 3915 | i915_gem_object_pin_pages(obj); |
|
- | 3916 | } |
|
3884 | 3917 | ||
3885 | if (obj->madv != __I915_MADV_PURGED) |
3918 | if (obj->madv != __I915_MADV_PURGED) |
Line 3886... | Line 3919... | ||
3886 | obj->madv = args->madv; |
3919 | obj->madv = args->madv; |
3887 | 3920 | ||
@@ -3909 +3942 @@
 
 	obj->ops = ops;
 
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	obj->madv = I915_MADV_WILLNEED;
-	/* Avoid an unnecessary call to unbind on the first bind. */
-	obj->map_and_fenceable = true;
 
@@ -3999 +4030 @@
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
 	WARN_ON(obj->frontbuffer_bits);
 
+	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+	    obj->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_unpin_pages(obj);
+
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
Line 4037... | Line 4073... | ||
4037 | 4073 | ||
4038 | void i915_gem_vma_destroy(struct i915_vma *vma) |
4074 | void i915_gem_vma_destroy(struct i915_vma *vma) |
- | 4075 | { |
|
4039 | { |
4076 | struct i915_address_space *vm = NULL; |
Line 4040... | Line 4077... | ||
4040 | WARN_ON(vma->node.allocated); |
4077 | WARN_ON(vma->node.allocated); |
4041 | 4078 | ||
4042 | /* Keep the vma as a placeholder in the execbuffer reservation lists */ |
4079 | /* Keep the vma as a placeholder in the execbuffer reservation lists */ |
Line -... | Line 4080... | ||
- | 4080 | if (!list_empty(&vma->exec_list)) |
|
- | 4081 | return; |
|
- | 4082 | ||
- | 4083 | vm = vma->vm; |
|
- | 4084 | ||
4043 | if (!list_empty(&vma->exec_list)) |
4085 | if (!i915_is_ggtt(vm)) |
Line 4044... | Line 4086... | ||
4044 | return; |
4086 | i915_ppgtt_put(i915_vm_to_ppgtt(vm)); |
4045 | 4087 | ||
@@ -4054 +4096 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->ums.mm_suspended)
-		goto err;
-
 	ret = i915_gpu_idle(dev);
 	if (ret)
 		goto err;
 
 	i915_gem_retire_requests(dev);
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	i915_kernel_lost_context(dev);
 	i915_gem_stop_ringbuffers(dev);
-
-	/* Hack! Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
-							     DRIVER_MODESET);
Line 4162... | Line 4193...
 	}
 
 	return true;
 }
+
+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(RING_CTL(base), 0);
+	I915_WRITE(RING_HEAD(base), 0);
+	I915_WRITE(RING_TAIL(base), 0);
+	I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+	if (IS_I830(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+		init_unused_ring(dev, SRB2_BASE);
+		init_unused_ring(dev, SRB3_BASE);
+	} else if (IS_GEN2(dev)) {
+		init_unused_ring(dev, SRB0_BASE);
+		init_unused_ring(dev, SRB1_BASE);
+	} else if (IS_GEN3(dev)) {
+		init_unused_ring(dev, PRB1_BASE);
+		init_unused_ring(dev, PRB2_BASE);
+	}
+}
 
-static int i915_gem_init_rings(struct drm_device *dev)
+int i915_gem_init_rings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/*
+	 * At least 830 can leave some of the unused rings
+	 * "active" (ie. head != tail) after resume which
+	 * will prevent c3 entry. Makes sure all unused rings
+	 * are totally idle.
+	 */
+	init_unused_rings(dev);
+
 	ret = intel_init_render_ring_buffer(dev);
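The new helpers quiet rings the driver never touches, because (per the new comment) hardware that wakes up with HEAD != TAIL considers the ring busy and refuses to enter C3. A compilable toy model of that invariant (register block and values invented; only the idle condition is the point):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ring_regs { uint32_t ctl, head, tail, start; };

/* The hardware treats a ring as busy whenever HEAD != TAIL. */
static bool ring_idle(const struct ring_regs *r)
{
    return r->head == r->tail;
}

/* What init_unused_ring() does: zero all four ring registers. */
static void quiet_ring(struct ring_regs *r)
{
    r->ctl = r->head = r->tail = r->start = 0;
}

int main(void)
{
    /* BIOS/resume can leave garbage behind. */
    struct ring_regs srb0 = { .head = 0x40, .tail = 0x80 };

    assert(!ring_idle(&srb0));  /* would block C3 entry */
    quiet_ring(&srb0);
    assert(ring_idle(&srb0));
    return 0;
}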
Line 4245... | Line 4311...
 		}
 	}
 
 	i915_gem_init_swizzling(dev);
 
-	ret = i915_gem_init_rings(dev);
+	ret = dev_priv->gt.init_rings(dev);
 	if (ret)
 		return ret;
 
Line 4263... | Line 4329...
 	 */
 	ret = i915_gem_context_enable(dev_priv);
 	if (ret && ret != -EIO) {
 		DRM_ERROR("Context enable failed %d\n", ret);
 		i915_gem_cleanup_ringbuffer(dev);
+
+		return ret;
+	}
+
+	ret = i915_ppgtt_init_hw(dev);
+	if (ret && ret != -EIO) {
+		DRM_ERROR("PPGTT enable failed %d\n", ret);
+		i915_gem_cleanup_ringbuffer(dev);
 	}
 
 	return ret;
 }
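Note that both error checks deliberately let -EIO through without unwinding: as the surrounding code suggests, -EIO at init time is read as "GPU is wedged, carry on without acceleration", while any other error tears the rings down again. A small standalone illustration of the convention (function name invented):

#include <errno.h>
#include <stdio.h>

/* Nonzero when a setup step should unwind ring state, mirroring the
 * "ret && ret != -EIO" tests above; -EIO is propagated but tolerated. */
static int should_unwind(int ret)
{
    return ret && ret != -EIO;
}

int main(void)
{
    printf("0:       unwind=%d\n", should_unwind(0));        /* 0 */
    printf("-EIO:    unwind=%d\n", should_unwind(-EIO));     /* 0 */
    printf("-ENOMEM: unwind=%d\n", should_unwind(-ENOMEM));  /* 1 */
    return 0;
}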
 
 int i915_gem_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+			i915.enable_execlists);
+
 	mutex_lock(&dev->struct_mutex);
 
 	if (IS_VALLEYVIEW(dev)) {
 		/* VLVA0 (potential hack), BIOS isn't actually waking us */
 		I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
 		if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
 			      VLV_GTLC_ALLOWWAKEACK), 10))
 			DRM_DEBUG_DRIVER("allow wake ack timed out\n");
 	}
+
+	if (!i915.enable_execlists) {
+		dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+		dev_priv->gt.init_rings = i915_gem_init_rings;
+		dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+		dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+	} else {
+		dev_priv->gt.do_execbuf = intel_execlists_submission;
+		dev_priv->gt.init_rings = intel_logical_rings_init;
+		dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+		dev_priv->gt.stop_ring = intel_logical_ring_stop;
+	}
+
+//	ret = i915_gem_init_userptr(dev);
+//	if (ret) {
+//		mutex_unlock(&dev->struct_mutex);
+//		return ret;
+//	}
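This hunk is the heart of the execlists split: submission, ring init, ring cleanup and ring stop become function pointers in dev_priv->gt, chosen once from i915.enable_execlists, so the rest of the GEM code never branches on the submission mode again. A self-contained sketch of the same dispatch-table pattern (shortened names and invented printouts; the real table lives in the driver's private headers):

#include <stdbool.h>
#include <stdio.h>

struct gt_funcs {
    int  (*init_rings)(void);
    void (*cleanup_ring)(void);
};

static int  legacy_init(void)    { puts("legacy ringbuffer init"); return 0; }
static void legacy_cleanup(void) { puts("legacy ringbuffer cleanup"); }
static int  lrc_init(void)       { puts("execlists (logical ring) init"); return 0; }
static void lrc_cleanup(void)    { puts("execlists (logical ring) cleanup"); }

int main(void)
{
    bool enable_execlists = true;  /* stands in for i915.enable_execlists */
    struct gt_funcs gt;

    if (!enable_execlists) {
        gt.init_rings   = legacy_init;
        gt.cleanup_ring = legacy_cleanup;
    } else {
        gt.init_rings   = lrc_init;
        gt.cleanup_ring = lrc_cleanup;
    }

    /* callers go through the table from here on, as i915_gem_init_hw()
     * and i915_gem_cleanup_ringbuffer() now do */
    gt.init_rings();
    gt.cleanup_ring();
    return 0;
}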
Line 4314... | Line 4409...
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	int i;
 
 	for_each_ring(ring, dev_priv, i)
-		intel_cleanup_ring_buffer(ring);
+		dev_priv->gt.cleanup_ring(ring);
 }
-
-#if 0
-
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return 0;
-
-	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-		DRM_ERROR("Reenabling wedged hardware, good luck\n");
-		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	dev_priv->ums.mm_suspended = 0;
-
-	ret = i915_gem_init_hw(dev);
-	if (ret != 0) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
-	ret = drm_irq_install(dev, dev->pdev->irq);
-	if (ret)
-		goto cleanup_ringbuffer;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-
-cleanup_ringbuffer:
-	i915_gem_cleanup_ringbuffer(dev);
-	dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return 0;
-
-	mutex_lock(&dev->struct_mutex);
-	drm_irq_uninstall(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	ret = i915_gem_suspend(dev);
-	if (ret)
-		DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-#endif
 
 static void
 init_ring_lists(struct intel_engine_cs *ring)
 {
Line 4431... | Line 4455...
 	INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
 			  i915_gem_idle_work_handler);
 	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-	if (IS_GEN3(dev)) {
+	if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
 		I915_WRITE(MI_ARB_STATE,
 			   _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
 	}
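MI_ARB_STATE is one of the i915's "masked" registers: the high 16 bits of the written value select which of the low 16 bits take effect, which is why _MASKED_BIT_ENABLE() lets the driver skip a read-modify-write. A toy model of such a write (the bit position used here is invented; only the masking scheme is the point):

#include <assert.h>
#include <stdint.h>

/* Same shape as the i915 helpers: duplicate the bit into the high
 * half as the write-enable mask. */
#define MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
#define MASKED_BIT_DISABLE(a) ((a) << 16)

/* Hardware-side behaviour of a masked register write. */
static uint32_t masked_write(uint32_t reg, uint32_t val)
{
    uint32_t mask = val >> 16;

    return (reg & ~mask) | (val & mask & 0xffff);
}

int main(void)
{
    uint32_t mi_arb_state = 0;
    uint32_t c3_lp_write_enable = 1u << 2;  /* hypothetical bit */

    mi_arb_state = masked_write(mi_arb_state,
                                MASKED_BIT_ENABLE(c3_lp_write_enable));
    assert(mi_arb_state & c3_lp_write_enable);

    mi_arb_state = masked_write(mi_arb_state,
                                MASKED_BIT_DISABLE(c3_lp_write_enable));
    assert(!(mi_arb_state & c3_lp_write_enable));
    return 0;
}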
 
 	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
+	/* Old X drivers will take 0-2 for front, back, depth buffers */
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		dev_priv->fence_reg_start = 3;
 
 	if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
 		dev_priv->num_fence_regs = 32;
Line 4483... | Line 4511...
 	kfree(file_priv);
 
 	return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * old: current GEM buffer for the frontbuffer slots
+ * new: new GEM buffer for the frontbuffer slots
+ * frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
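A minimal model of the semantics the new kerneldoc describes, i.e. moving a set of frontbuffer slots from one object's bitmask to another's (the real function also updates device-level bookkeeping under the proper locks, which is omitted here):

#include <assert.h>
#include <stddef.h>

struct gem_obj { unsigned frontbuffer_bits; };

/* Clear the bits in @old, set them in @new; either may be NULL. */
static void track_fb(struct gem_obj *old, struct gem_obj *new,
                     unsigned frontbuffer_bits)
{
    if (old)
        old->frontbuffer_bits &= ~frontbuffer_bits;
    if (new)
        new->frontbuffer_bits |= frontbuffer_bits;
}

int main(void)
{
    struct gem_obj a = { .frontbuffer_bits = 0x3 }, b = { 0 };

    track_fb(&a, &b, 0x1);  /* flip slot 0 from a to b, e.g. on page flip */
    assert(a.frontbuffer_bits == 0x2);
    assert(b.frontbuffer_bits == 0x1);

    track_fb(NULL, &b, 0x4);  /* NULL old: slot newly occupied */
    assert(b.frontbuffer_bits == 0x5);
    return 0;
}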
Line 4520... | Line 4557...
 				struct i915_address_space *vm)
 {
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, vma_link) {
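With full PPGTT the aliasing PPGTT no longer stands in for the global GTT, so the old silent remap to dev_priv->gtt.base becomes a WARN_ON, and the loop that follows simply finds the binding that lives in the requested address space. A user-space sketch of that per-VM lookup (plain array instead of the kernel's list_head; names invented):

#include <assert.h>
#include <stddef.h>

struct vm  { int id; };
struct vma { const struct vm *vm; unsigned long start; };

/* Walk the object's bindings and return the offset in @vm. */
static unsigned long obj_offset(const struct vma *vmas, size_t n,
                                const struct vm *vm)
{
    for (size_t i = 0; i < n; i++)
        if (vmas[i].vm == vm)
            return vmas[i].start;
    return (unsigned long)-1;  /* the real code WARNs here */
}

int main(void)
{
    struct vm ggtt = { 0 }, ppgtt = { 1 };
    struct vma vmas[] = {
        { &ggtt,  0x10000 },   /* same object, two address spaces */
        { &ppgtt, 0x20000 },
    };

    assert(obj_offset(vmas, 2, &ppgtt) == 0x20000);
    assert(obj_offset(vmas, 2, &ggtt)  == 0x10000);
    return 0;
}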
Line 4563... | Line 4598...
 				struct i915_address_space *vm)
 {
 	struct drm_i915_private *dev_priv = o->base.dev->dev_private;
 	struct i915_vma *vma;
 
-	if (!dev_priv->mm.aliasing_ppgtt ||
-	    vm == &dev_priv->mm.aliasing_ppgtt->base)
-		vm = &dev_priv->gtt.base;
+	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	BUG_ON(list_empty(&o->vma_list));
 
Line 4582... | Line 4615...
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
-	/* This WARN has probably outlived its usefulness (callers already
-	 * WARN if they don't find the GGTT vma they expect). When removing,
-	 * remember to remove the pre-check in is_pin_display() as well */
-	if (WARN_ON(list_empty(&obj->vma_list)))
-		return NULL;
-
 	vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-	if (vma->vm != obj_to_ggtt(obj))
+	if (vma->vm != i915_obj_to_ggtt(obj))
 		return NULL;