/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/dma_remapping.h>

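/*
 * Local stand-ins for the kernel's uaccess helpers: in this port user
 * buffers are plain, directly addressable memory, so a memcpy suffices
 * and the calls never fault (hence the unconditional return 0).
 */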
static unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

static unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

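/*
 * Per-execbuffer object lookup state. eb->and encodes the strategy:
 * negative means handles are resolved as direct indices into lut[]
 * (the I915_EXEC_HANDLE_LUT fast path), non-negative is the hash mask
 * used to chain objects into buckets[].
 */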
struct eb_objects {
    struct list_head objects;
    int and;
    union {
        struct drm_i915_gem_object *lut[0];
        struct hlist_head buckets[0];
    };
};

static struct eb_objects *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
    struct eb_objects *eb = NULL;

    if (args->flags & I915_EXEC_HANDLE_LUT) {
        int size = args->buffer_count;
        size *= sizeof(struct drm_i915_gem_object *);
        size += sizeof(struct eb_objects);
        eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
    }

    if (eb == NULL) {
        int size = args->buffer_count;
        int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
        BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
        while (count > 2*size)
            count >>= 1;
        eb = kzalloc(count*sizeof(struct hlist_head) +
                 sizeof(struct eb_objects),
                 GFP_TEMPORARY);
        if (eb == NULL)
            return eb;

        eb->and = count - 1;
    } else
        eb->and = -args->buffer_count;

    INIT_LIST_HEAD(&eb->objects);
    return eb;
}

static void
eb_reset(struct eb_objects *eb)
{
    if (eb->and >= 0)
        memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_objects(struct eb_objects *eb,
          struct drm_i915_gem_exec_object2 *exec,
          const struct drm_i915_gem_execbuffer2 *args,
          struct drm_file *file)
{
    int i;

    spin_lock(&file->table_lock);
    for (i = 0; i < args->buffer_count; i++) {
        struct drm_i915_gem_object *obj;

        obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
        if (obj == NULL) {
            spin_unlock(&file->table_lock);
            DRM_DEBUG("Invalid object handle %d at index %d\n",
                  exec[i].handle, i);
            return -ENOENT;
        }

        if (!list_empty(&obj->exec_list)) {
            spin_unlock(&file->table_lock);
            DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                  obj, exec[i].handle, i);
            return -EINVAL;
        }

        drm_gem_object_reference(&obj->base);
        list_add_tail(&obj->exec_list, &eb->objects);

        obj->exec_entry = &exec[i];
        if (eb->and < 0) {
            eb->lut[i] = obj;
        } else {
            uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
            obj->exec_handle = handle;
            hlist_add_head(&obj->exec_node,
                       &eb->buckets[handle & eb->and]);
        }
    }
    spin_unlock(&file->table_lock);

    return 0;
}

static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
    if (eb->and < 0) {
        if (handle >= -eb->and)
            return NULL;
        return eb->lut[handle];
    } else {
        struct hlist_head *head;
        struct hlist_node *node;

        head = &eb->buckets[handle & eb->and];
        hlist_for_each(node, head) {
            struct drm_i915_gem_object *obj;

            obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
            if (obj->exec_handle == handle)
                return obj;
        }
        return NULL;
    }
}

static void
eb_destroy(struct eb_objects *eb)
{
    while (!list_empty(&eb->objects)) {
        struct drm_i915_gem_object *obj;

        obj = list_first_entry(&eb->objects,
                       struct drm_i915_gem_object,
                       exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }
    kfree(eb);
}

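/*
 * A relocation is written through the CPU domain when the object is
 * already CPU-writable, cannot be reached through the mappable GTT
 * aperture, or is cached (so a GTT write would be incoherent).
 */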
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
    return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
        !obj->map_and_fenceable ||
        obj->cache_level != I915_CACHE_NONE);
}

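/*
 * Note: both relocation writers below use this port's MapPage()
 * primitive to temporarily map the target page at a fixed scratch
 * address inside the dev_priv->gtt.mappable window, instead of the
 * upstream kmap/io_mapping helpers.
 */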
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
           struct drm_i915_gem_relocation_entry *reloc)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t page_offset = offset_in_page(reloc->offset);
    char *vaddr;
    int ret = -EINVAL;

    ret = i915_gem_object_set_to_cpu_domain(obj, 1);
    if (ret)
        return ret;

    vaddr = dev_priv->gtt.mappable + 4096;
    MapPage(vaddr, (addr_t)i915_gem_object_get_page(obj, reloc->offset >> PAGE_SHIFT), PG_SW);
    *(uint32_t *)(vaddr + page_offset) = reloc->delta;

    return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
           struct drm_i915_gem_relocation_entry *reloc)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    uint32_t __iomem *reloc_entry;
    void __iomem *reloc_page;
    int ret = -EINVAL;

    ret = i915_gem_object_set_to_gtt_domain(obj, true);
    if (ret)
        return ret;

    ret = i915_gem_object_put_fence(obj);
    if (ret)
        return ret;

    /* Map the page containing the relocation we're going to perform. */
    reloc->offset += i915_gem_obj_ggtt_offset(obj);
    MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base +
        (reloc->offset & PAGE_MASK), PG_SW);
    reloc_page = dev_priv->gtt.mappable;
    reloc_entry = (uint32_t __iomem *)
        (reloc_page + offset_in_page(reloc->offset));
    iowrite32(reloc->delta, reloc_entry);

    return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                   struct eb_objects *eb,
                   struct drm_i915_gem_relocation_entry *reloc,
                   struct i915_address_space *vm)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_gem_object *target_obj;
    struct drm_i915_gem_object *target_i915_obj;
    uint32_t target_offset;
    int ret = -EINVAL;

    /* we already hold a reference to all valid objects */
    target_obj = &eb_get_object(eb, reloc->target_handle)->base;
    if (unlikely(target_obj == NULL))
        return -ENOENT;

    target_i915_obj = to_intel_bo(target_obj);
    target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);

    /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
     * pipe_control writes because the gpu doesn't properly redirect them
     * through the ppgtt for non_secure batchbuffers. */
    if (unlikely(IS_GEN6(dev) &&
        reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
        !target_i915_obj->has_global_gtt_mapping)) {
        i915_gem_gtt_bind_object(target_i915_obj,
                     target_i915_obj->cache_level);
    }

    /* Validate that the target is in a valid r/w GPU domain */
    if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
        DRM_DEBUG("reloc with multiple write domains: "
              "obj %p target %d offset %d "
              "read %08x write %08x",
              obj, reloc->target_handle,
              (int) reloc->offset,
              reloc->read_domains,
              reloc->write_domain);
        return ret;
    }
    if (unlikely((reloc->write_domain | reloc->read_domains)
             & ~I915_GEM_GPU_DOMAINS)) {
        DRM_DEBUG("reloc with read/write non-GPU domains: "
              "obj %p target %d offset %d "
              "read %08x write %08x",
              obj, reloc->target_handle,
              (int) reloc->offset,
              reloc->read_domains,
              reloc->write_domain);
        return ret;
    }

    target_obj->pending_read_domains |= reloc->read_domains;
    target_obj->pending_write_domain |= reloc->write_domain;

    /* If the relocation already has the right value in it, no
     * more work needs to be done.
     */
    if (target_offset == reloc->presumed_offset)
        return 0;

    /* Check that the relocation address is valid... */
    if (unlikely(reloc->offset > obj->base.size - 4)) {
        DRM_DEBUG("Relocation beyond object bounds: "
              "obj %p target %d offset %d size %d.\n",
              obj, reloc->target_handle,
              (int) reloc->offset,
              (int) obj->base.size);
        return ret;
    }
    if (unlikely(reloc->offset & 3)) {
        DRM_DEBUG("Relocation not 4-byte aligned: "
              "obj %p target %d offset %d.\n",
              obj, reloc->target_handle,
              (int) reloc->offset);
        return ret;
    }

    /* We can't wait for rendering with pagefaults disabled */

    reloc->delta += target_offset;
    if (use_cpu_reloc(obj))
        ret = relocate_entry_cpu(obj, reloc);
    else
        ret = relocate_entry_gtt(obj, reloc);

    if (ret)
        return ret;

    /* and update the user's relocation entry */
    reloc->presumed_offset = target_offset;

    return 0;
}

static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                    struct eb_objects *eb,
                    struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
    struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(64)];
    struct drm_i915_gem_relocation_entry __user *user_relocs;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int remain, ret;

    user_relocs = to_user_ptr(entry->relocs_ptr);

    remain = entry->relocation_count;
    while (remain) {
        struct drm_i915_gem_relocation_entry *r = stack_reloc;
        int count = remain;
        if (count > ARRAY_SIZE(stack_reloc))
            count = ARRAY_SIZE(stack_reloc);
        remain -= count;

        memcpy(r, user_relocs, count*sizeof(r[0]));

        do {
            u64 offset = r->presumed_offset;

            ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
                                 vm);
            if (ret)
                return ret;

            if (r->presumed_offset != offset) {
                memcpy(&user_relocs->presumed_offset,
                       &r->presumed_offset,
                       sizeof(r->presumed_offset));
            }

            user_relocs++;
            r++;
        } while (--count);
    }

    return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
                     struct eb_objects *eb,
                     struct drm_i915_gem_relocation_entry *relocs,
                     struct i915_address_space *vm)
{
    const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    int i, ret;

    for (i = 0; i < entry->relocation_count; i++) {
        ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
                             vm);
        if (ret)
            return ret;
    }

    return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_objects *eb,
                 struct i915_address_space *vm)
{
    struct drm_i915_gem_object *obj;
    int ret = 0;

    /* This is the fast path and we cannot handle a pagefault whilst
     * holding the struct mutex lest the user pass in the relocations
     * contained within a mmaped bo. In such a case, the page fault
     * handler would call i915_gem_fault() and we would try to
     * acquire the struct mutex again. Obviously this is bad and so
     * lockdep complains vehemently.
     */
//    pagefault_disable();
    list_for_each_entry(obj, &eb->objects, exec_list) {
        ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
        if (ret)
            break;
    }
//    pagefault_enable();

    return ret;
}

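/*
 * Private bookkeeping flags stored in the high bits of
 * drm_i915_gem_exec_object2.flags while buffers are reserved; both are
 * cleared again by i915_gem_execbuffer_unreserve_object().
 */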
#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)

static int
need_reloc_mappable(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    return entry->relocation_count && !use_cpu_reloc(obj);
}

static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                   struct intel_ring_buffer *ring,
                   struct i915_address_space *vm,
                   bool *need_reloc)
{
    struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    bool need_fence, need_mappable;
    int ret;

    need_fence =
        has_fenced_gpu_access &&
        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
        obj->tiling_mode != I915_TILING_NONE;
    need_mappable = need_fence || need_reloc_mappable(obj);

    ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
                  false);
    if (ret)
        return ret;

    entry->flags |= __EXEC_OBJECT_HAS_PIN;

    if (has_fenced_gpu_access) {
        if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
            ret = i915_gem_object_get_fence(obj);
            if (ret)
                return ret;

            if (i915_gem_object_pin_fence(obj))
                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

            obj->pending_fenced_gpu_access = true;
        }
    }

    /* Ensure ppgtt mapping exists if needed */
    if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
        i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
                       obj, obj->cache_level);

        obj->has_aliasing_ppgtt_mapping = 1;
    }

    if (entry->offset != i915_gem_obj_offset(obj, vm)) {
        entry->offset = i915_gem_obj_offset(obj, vm);
        *need_reloc = true;
    }

    if (entry->flags & EXEC_OBJECT_WRITE) {
        obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
        obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
    }

    if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
        !obj->has_global_gtt_mapping)
        i915_gem_gtt_bind_object(obj, obj->cache_level);

    return 0;
}

static void
i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
    struct drm_i915_gem_exec_object2 *entry;

    if (!i915_gem_obj_bound_any(obj))
        return;

    entry = obj->exec_entry;

    if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
        i915_gem_object_unpin_fence(obj);

    if (entry->flags & __EXEC_OBJECT_HAS_PIN)
        i915_gem_object_unpin(obj);

    entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                struct list_head *objects,
                struct i915_address_space *vm,
                bool *need_relocs)
{
    struct drm_i915_gem_object *obj;
    struct list_head ordered_objects;
    bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
    int retry;

    INIT_LIST_HEAD(&ordered_objects);
    while (!list_empty(objects)) {
        struct drm_i915_gem_exec_object2 *entry;
        bool need_fence, need_mappable;

        obj = list_first_entry(objects,
                       struct drm_i915_gem_object,
                       exec_list);
        entry = obj->exec_entry;

        need_fence =
            has_fenced_gpu_access &&
            entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
            obj->tiling_mode != I915_TILING_NONE;
        need_mappable = need_fence || need_reloc_mappable(obj);

        if (need_mappable)
            list_move(&obj->exec_list, &ordered_objects);
        else
            list_move_tail(&obj->exec_list, &ordered_objects);

        obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
        obj->base.pending_write_domain = 0;
        obj->pending_fenced_gpu_access = false;
    }
    list_splice(&ordered_objects, objects);

    /* Attempt to pin all of the buffers into the GTT.
     * This is done in 3 phases:
     *
     * 1a. Unbind all objects that do not match the GTT constraints for
     *     the execbuffer (fenceable, mappable, alignment etc).
     * 1b. Increment pin count for already bound objects.
     * 2.  Bind new objects.
     * 3.  Decrement pin count.
     *
     * This avoids unnecessary unbinding of later objects in order to make
     * room for the earlier objects *unless* we need to defragment.
     */
    retry = 0;
    do {
        int ret = 0;

        /* Unbind any ill-fitting objects or pin. */
        list_for_each_entry(obj, objects, exec_list) {
            struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
            bool need_fence, need_mappable;
            u32 obj_offset;

            if (!i915_gem_obj_bound(obj, vm))
                continue;

            obj_offset = i915_gem_obj_offset(obj, vm);
            need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
            need_mappable = need_fence || need_reloc_mappable(obj);

            WARN_ON((need_mappable || need_fence) &&
                !i915_is_ggtt(vm));

            if ((entry->alignment &&
                 obj_offset & (entry->alignment - 1)) ||
                (need_mappable && !obj->map_and_fenceable))
                ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
            else
                ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
            if (ret)
                goto err;
        }

        /* Bind fresh objects */
        list_for_each_entry(obj, objects, exec_list) {
            if (i915_gem_obj_bound(obj, vm))
                continue;

            ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
            if (ret)
                goto err;
        }

err:        /* Decrement pin count for bound objects */
        list_for_each_entry(obj, objects, exec_list)
            i915_gem_execbuffer_unreserve_object(obj);

        if (ret != -ENOSPC || retry++)
            return ret;

//        ret = i915_gem_evict_everything(ring->dev);
        if (ret)
            return ret;
    } while (1);
}

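/*
 * Slow path: used when the in-place relocation pass fails. Drops
 * struct_mutex, snapshots every relocation entry into a kernel copy
 * (invalidating the user's presumed offsets so the next execbuffer
 * cannot trust stale values), then relocks, re-looks-up and re-reserves
 * the objects, and applies the relocations from the copy.
 */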
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                  struct drm_i915_gem_execbuffer2 *args,
                  struct drm_file *file,
                  struct intel_ring_buffer *ring,
                  struct eb_objects *eb,
                  struct drm_i915_gem_exec_object2 *exec,
                  struct i915_address_space *vm)
{
    struct drm_i915_gem_relocation_entry *reloc;
    struct drm_i915_gem_object *obj;
    bool need_relocs;
    int *reloc_offset;
    int i, total, ret;
    int count = args->buffer_count;

    /* We may process another execbuffer during the unlock... */
    while (!list_empty(&eb->objects)) {
        obj = list_first_entry(&eb->objects,
                       struct drm_i915_gem_object,
                       exec_list);
        list_del_init(&obj->exec_list);
        drm_gem_object_unreference(&obj->base);
    }

    mutex_unlock(&dev->struct_mutex);

    total = 0;
    for (i = 0; i < count; i++)
        total += exec[i].relocation_count;

    reloc_offset = malloc(count * sizeof(*reloc_offset));
    reloc = malloc(total * sizeof(*reloc));
    if (reloc == NULL || reloc_offset == NULL) {
        kfree(reloc);
        kfree(reloc_offset);
        mutex_lock(&dev->struct_mutex);
        return -ENOMEM;
    }

    total = 0;
    for (i = 0; i < count; i++) {
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        u64 invalid_offset = (u64)-1;
        int j;

        user_relocs = to_user_ptr(exec[i].relocs_ptr);

        if (copy_from_user(reloc+total, user_relocs,
                   exec[i].relocation_count * sizeof(*reloc))) {
            ret = -EFAULT;
            mutex_lock(&dev->struct_mutex);
            goto err;
        }

        /* As we do not update the known relocation offsets after
         * relocating (due to the complexities in lock handling),
         * we need to mark them as invalid now so that we force the
         * relocation processing next time. Just in case the target
         * object is evicted and then rebound into its old
         * presumed_offset before the next execbuffer - if that
         * happened we would make the mistake of assuming that the
         * relocations were valid.
         */
        for (j = 0; j < exec[i].relocation_count; j++) {
            if (copy_to_user(&user_relocs[j].presumed_offset,
                     &invalid_offset,
                     sizeof(invalid_offset))) {
                ret = -EFAULT;
                mutex_lock(&dev->struct_mutex);
                goto err;
            }
        }

        reloc_offset[i] = total;
        total += exec[i].relocation_count;
    }

    ret = i915_mutex_lock_interruptible(dev);
    if (ret) {
        mutex_lock(&dev->struct_mutex);
        goto err;
    }

    /* reacquire the objects */
    eb_reset(eb);
    ret = eb_lookup_objects(eb, exec, args, file);
    if (ret)
        goto err;

    need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
    ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
    if (ret)
        goto err;

    list_for_each_entry(obj, &eb->objects, exec_list) {
        int offset = obj->exec_entry - exec;
        ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
                                   reloc + reloc_offset[offset],
                                   vm);
        if (ret)
            goto err;
    }

    /* Leave the user relocations as they are: this is the painfully slow
     * path, and we want to avoid the complication of dropping the lock
     * whilst having buffers reserved in the aperture and so causing
     * spurious ENOSPC for random operations.
     */

err:
    kfree(reloc);
    kfree(reloc_offset);
    return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                struct list_head *objects)
{
    struct drm_i915_gem_object *obj;
    uint32_t flush_domains = 0;
    bool flush_chipset = false;
    int ret;

    list_for_each_entry(obj, objects, exec_list) {
        ret = i915_gem_object_sync(obj, ring);
        if (ret)
            return ret;

        if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
            flush_chipset |= i915_gem_clflush_object(obj, false);

        flush_domains |= obj->base.write_domain;
    }

    if (flush_chipset)
        i915_gem_chipset_flush(ring->dev);

    if (flush_domains & I915_GEM_DOMAIN_GTT)
        wmb();

    /* Unconditionally invalidate gpu caches and ensure that we do flush
     * any residual writes from the previous batch.
     */
    return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
    if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
        return false;

    return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

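/*
 * Sanity-check the user-supplied exec list before any allocations:
 * reject unknown per-object flags and relocation counts that could
 * overflow the single array the slow path allocates for all entries.
 */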
781 | static int |
782 | static int |
782 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, |
783 | validate_exec_list(struct drm_i915_gem_exec_object2 *exec, |
783 | int count) |
784 | int count) |
784 | { |
785 | { |
785 | int i; |
786 | int i; |
786 | int relocs_total = 0; |
787 | int relocs_total = 0; |
787 | int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); |
788 | int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); |
788 | 789 | ||
789 | for (i = 0; i < count; i++) { |
790 | for (i = 0; i < count; i++) { |
790 | char __user *ptr = to_user_ptr(exec[i].relocs_ptr); |
791 | char __user *ptr = to_user_ptr(exec[i].relocs_ptr); |
791 | int length; /* limited by fault_in_pages_readable() */ |
792 | int length; /* limited by fault_in_pages_readable() */ |
792 | 793 | ||
793 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
794 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
794 | return -EINVAL; |
795 | return -EINVAL; |
795 | 796 | ||
796 | /* First check for malicious input causing overflow in |
797 | /* First check for malicious input causing overflow in |
797 | * the worst case where we need to allocate the entire |
798 | * the worst case where we need to allocate the entire |
798 | * relocation tree as a single array. |
799 | * relocation tree as a single array. |
799 | */ |
800 | */ |
800 | if (exec[i].relocation_count > relocs_max - relocs_total) |
801 | if (exec[i].relocation_count > relocs_max - relocs_total) |
801 | return -EINVAL; |
802 | return -EINVAL; |
802 | relocs_total += exec[i].relocation_count; |
803 | relocs_total += exec[i].relocation_count; |
803 | 804 | ||
804 | length = exec[i].relocation_count * |
805 | length = exec[i].relocation_count * |
805 | sizeof(struct drm_i915_gem_relocation_entry); |
806 | sizeof(struct drm_i915_gem_relocation_entry); |
806 | /* |
807 | /* |
807 | * We must check that the entire relocation array is safe |
808 | * We must check that the entire relocation array is safe |
808 | * to read, but since we may need to update the presumed |
809 | * to read, but since we may need to update the presumed |
809 | * offsets during execution, check for full write access. |
810 | * offsets during execution, check for full write access. |
810 | */ |
811 | */ |
811 | 812 | ||
812 | } |
813 | } |
813 | 814 | ||
814 | return 0; |
815 | return 0; |
815 | } |
816 | } |
816 | 817 | ||
817 | static void |
818 | static void |
818 | i915_gem_execbuffer_move_to_active(struct list_head *objects, |
819 | i915_gem_execbuffer_move_to_active(struct list_head *objects, |
819 | struct i915_address_space *vm, |
820 | struct i915_address_space *vm, |
820 | struct intel_ring_buffer *ring) |
821 | struct intel_ring_buffer *ring) |
821 | { |
822 | { |
822 | struct drm_i915_gem_object *obj; |
823 | struct drm_i915_gem_object *obj; |
823 | 824 | ||
824 | list_for_each_entry(obj, objects, exec_list) { |
825 | list_for_each_entry(obj, objects, exec_list) { |
825 | u32 old_read = obj->base.read_domains; |
826 | u32 old_read = obj->base.read_domains; |
826 | u32 old_write = obj->base.write_domain; |
827 | u32 old_write = obj->base.write_domain; |
827 | 828 | ||
828 | obj->base.write_domain = obj->base.pending_write_domain; |
829 | obj->base.write_domain = obj->base.pending_write_domain; |
829 | if (obj->base.write_domain == 0) |
830 | if (obj->base.write_domain == 0) |
830 | obj->base.pending_read_domains |= obj->base.read_domains; |
831 | obj->base.pending_read_domains |= obj->base.read_domains; |
831 | obj->base.read_domains = obj->base.pending_read_domains; |
832 | obj->base.read_domains = obj->base.pending_read_domains; |
832 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
833 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
833 | 834 | ||
834 | /* FIXME: This lookup gets fixed later <-- danvet */ |
835 | /* FIXME: This lookup gets fixed later <-- danvet */ |
835 | list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); |
836 | list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); |
836 | i915_gem_object_move_to_active(obj, ring); |
837 | i915_gem_object_move_to_active(obj, ring); |
837 | if (obj->base.write_domain) { |
838 | if (obj->base.write_domain) { |
838 | obj->dirty = 1; |
839 | obj->dirty = 1; |
839 | obj->last_write_seqno = intel_ring_get_seqno(ring); |
840 | obj->last_write_seqno = intel_ring_get_seqno(ring); |
840 | if (obj->pin_count) /* check for potential scanout */ |
841 | if (obj->pin_count) /* check for potential scanout */ |
841 | intel_mark_fb_busy(obj, ring); |
842 | intel_mark_fb_busy(obj, ring); |
842 | } |
843 | } |
843 | 844 | ||
844 | trace_i915_gem_object_change_domain(obj, old_read, old_write); |
845 | trace_i915_gem_object_change_domain(obj, old_read, old_write); |
845 | } |
846 | } |
846 | } |
847 | } |
847 | 848 | ||
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}

static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
		return 0;

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct i915_address_space *vm)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct eb_objects *eb;
	struct drm_i915_gem_object *batch_obj;
	struct drm_clip_rect *cliprects = NULL;
	struct intel_ring_buffer *ring;
	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 exec_start, exec_len;
	u32 mask, flags;
	int ret, mode, i;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
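		/* NOTE: upstream additionally requires the caller to be the
		 * DRM master with CAP_SYS_ADMIN before allowing a secure
		 * batch; that check appears to be elided in this port. */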
913 | 914 | ||
914 | flags |= I915_DISPATCH_SECURE; |
915 | flags |= I915_DISPATCH_SECURE; |
915 | } |
916 | } |
916 | if (args->flags & I915_EXEC_IS_PINNED) |
917 | if (args->flags & I915_EXEC_IS_PINNED) |
917 | flags |= I915_DISPATCH_PINNED; |
918 | flags |= I915_DISPATCH_PINNED; |
918 | 919 | ||
	switch (args->flags & I915_EXEC_RING_MASK) {
	case I915_EXEC_DEFAULT:
	case I915_EXEC_RENDER:
		ring = &dev_priv->ring[RCS];
		break;
	case I915_EXEC_BSD:
		ring = &dev_priv->ring[VCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_BLT:
		ring = &dev_priv->ring[BCS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;
	case I915_EXEC_VEBOX:
		ring = &dev_priv->ring[VECS];
		if (ctx_id != DEFAULT_CONTEXT_ID) {
			DRM_DEBUG("Ring %s doesn't support contexts\n",
				  ring->name);
			return -EPERM;
		}
		break;

	default:
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}
959 | 960 | ||
960 | mode = args->flags & I915_EXEC_CONSTANTS_MASK; |
961 | mode = args->flags & I915_EXEC_CONSTANTS_MASK; |
961 | mask = I915_EXEC_CONSTANTS_MASK; |
962 | mask = I915_EXEC_CONSTANTS_MASK; |
962 | switch (mode) { |
963 | switch (mode) { |
963 | case I915_EXEC_CONSTANTS_REL_GENERAL: |
964 | case I915_EXEC_CONSTANTS_REL_GENERAL: |
964 | case I915_EXEC_CONSTANTS_ABSOLUTE: |
965 | case I915_EXEC_CONSTANTS_ABSOLUTE: |
965 | case I915_EXEC_CONSTANTS_REL_SURFACE: |
966 | case I915_EXEC_CONSTANTS_REL_SURFACE: |
966 | if (ring == &dev_priv->ring[RCS] && |
967 | if (ring == &dev_priv->ring[RCS] && |
967 | mode != dev_priv->relative_constants_mode) { |
968 | mode != dev_priv->relative_constants_mode) { |
968 | if (INTEL_INFO(dev)->gen < 4) |
969 | if (INTEL_INFO(dev)->gen < 4) |
969 | return -EINVAL; |
970 | return -EINVAL; |
970 | 971 | ||
971 | if (INTEL_INFO(dev)->gen > 5 && |
972 | if (INTEL_INFO(dev)->gen > 5 && |
972 | mode == I915_EXEC_CONSTANTS_REL_SURFACE) |
973 | mode == I915_EXEC_CONSTANTS_REL_SURFACE) |
973 | return -EINVAL; |
974 | return -EINVAL; |
974 | 975 | ||
975 | /* The HW changed the meaning on this bit on gen6 */ |
976 | /* The HW changed the meaning on this bit on gen6 */ |
976 | if (INTEL_INFO(dev)->gen >= 6) |
977 | if (INTEL_INFO(dev)->gen >= 6) |
977 | mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; |
978 | mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; |
978 | } |
979 | } |
979 | break; |
980 | break; |
980 | default: |
981 | default: |
981 | DRM_DEBUG("execbuf with unknown constants: %d\n", mode); |
982 | DRM_DEBUG("execbuf with unknown constants: %d\n", mode); |
982 | return -EINVAL; |
983 | return -EINVAL; |
983 | } |
984 | } |
984 | 985 | ||
985 | if (args->buffer_count < 1) { |
986 | if (args->buffer_count < 1) { |
986 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
987 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
987 | return -EINVAL; |
988 | return -EINVAL; |
988 | } |
989 | } |
989 | 990 | ||
990 | if (args->num_cliprects != 0) { |
991 | if (args->num_cliprects != 0) { |
991 | if (ring != &dev_priv->ring[RCS]) { |
992 | if (ring != &dev_priv->ring[RCS]) { |
992 | DRM_DEBUG("clip rectangles are only valid with the render ring\n"); |
993 | DRM_DEBUG("clip rectangles are only valid with the render ring\n"); |
993 | return -EINVAL; |
994 | return -EINVAL; |
994 | } |
995 | } |
995 | 996 | ||
996 | if (INTEL_INFO(dev)->gen >= 5) { |
997 | if (INTEL_INFO(dev)->gen >= 5) { |
997 | DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); |
998 | DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); |
998 | return -EINVAL; |
999 | return -EINVAL; |
999 | } |
1000 | } |
1000 | 1001 | ||
1001 | if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { |
1002 | if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { |
1002 | DRM_DEBUG("execbuf with %u cliprects\n", |
1003 | DRM_DEBUG("execbuf with %u cliprects\n", |
1003 | args->num_cliprects); |
1004 | args->num_cliprects); |
1004 | return -EINVAL; |
1005 | return -EINVAL; |
1005 | } |
1006 | } |
1006 | 1007 | ||
1007 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), |
1008 | cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), |
1008 | GFP_KERNEL); |
1009 | GFP_KERNEL); |
1009 | if (cliprects == NULL) { |
1010 | if (cliprects == NULL) { |
1010 | ret = -ENOMEM; |
1011 | ret = -ENOMEM; |
1011 | goto pre_mutex_err; |
1012 | goto pre_mutex_err; |
1012 | } |
1013 | } |
1013 | 1014 | ||
1014 | if (copy_from_user(cliprects, |
1015 | if (copy_from_user(cliprects, |
1015 | to_user_ptr(args->cliprects_ptr), |
1016 | to_user_ptr(args->cliprects_ptr), |
1016 | sizeof(*cliprects)*args->num_cliprects)) { |
1017 | sizeof(*cliprects)*args->num_cliprects)) { |
1017 | ret = -EFAULT; |
1018 | ret = -EFAULT; |
1018 | goto pre_mutex_err; |
1019 | goto pre_mutex_err; |
1019 | } |
1020 | } |
1020 | } |
1021 | } |
1021 | 1022 | ||
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	eb = eb_create(args);
	if (eb == NULL) {
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}
1038 | 1039 | ||
1039 | /* Look up object handles */ |
1040 | /* Look up object handles */ |
1040 | ret = eb_lookup_objects(eb, exec, args, file); |
1041 | ret = eb_lookup_objects(eb, exec, args, file); |
1041 | if (ret) |
1042 | if (ret) |
1042 | goto err; |
1043 | goto err; |
1043 | 1044 | ||
1044 | /* take note of the batch buffer before we might reorder the lists */ |
1045 | /* take note of the batch buffer before we might reorder the lists */ |
1045 | batch_obj = list_entry(eb->objects.prev, |
1046 | batch_obj = list_entry(eb->objects.prev, |
1046 | struct drm_i915_gem_object, |
1047 | struct drm_i915_gem_object, |
1047 | exec_list); |
1048 | exec_list); |
1048 | 1049 | ||
1049 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
1050 | /* Move the objects en-masse into the GTT, evicting if necessary. */ |
1050 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
1051 | need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
1051 | ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); |
1052 | ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); |
1052 | if (ret) |
1053 | if (ret) |
1053 | goto err; |
1054 | goto err; |
1054 | 1055 | ||
1055 | /* The objects are in their final locations, apply the relocations. */ |
1056 | /* The objects are in their final locations, apply the relocations. */ |
1056 | if (need_relocs) |
1057 | if (need_relocs) |
1057 | ret = i915_gem_execbuffer_relocate(eb, vm); |
1058 | ret = i915_gem_execbuffer_relocate(eb, vm); |
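	/* -EFAULT from the fast path means a relocation list faulted while
	 * struct_mutex was held; the slow path below drops the lock, copies
	 * the relocations in and retries. */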
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec, vm);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}
1067 | 1068 | ||
1068 | /* Set the pending read domains for the batch buffer to COMMAND */ |
1069 | /* Set the pending read domains for the batch buffer to COMMAND */ |
1069 | if (batch_obj->base.pending_write_domain) { |
1070 | if (batch_obj->base.pending_write_domain) { |
1070 | DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); |
1071 | DRM_DEBUG("Attempting to use self-modifying batch buffer\n"); |
1071 | ret = -EINVAL; |
1072 | ret = -EINVAL; |
1072 | goto err; |
1073 | goto err; |
1073 | } |
1074 | } |
1074 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
1075 | batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; |
1075 | 1076 | ||
1076 | /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure |
1077 | /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure |
1077 | * batch" bit. Hence we need to pin secure batches into the global gtt. |
1078 | * batch" bit. Hence we need to pin secure batches into the global gtt. |
1078 | * hsw should have this fixed, but let's be paranoid and do it |
1079 | * hsw should have this fixed, but let's be paranoid and do it |
1079 | * unconditionally for now. */ |
1080 | * unconditionally for now. */ |
1080 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
1081 | if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
1081 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
1082 | i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
1082 | 1083 | ||
1083 | ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); |
1084 | ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); |
1084 | if (ret) |
1085 | if (ret) |
1085 | goto err; |
1086 | goto err; |
1086 | 1087 | ||
1087 | ret = i915_switch_context(ring, file, ctx_id); |
1088 | ret = i915_switch_context(ring, file, ctx_id); |
1088 | if (ret) |
1089 | if (ret) |
1089 | goto err; |
1090 | goto err; |
1090 | 1091 | ||
	if (ring == &dev_priv->ring[RCS] &&
	    mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto err;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, mask << 16 | mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto err;
	}

	exec_start = i915_gem_obj_offset(batch_obj, vm) +
		args->batch_start_offset;
	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto err;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto err;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto err;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	kfree(cliprects);
	return ret;
}
1150 | 1151 | ||
1151 | #if 0 |
1152 | #if 0 |
1152 | /* |
1153 | /* |
1153 | * Legacy execbuffer just creates an exec2 list from the original exec object |
1154 | * Legacy execbuffer just creates an exec2 list from the original exec object |
1154 | * list array and passes it to the real function. |
1155 | * list array and passes it to the real function. |
1155 | */ |
1156 | */ |
1156 | int |
1157 | int |
1157 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
1158 | i915_gem_execbuffer(struct drm_device *dev, void *data, |
1158 | struct drm_file *file) |
1159 | struct drm_file *file) |
1159 | { |
1160 | { |
1160 | struct drm_i915_private *dev_priv = dev->dev_private; |
1161 | struct drm_i915_private *dev_priv = dev->dev_private; |
1161 | struct drm_i915_gem_execbuffer *args = data; |
1162 | struct drm_i915_gem_execbuffer *args = data; |
1162 | struct drm_i915_gem_execbuffer2 exec2; |
1163 | struct drm_i915_gem_execbuffer2 exec2; |
1163 | struct drm_i915_gem_exec_object *exec_list = NULL; |
1164 | struct drm_i915_gem_exec_object *exec_list = NULL; |
1164 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
1165 | struct drm_i915_gem_exec_object2 *exec2_list = NULL; |
1165 | int ret, i; |
1166 | int ret, i; |
1166 | 1167 | ||
1167 | if (args->buffer_count < 1) { |
1168 | if (args->buffer_count < 1) { |
1168 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
1169 | DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count); |
1169 | return -EINVAL; |
1170 | return -EINVAL; |
1170 | } |
1171 | } |
1171 | 1172 | ||
1172 | /* Copy in the exec list from userland */ |
1173 | /* Copy in the exec list from userland */ |
1173 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); |
1174 | exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count); |
1174 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
1175 | exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count); |
1175 | if (exec_list == NULL || exec2_list == NULL) { |
1176 | if (exec_list == NULL || exec2_list == NULL) { |
1176 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1177 | DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
1177 | args->buffer_count); |
1178 | args->buffer_count); |
1178 | drm_free_large(exec_list); |
1179 | drm_free_large(exec_list); |
1179 | drm_free_large(exec2_list); |
1180 | drm_free_large(exec2_list); |
1180 | return -ENOMEM; |
1181 | return -ENOMEM; |
1181 | } |
1182 | } |
1182 | ret = copy_from_user(exec_list, |
1183 | ret = copy_from_user(exec_list, |
1183 | to_user_ptr(args->buffers_ptr), |
1184 | to_user_ptr(args->buffers_ptr), |
1184 | sizeof(*exec_list) * args->buffer_count); |
1185 | sizeof(*exec_list) * args->buffer_count); |
1185 | if (ret != 0) { |
1186 | if (ret != 0) { |
1186 | DRM_DEBUG("copy %d exec entries failed %d\n", |
1187 | DRM_DEBUG("copy %d exec entries failed %d\n", |
1187 | args->buffer_count, ret); |
1188 | args->buffer_count, ret); |
1188 | drm_free_large(exec_list); |
1189 | drm_free_large(exec_list); |
1189 | drm_free_large(exec2_list); |
1190 | drm_free_large(exec2_list); |
1190 | return -EFAULT; |
1191 | return -EFAULT; |
1191 | } |
1192 | } |
1192 | 1193 | ||
1193 | for (i = 0; i < args->buffer_count; i++) { |
1194 | for (i = 0; i < args->buffer_count; i++) { |
1194 | exec2_list[i].handle = exec_list[i].handle; |
1195 | exec2_list[i].handle = exec_list[i].handle; |
1195 | exec2_list[i].relocation_count = exec_list[i].relocation_count; |
1196 | exec2_list[i].relocation_count = exec_list[i].relocation_count; |
1196 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; |
1197 | exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr; |
1197 | exec2_list[i].alignment = exec_list[i].alignment; |
1198 | exec2_list[i].alignment = exec_list[i].alignment; |
1198 | exec2_list[i].offset = exec_list[i].offset; |
1199 | exec2_list[i].offset = exec_list[i].offset; |
1199 | if (INTEL_INFO(dev)->gen < 4) |
1200 | if (INTEL_INFO(dev)->gen < 4) |
1200 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; |
1201 | exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE; |
1201 | else |
1202 | else |
1202 | exec2_list[i].flags = 0; |
1203 | exec2_list[i].flags = 0; |
1203 | } |
1204 | } |
1204 | 1205 | ||
1205 | exec2.buffers_ptr = args->buffers_ptr; |
1206 | exec2.buffers_ptr = args->buffers_ptr; |
1206 | exec2.buffer_count = args->buffer_count; |
1207 | exec2.buffer_count = args->buffer_count; |
1207 | exec2.batch_start_offset = args->batch_start_offset; |
1208 | exec2.batch_start_offset = args->batch_start_offset; |
1208 | exec2.batch_len = args->batch_len; |
1209 | exec2.batch_len = args->batch_len; |
1209 | exec2.DR1 = args->DR1; |
1210 | exec2.DR1 = args->DR1; |
1210 | exec2.DR4 = args->DR4; |
1211 | exec2.DR4 = args->DR4; |
1211 | exec2.num_cliprects = args->num_cliprects; |
1212 | exec2.num_cliprects = args->num_cliprects; |
1212 | exec2.cliprects_ptr = args->cliprects_ptr; |
1213 | exec2.cliprects_ptr = args->cliprects_ptr; |
1213 | exec2.flags = I915_EXEC_RENDER; |
1214 | exec2.flags = I915_EXEC_RENDER; |
1214 | i915_execbuffer2_set_context_id(exec2, 0); |
1215 | i915_execbuffer2_set_context_id(exec2, 0); |
1215 | 1216 | ||
1216 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, |
1217 | ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list, |
1217 | &dev_priv->gtt.base); |
1218 | &dev_priv->gtt.base); |
1218 | if (!ret) { |
1219 | if (!ret) { |
1219 | /* Copy the new buffer offsets back to the user's exec list. */ |
1220 | /* Copy the new buffer offsets back to the user's exec list. */ |
1220 | for (i = 0; i < args->buffer_count; i++) |
1221 | for (i = 0; i < args->buffer_count; i++) |
1221 | exec_list[i].offset = exec2_list[i].offset; |
1222 | exec_list[i].offset = exec2_list[i].offset; |
1222 | /* ... and back out to userspace */ |
1223 | /* ... and back out to userspace */ |
1223 | ret = copy_to_user(to_user_ptr(args->buffers_ptr), |
1224 | ret = copy_to_user(to_user_ptr(args->buffers_ptr), |
1224 | exec_list, |
1225 | exec_list, |
1225 | sizeof(*exec_list) * args->buffer_count); |
1226 | sizeof(*exec_list) * args->buffer_count); |
1226 | if (ret) { |
1227 | if (ret) { |
1227 | ret = -EFAULT; |
1228 | ret = -EFAULT; |
1228 | DRM_DEBUG("failed to copy %d exec entries " |
1229 | DRM_DEBUG("failed to copy %d exec entries " |
1229 | "back to user (%d)\n", |
1230 | "back to user (%d)\n", |
1230 | args->buffer_count, ret); |
1231 | args->buffer_count, ret); |
1231 | } |
1232 | } |
1232 | } |
1233 | } |
1233 | 1234 | ||
1234 | drm_free_large(exec_list); |
1235 | drm_free_large(exec_list); |
1235 | drm_free_large(exec2_list); |
1236 | drm_free_large(exec2_list); |
1236 | return ret; |
1237 | return ret; |
1237 | } |
1238 | } |
1238 | #endif |
1239 | #endif |
1239 | 1240 | ||
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		kfree(exec2_list);
		FAIL();
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
				     &dev_priv->gtt.base);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		ret = copy_to_user(to_user_ptr(args->buffers_ptr),
				   exec2_list,
				   sizeof(*exec2_list) * args->buffer_count);
		if (ret) {
			ret = -EFAULT;
			DRM_DEBUG("failed to copy %d exec entries "
				  "back to user (%d)\n",
				  args->buffer_count, ret);
		}
	}

	kfree(exec2_list);
	return ret;
}
1292 | 1292 | ||
1293 | static><30) |
1293 | static><30) |
1294 | 1294 | ||
1295 | static>31) |
1295 | static>31) |
1296 | #define><31) |
1296 | #define><31) |
1297 | #define>>>>> |
1297 | #define>>>>> |