/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include
#include
//#include
#include

extern int x86_clflush_size;

#undef mb
#undef rmb
#undef wmb
#define mb()  asm volatile("mfence")
#define rmb() asm volatile("lfence")
#define wmb() asm volatile("sfence")

static inline void clflush(volatile void *__p)
{
    asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}
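
/*
 * Illustrative note on the definitions above: mb/rmb/wmb are pinned to the
 * SSE2 fence instructions because this port bypasses the Linux barrier
 * headers.  The flush pattern built from them, as used verbatim by
 * i915_gem_clflush_object() further down, is:
 *
 *     mb();                                       // order earlier stores
 *     for (i = 0; i < size; i += x86_clflush_size)
 *         clflush(vaddr + i);                     // write back each line
 *     mb();                                       // wait for the flushes
 */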

#define MAX_ERRNO 4095

#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

static inline long IS_ERR(const void *ptr)
{
    return IS_ERR_VALUE((unsigned long)ptr);
}

static inline void *ERR_PTR(long error)
{
    return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long) ptr;
}
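
/*
 * These are local stand-ins for the kernel's err.h helpers: a valid kernel
 * pointer never falls in the top MAX_ERRNO bytes of the address space, so a
 * negative errno can travel through a pointer return value.  The typical
 * round trip, exactly as the pread paths below use it:
 *
 *     struct page *page = shmem_read_mapping_page(mapping, index);
 *     if (IS_ERR(page))
 *         return PTR_ERR(page);   // e.g. -ENOMEM
 */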

void
drm_gem_object_free(struct kref *kref)
{
    struct drm_gem_object *obj = (struct drm_gem_object *) kref;
    struct drm_device *dev = obj->dev;

    BUG_ON(!mutex_is_locked(&dev->struct_mutex));

    i915_gem_free_object(obj);
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
    BUG_ON((size & (PAGE_SIZE - 1)) != 0);

    obj->dev = dev;
    kref_init(&obj->refcount);
    atomic_set(&obj->handle_count, 0);
    obj->size = size;

    return 0;
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{ }

#define I915_EXEC_CONSTANTS_MASK        (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */

static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
                                                          bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
                                                                  uint64_t offset,
                                                                  uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                                    unsigned alignment,
                                                    bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
                                     struct drm_i915_fence_reg *reg);
static int i915_gem_phys_pwrite(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
                                struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    struct shrink_control *sc);

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
{
    dev_priv->mm.object_count++;
    dev_priv->mm.object_memory += size;
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
                                     size_t size)
{
    dev_priv->mm.object_count--;
    dev_priv->mm.object_memory -= size;
}

#if 0

static int
i915_gem_wait_for_error(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct completion *x = &dev_priv->error_completion;
    unsigned long flags;
    int ret;

    if (!atomic_read(&dev_priv->mm.wedged))
        return 0;

    ret = wait_for_completion_interruptible(x);
    if (ret)
        return ret;

    if (atomic_read(&dev_priv->mm.wedged)) {
        /* GPU is hung, bump the completion count to account for
         * the token we just consumed so that we never hit zero and
         * end up waiting upon a subsequent completion event that
         * will never happen.
         */
        spin_lock_irqsave(&x->wait.lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->wait.lock, flags);
    }
    return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
    int ret;

    ret = i915_gem_wait_for_error(dev);
    if (ret)
        return ret;

    ret = mutex_lock_interruptible(&dev->struct_mutex);
    if (ret)
        return ret;

    WARN_ON(i915_verify_lists(dev));
    return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
    return obj->gtt_space && !obj->active && obj->pin_count == 0;
}

#endif

void i915_gem_do_init(struct drm_device *dev,
                      unsigned long start,
                      unsigned long mappable_end,
                      unsigned long end)
{
    drm_i915_private_t *dev_priv = dev->dev_private;

    drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);

    dev_priv->mm.gtt_start = start;
    dev_priv->mm.gtt_mappable_end = mappable_end;
    dev_priv->mm.gtt_end = end;
    dev_priv->mm.gtt_total = end - start;
    dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

    /* Take over this portion of the GTT */
    intel_gtt_clear_range(start / PAGE_SIZE, (end - start) / PAGE_SIZE);
}

#if 0

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
    struct drm_i915_gem_init *args = data;

    if (args->gtt_start >= args->gtt_end ||
        (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
        return -EINVAL;

    mutex_lock(&dev->struct_mutex);
    i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
    mutex_unlock(&dev->struct_mutex);

    return 0;
}
#endif

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_get_aperture *args = data;
    struct drm_i915_gem_object *obj;
    size_t pinned;

    pinned = 0;
    mutex_lock(&dev->struct_mutex);
    list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
        pinned += obj->gtt_space->size;
    mutex_unlock(&dev->struct_mutex);

    args->aper_size = dev_priv->mm.gtt_total;
    args->aper_available_size = args->aper_size - pinned;

    return 0;
}

#if 0

int i915_gem_create(struct drm_file *file,
                    struct drm_device *dev,
                    uint64_t size,
                    uint32_t *handle_p)
{
    struct drm_i915_gem_object *obj;
    int ret;
    u32 handle;

    size = roundup(size, PAGE_SIZE);
    if (size == 0)
        return -EINVAL;

    /* Allocate the new object */
    obj = i915_gem_alloc_object(dev, size);
    if (obj == NULL)
        return -ENOMEM;

    ret = drm_gem_handle_create(file, &obj->base, &handle);
    if (ret) {
        drm_gem_object_release(&obj->base);
        i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
        kfree(obj);
        return ret;
    }

    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference(&obj->base);
    trace_i915_gem_object_create(obj);

    *handle_p = handle;
    return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
    /* have to work out size/pitch and return them */
    args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
    args->size = args->pitch * args->height;
    return i915_gem_create(file, dev,
                           args->size, &args->handle);
}

int i915_gem_dumb_destroy(struct drm_file *file,
                          struct drm_device *dev,
                          uint32_t handle)
{
    return drm_gem_handle_delete(file, handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
    struct drm_i915_gem_create *args = data;
    return i915_gem_create(file, dev,
                           args->size, &args->handle);
}

static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
    drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

    return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
           obj->tiling_mode != I915_TILING_NONE;
}

static inline void
slow_shmem_copy(struct page *dst_page,
                int dst_offset,
                struct page *src_page,
                int src_offset,
                int length)
{
    char *dst_vaddr, *src_vaddr;

    dst_vaddr = kmap(dst_page);
    src_vaddr = kmap(src_page);

    memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);

    kunmap(src_page);
    kunmap(dst_page);
}

static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
                      int gpu_offset,
                      struct page *cpu_page,
                      int cpu_offset,
                      int length,
                      int is_read)
{
    char *gpu_vaddr, *cpu_vaddr;

    /* Use the unswizzled path if this page isn't affected. */
    if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
        if (is_read)
            return slow_shmem_copy(cpu_page, cpu_offset,
                                   gpu_page, gpu_offset, length);
        else
            return slow_shmem_copy(gpu_page, gpu_offset,
                                   cpu_page, cpu_offset, length);
    }

    gpu_vaddr = kmap(gpu_page);
    cpu_vaddr = kmap(cpu_page);

    /* Copy the data, XORing A6 with A17 (1). The user already knows he's
     * XORing with the other bits (A9 for Y, A9 and A10 for X)
     */
    while (length > 0) {
        int cacheline_end = ALIGN(gpu_offset + 1, 64);
        int this_length = min(cacheline_end - gpu_offset, length);
        int swizzled_gpu_offset = gpu_offset ^ 64;

        if (is_read) {
            memcpy(cpu_vaddr + cpu_offset,
                   gpu_vaddr + swizzled_gpu_offset,
                   this_length);
        } else {
            memcpy(gpu_vaddr + swizzled_gpu_offset,
                   cpu_vaddr + cpu_offset,
                   this_length);
        }
        cpu_offset += this_length;
        gpu_offset += this_length;
        length -= this_length;
    }

    kunmap(cpu_page);
    kunmap(gpu_page);
}
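
/*
 * Worked example for the swizzle above: on pages whose physical bit 17 is
 * set, gpu_offset ^ 64 toggles address bit 6, swapping each pair of adjacent
 * 64-byte cachelines -- bytes 0..63 of the linear view live at 64..127 of
 * the page and vice versa.  The copy is chunked at cacheline boundaries
 * (this_length never crosses ALIGN(gpu_offset + 1, 64)), so each memcpy
 * stays within a single swizzled line.
 */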

/**
 * This is the fast shmem pread path, which attempts to copy_from_user directly
 * from the backing pages of the object to the user's address space.  On a
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
 */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file)
{
    struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
    ssize_t remain;
    loff_t offset;
    char __user *user_data;
    int page_offset, page_length;

    user_data = (char __user *) (uintptr_t) args->data_ptr;
    remain = args->size;

    offset = args->offset;

    while (remain > 0) {
        struct page *page;
        char *vaddr;
        int ret;

        /* Operation in this page
         *
         * page_offset = offset within page
         * page_length = bytes to copy for this page
         */
        page_offset = offset_in_page(offset);
        page_length = remain;
        if ((page_offset + remain) > PAGE_SIZE)
            page_length = PAGE_SIZE - page_offset;

        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
        if (IS_ERR(page))
            return PTR_ERR(page);

        vaddr = kmap_atomic(page);
        ret = __copy_to_user_inatomic(user_data,
                                      vaddr + page_offset,
                                      page_length);
        kunmap_atomic(vaddr);

        mark_page_accessed(page);
        page_cache_release(page);
        if (ret)
            return -EFAULT;

        remain -= page_length;
        user_data += page_length;
        offset += page_length;
    }

    return 0;
}

/**
 * This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user into outside of the struct_mutex, so we
 * can copy out of the object's backing pages while holding the struct mutex
 * and not take page faults.
 */
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_pread *args,
                          struct drm_file *file)
{
    struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
    struct mm_struct *mm = current->mm;
    struct page **user_pages;
    ssize_t remain;
    loff_t offset, pinned_pages, i;
    loff_t first_data_page, last_data_page, num_pages;
    int shmem_page_offset;
    int data_page_index, data_page_offset;
    int page_length;
    int ret;
    uint64_t data_ptr = args->data_ptr;
    int do_bit17_swizzling;

    remain = args->size;

    /* Pin the user pages containing the data.  We can't fault while
     * holding the struct mutex, yet we want to hold it while
     * dereferencing the user data.
     */
    first_data_page = data_ptr / PAGE_SIZE;
    last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
    num_pages = last_data_page - first_data_page + 1;

    user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
    if (user_pages == NULL)
        return -ENOMEM;

    mutex_unlock(&dev->struct_mutex);
    down_read(&mm->mmap_sem);
    pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
                                  num_pages, 1, 0, user_pages, NULL);
    up_read(&mm->mmap_sem);
    mutex_lock(&dev->struct_mutex);
    if (pinned_pages < num_pages) {
        ret = -EFAULT;
        goto out;
    }

    ret = i915_gem_object_set_cpu_read_domain_range(obj,
                                                    args->offset,
                                                    args->size);
    if (ret)
        goto out;

    do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

    offset = args->offset;

    while (remain > 0) {
        struct page *page;

        /* Operation in this page
         *
         * shmem_page_offset = offset within page in shmem file
         * data_page_index = page number in get_user_pages return
         * data_page_offset = offset within data_page_index page.
         * page_length = bytes to copy for this page
         */
        shmem_page_offset = offset_in_page(offset);
        data_page_index = data_ptr / PAGE_SIZE - first_data_page;
        data_page_offset = offset_in_page(data_ptr);

        page_length = remain;
        if ((shmem_page_offset + page_length) > PAGE_SIZE)
            page_length = PAGE_SIZE - shmem_page_offset;
        if ((data_page_offset + page_length) > PAGE_SIZE)
            page_length = PAGE_SIZE - data_page_offset;

        page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
        if (IS_ERR(page)) {
            ret = PTR_ERR(page);
            goto out;
        }

        if (do_bit17_swizzling) {
            slow_shmem_bit17_copy(page,
                                  shmem_page_offset,
                                  user_pages[data_page_index],
                                  data_page_offset,
                                  page_length,
                                  1);
        } else {
            slow_shmem_copy(user_pages[data_page_index],
                            data_page_offset,
                            page,
                            shmem_page_offset,
                            page_length);
        }

        mark_page_accessed(page);
        page_cache_release(page);

        remain -= page_length;
        data_ptr += page_length;
        offset += page_length;
    }

out:
    for (i = 0; i < pinned_pages; i++) {
        SetPageDirty(user_pages[i]);
        mark_page_accessed(user_pages[i]);
        page_cache_release(user_pages[i]);
    }
    drm_free_large(user_pages);

    return ret;
}
#endif

static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
    uint32_t gtt_size;

    if (INTEL_INFO(dev)->gen >= 4 ||
        tiling_mode == I915_TILING_NONE)
        return size;

    /* Previous chips need a power-of-two fence region when tiling */
    if (INTEL_INFO(dev)->gen == 3)
        gtt_size = 1024*1024;
    else
        gtt_size = 512*1024;

    while (gtt_size < size)
        gtt_size <<= 1;

    return gtt_size;
}
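
/*
 * Example of the sizing above: a 1.5MB X-tiled object on gen3 starts from
 * the 1MB minimum and doubles to a 2MB fence region; the same object on
 * gen2 starts from 512KB and likewise ends at 2MB.  On gen4+ (or with no
 * tiling) the object size is returned unchanged, since those chips have no
 * power-of-two fence constraint.
 */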

/**
 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
                           uint32_t size,
                           int tiling_mode)
{
    /*
     * Minimum alignment is 4k (GTT page size), but might be greater
     * if a fence register is needed for the object.
     */
    if (INTEL_INFO(dev)->gen >= 4 ||
        tiling_mode == I915_TILING_NONE)
        return 4096;

    /*
     * Previous chips need to be aligned to the size of the smallest
     * fence register that can contain the object.
     */
    return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

/**
 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
 *                                       unfenced object
 * @dev: the device
 * @size: size of the object
 * @tiling_mode: tiling mode of the object
 *
 * Return the required GTT alignment for an object, only taking into account
 * unfenced tiled surface requirements.
 */
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
                                    uint32_t size,
                                    int tiling_mode)
{
    /*
     * Minimum alignment is 4k (GTT page size) for sane hw.
     */
    if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
        tiling_mode == I915_TILING_NONE)
        return 4096;

    /* Previous hardware however needs to be aligned to a power-of-two
     * tile height.  The simplest method for determining this is to reuse
     * the power-of-two tiled object size.
     */
    return i915_gem_get_gtt_size(dev, size, tiling_mode);
}

static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
                              gfp_t gfpmask)
{
    int page_count, i;
    struct page *page;

    /* Get the list of pages out of our struct file.  They'll be pinned
     * at this point until we release them.
     */
    page_count = obj->base.size / PAGE_SIZE;
    BUG_ON(obj->pages != NULL);
    obj->pages = malloc(page_count * sizeof(struct page *));
    if (obj->pages == NULL)
        return -ENOMEM;

    for (i = 0; i < page_count; i++) {
        page = (struct page *)AllocPage();  // oh-oh
        if (IS_ERR(page))
            goto err_pages;

        obj->pages[i] = page;
    }

//    if (obj->tiling_mode != I915_TILING_NONE)
//        i915_gem_object_do_bit_17_swizzle(obj);

    return 0;

err_pages:
    while (i--)
        FreePage(obj->pages[i]);

    free(obj->pages);
    obj->pages = NULL;
    return PTR_ERR(page);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
    int page_count = obj->base.size / PAGE_SIZE;
    int i;

    BUG_ON(obj->madv == __I915_MADV_PURGED);

//    if (obj->tiling_mode != I915_TILING_NONE)
//        i915_gem_object_save_bit_17_swizzle(obj);

    if (obj->madv == I915_MADV_DONTNEED)
        obj->dirty = 0;

    for (i = 0; i < page_count; i++) {
        FreePage(obj->pages[i]);
    }
    obj->dirty = 0;

    free(obj->pages);
    obj->pages = NULL;
}

void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring,
                               u32 seqno)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    BUG_ON(ring == NULL);
    obj->ring = ring;

    /* Add a reference if we're newly entering the active list. */
    if (!obj->active) {
        drm_gem_object_reference(&obj->base);
        obj->active = 1;
    }

    /* Move from whatever list we were on to the tail of execution. */
    list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
    list_move_tail(&obj->ring_list, &ring->active_list);

    obj->last_rendering_seqno = seqno;
    if (obj->fenced_gpu_access) {
        struct drm_i915_fence_reg *reg;

        BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);

        obj->last_fenced_seqno = seqno;
        obj->last_fenced_ring = ring;

        reg = &dev_priv->fence_regs[obj->fence_reg];
        list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
    }
}

static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
    list_del_init(&obj->ring_list);
    obj->last_rendering_seqno = 0;
}

static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    BUG_ON(!obj->active);
    list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);

    i915_gem_object_move_off_active(obj);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
    struct inode *inode;

    /* Our goal here is to return as much of the memory as
     * is possible back to the system as we are called from OOM.
     * To do this we must instruct the shmfs to drop all of its
     * backing pages, *now*.
     */

    obj->madv = __I915_MADV_PURGED;
}

static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
    return obj->madv == I915_MADV_DONTNEED;
}

static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
                               uint32_t flush_domains)
{
    struct drm_i915_gem_object *obj, *next;

    list_for_each_entry_safe(obj, next,
                             &ring->gpu_write_list,
                             gpu_write_list) {
        if (obj->base.write_domain & flush_domains) {
            uint32_t old_write_domain = obj->base.write_domain;

            obj->base.write_domain = 0;
            list_del_init(&obj->gpu_write_list);
            i915_gem_object_move_to_active(obj, ring,
                                           i915_gem_next_request_seqno(ring));
        }
    }
}
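
/*
 * Sketch of the state machine the helpers above implement: an object enters
 * the active list (taking a GEM reference) when the GPU starts using it;
 * when its last request completes while a GPU write domain is still dirty,
 * it moves to the flushing list; only once those writes have been flushed
 * does it drop back to the inactive list and release the reference.
 */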

/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
{
    int ret;

    /* This function only exists to support waiting for existing rendering,
     * not for emitting required flushes.
     */
    BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);

    /* If there is rendering queued on the buffer being evicted, wait for
     * it.
     */
    if (obj->active) {
//        ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
//        if (ret)
//            return ret;
    }

    return 0;
}

static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
    u32 old_write_domain, old_read_domains;

    /* Act as a barrier for all accesses through the GTT */
    mb();

    /* Force a pagefault for domain tracking on next user access */
//    i915_gem_release_mmap(obj);

    if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
        return;

    old_read_domains = obj->base.read_domains;
    old_write_domain = obj->base.write_domain;

    obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
    obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;

    trace_i915_gem_object_change_domain(obj,
                                        old_read_domains,
                                        old_write_domain);
}

/**
 * Unbinds an object from the GTT aperture.
 */
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
    int ret = 0;

    if (obj->gtt_space == NULL)
        return 0;

    if (obj->pin_count != 0) {
        DRM_ERROR("Attempting to unbind pinned buffer\n");
        return -EINVAL;
    }

    ret = i915_gem_object_finish_gpu(obj);
    if (ret == -ERESTARTSYS)
        return ret;
    /* Continue on if we fail due to EIO, the GPU is hung so we
     * should be safe and we need to cleanup or else we might
     * cause memory corruption through use-after-free.
     */

    i915_gem_object_finish_gtt(obj);

    /* Move the object to the CPU domain to ensure that
     * any possible CPU writes while it's not in the GTT
     * are flushed when we go to remap it.
     */
    if (ret == 0)
        ret = i915_gem_object_set_to_cpu_domain(obj, 1);
    if (ret == -ERESTARTSYS)
        return ret;
    if (ret) {
        /* In the event of a disaster, abandon all caches and
         * hope for the best.
         */
        i915_gem_clflush_object(obj);
        obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
    }

    /* release the fence reg _after_ flushing */
    ret = i915_gem_object_put_fence(obj);
    if (ret == -ERESTARTSYS)
        return ret;

    trace_i915_gem_object_unbind(obj);

    i915_gem_gtt_unbind_object(obj);
    i915_gem_object_put_pages_gtt(obj);

    list_del_init(&obj->gtt_list);
    list_del_init(&obj->mm_list);
    /* Avoid an unnecessary call to unbind on rebind. */
    obj->map_and_fenceable = true;

    drm_mm_put_block(obj->gtt_space);
    obj->gtt_space = NULL;
    obj->gtt_offset = 0;

    if (i915_gem_object_is_purgeable(obj))
        i915_gem_object_truncate(obj);

    return ret;
}

int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
{
    int ret;

    if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
        return 0;

    trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);

    ret = ring->flush(ring, invalidate_domains, flush_domains);
    if (ret)
        return ret;

    if (flush_domains & I915_GEM_GPU_DOMAINS)
        i915_gem_process_flushing_list(ring, flush_domains);

    return 0;
}

static int i915_ring_idle(struct intel_ring_buffer *ring)
{
    int ret;

    if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
        return 0;

    if (!list_empty(&ring->gpu_write_list)) {
        ret = i915_gem_flush_ring(ring,
                                  I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
            return ret;
    }

    return 0; //i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}

int
i915_gpu_idle(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret, i;

    /* Flush everything onto the inactive list. */
    for (i = 0; i < I915_NUM_RINGS; i++) {
        ret = i915_ring_idle(&dev_priv->ring[i]);
        if (ret)
            return ret;
    }

    return 0;
}

static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
    return i915_seqno_passed(ring->get_seqno(ring), seqno);
}

static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
                            struct intel_ring_buffer *pipelined)
{
    int ret;

    if (obj->fenced_gpu_access) {
        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
            ret = i915_gem_flush_ring(obj->last_fenced_ring,
                                      0, obj->base.write_domain);
            if (ret)
                return ret;
        }

        obj->fenced_gpu_access = false;
    }

    if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
        if (!ring_passed_seqno(obj->last_fenced_ring,
                               obj->last_fenced_seqno)) {
//            ret = i915_wait_request(obj->last_fenced_ring,
//                                    obj->last_fenced_seqno);
//            if (ret)
//                return ret;
        }

        obj->last_fenced_seqno = 0;
        obj->last_fenced_ring = NULL;
    }

    /* Ensure that all CPU reads are completed before installing a fence
     * and all writes before removing the fence.
     */
    if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
        mb();

    return 0;
}

int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
    int ret;

//    if (obj->tiling_mode)
//        i915_gem_release_mmap(obj);

    ret = i915_gem_object_flush_fence(obj, NULL);
    if (ret)
        return ret;

    if (obj->fence_reg != I915_FENCE_REG_NONE) {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        i915_gem_clear_fence_reg(obj->base.dev,
                                 &dev_priv->fence_regs[obj->fence_reg]);

        obj->fence_reg = I915_FENCE_REG_NONE;
    }

    return 0;
}

/**
 * i915_gem_clear_fence_reg - clear out fence register info
 * @dev: the device
 * @reg: the fence register to clear
 *
 * Zeroes out the fence register itself and clears out the associated
 * data structures in dev_priv and obj.
 */
static void
i915_gem_clear_fence_reg(struct drm_device *dev,
                         struct drm_i915_fence_reg *reg)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    uint32_t fence_reg = reg - dev_priv->fence_regs;

    switch (INTEL_INFO(dev)->gen) {
    case 7:
    case 6:
        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
        break;
    case 5:
    case 4:
        I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
        break;
    case 3:
        if (fence_reg >= 8)
            fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
        else
    case 2:
            fence_reg = FENCE_REG_830_0 + fence_reg * 4;

        I915_WRITE(fence_reg, 0);
        break;
    }

    list_del_init(&reg->lru_list);
    reg->obj = NULL;
    reg->setup_seqno = 0;
}
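
/*
 * Note on the switch above: gen3 has eight extra fence registers in a second
 * bank (FENCE_REG_945_8), while gen2 keeps all of its fences at
 * FENCE_REG_830_0.  The unusual "else ... case 2:" construct lets gen3 fall
 * into the gen2 address computation and the shared 32-bit write for its
 * first eight registers instead of duplicating that code.
 */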

/**
 * Finds free space in the GTT aperture and binds the object there.
 */
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                            unsigned alignment,
                            bool map_and_fenceable)
{
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct drm_mm_node *free_space;
    gfp_t gfpmask = 0; //__GFP_NORETRY | __GFP_NOWARN;
    u32 size, fence_size, fence_alignment, unfenced_alignment;
    bool mappable, fenceable;
    int ret;

    if (obj->madv != I915_MADV_WILLNEED) {
        DRM_ERROR("Attempting to bind a purgeable object\n");
        return -EINVAL;
    }

    fence_size = i915_gem_get_gtt_size(dev,
                                       obj->base.size,
                                       obj->tiling_mode);
    fence_alignment = i915_gem_get_gtt_alignment(dev,
                                                 obj->base.size,
                                                 obj->tiling_mode);
    unfenced_alignment =
        i915_gem_get_unfenced_gtt_alignment(dev,
                                            obj->base.size,
                                            obj->tiling_mode);

    if (alignment == 0)
        alignment = map_and_fenceable ? fence_alignment :
                                        unfenced_alignment;
    if (map_and_fenceable && alignment & (fence_alignment - 1)) {
        DRM_ERROR("Invalid object alignment requested %u\n", alignment);
        return -EINVAL;
    }

    size = map_and_fenceable ? fence_size : obj->base.size;

    /* If the object is bigger than the entire aperture, reject it early
     * before evicting everything in a vain attempt to find space.
     */
    if (obj->base.size >
        (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
        DRM_ERROR("Attempting to bind an object larger than the aperture\n");
        return -E2BIG;
    }

search_free:
    if (map_and_fenceable)
        free_space =
            drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
                                        size, alignment, 0,
                                        dev_priv->mm.gtt_mappable_end,
                                        0);
    else
        free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
                                        size, alignment, 0);

    if (free_space != NULL) {
        if (map_and_fenceable)
            obj->gtt_space =
                drm_mm_get_block_range_generic(free_space,
                                               size, alignment, 0,
                                               dev_priv->mm.gtt_mappable_end,
                                               0);
        else
            obj->gtt_space =
                drm_mm_get_block(free_space, size, alignment);
    }
    if (obj->gtt_space == NULL) {
        /* If the gtt is empty and we're still having trouble
         * fitting our object in, we're out of memory.
         */
        ret = 1; //i915_gem_evict_something(dev, size, alignment,
                 //                         map_and_fenceable);
        if (ret)
            return ret;

        goto search_free;
    }

    ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
    if (ret) {
        drm_mm_put_block(obj->gtt_space);
        obj->gtt_space = NULL;
#if 0
        if (ret == -ENOMEM) {
            /* first try to reclaim some memory by clearing the GTT */
            ret = i915_gem_evict_everything(dev, false);
            if (ret) {
                /* now try to shrink everyone else */
                if (gfpmask) {
                    gfpmask = 0;
                    goto search_free;
                }

                return -ENOMEM;
            }

            goto search_free;
        }
#endif
        return ret;
    }

    ret = i915_gem_gtt_bind_object(obj);
    if (ret) {
        i915_gem_object_put_pages_gtt(obj);
        drm_mm_put_block(obj->gtt_space);
        obj->gtt_space = NULL;

//        if (i915_gem_evict_everything(dev, false))
            return ret;

//        goto search_free;
    }

    list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
    list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);

    /* Assert that the object is not currently in any GPU domain.  As it
     * wasn't in the GTT, there shouldn't be any way it could have been in
     * a GPU cache
     */
    BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
    BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

    obj->gtt_offset = obj->gtt_space->start;

    fenceable =
        obj->gtt_space->size == fence_size &&
        (obj->gtt_space->start & (fence_alignment - 1)) == 0;

    mappable =
        obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;

    obj->map_and_fenceable = mappable && fenceable;

    trace_i915_gem_object_bind(obj, map_and_fenceable);
    return 0;
}

void
i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
    /* If we don't have a page list set up, then we're not pinned
     * to GPU, and we can ignore the cache flush because it'll happen
     * again at bind time.
     */
    if (obj->pages == NULL)
        return;

    /* If the GPU is snooping the contents of the CPU cache,
     * we do not need to manually clear the CPU cache lines.  However,
     * the caches are only snooped when the render cache is
     * flushed/invalidated.  As we always have to emit invalidations
     * and flushes when moving into and out of the RENDER domain, correct
     * snooping behaviour occurs naturally as the result of our domain
     * tracking.
     */
    if (obj->cache_level != I915_CACHE_NONE)
        return;

    if (obj->mapped != NULL)
    {
        uint8_t *page_virtual;
        unsigned int i;

        page_virtual = obj->mapped;
        asm volatile("mfence");
        for (i = 0; i < obj->base.size; i += x86_clflush_size)
            clflush(page_virtual + i);
        asm volatile("mfence");
    }
    else
    {
        uint8_t *page_virtual;
        unsigned int i;
        page_virtual = AllocKernelSpace(obj->base.size);
        if (page_virtual != NULL)
        {
            u32_t *src, *dst;
            u32 count;

#define page_tabs 0xFDC00000   /* really dirty hack */

            src = (u32_t*)obj->pages;
            dst = &((u32_t*)page_tabs)[(u32_t)page_virtual >> 12];
            count = obj->base.size/4096;

            while (count--)
            {
                *dst++ = (0xFFFFF000 & *src++) | 0x001;
            }

            asm volatile("mfence");
            for (i = 0; i < obj->base.size; i += x86_clflush_size)
                clflush(page_virtual + i);
            asm volatile("mfence");
            FreeKernelSpace(page_virtual);
        }
        else
        {
            asm volatile (
                "mfence \n"
                "wbinvd \n"   /* this is really ugly */
                "mfence");
        }
    }
}
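
/*
 * The three branches above trade precision for availability: flush through
 * the existing mapping when there is one; otherwise map the object's pages
 * into a temporary kernel window (by writing their physical addresses
 * straight into the page tables via the page_tabs hack) and clflush that;
 * and only as a last resort fall back to wbinvd, which writes back and
 * invalidates every cache line on the CPU and therefore stalls the whole
 * machine.
 */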

/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
    if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
        return 0;

    /* Queue the GPU write cache flushing we need. */
    return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
    uint32_t old_write_domain;

    if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
        return;

    /* No actual flushing is required for the GTT write domain.  Writes
     * to it immediately go to main memory as far as we know, so there's
     * no chipset flush.  It also doesn't land in render cache.
     *
     * However, we do have to enforce the order so that all writes through
     * the GTT land before any writes to the device, such as updates to
     * the GATT itself.
     */
    wmb();

    old_write_domain = obj->base.write_domain;
    obj->base.write_domain = 0;

    trace_i915_gem_object_change_domain(obj,
                                        obj->base.read_domains,
                                        old_write_domain);
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
    uint32_t old_write_domain;

    if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
        return;

    i915_gem_clflush_object(obj);
    intel_gtt_chipset_flush();
    old_write_domain = obj->base.write_domain;
    obj->base.write_domain = 0;

    trace_i915_gem_object_change_domain(obj,
                                        obj->base.read_domains,
                                        old_write_domain);
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
    uint32_t old_write_domain, old_read_domains;
    int ret;

    /* Not valid to be called on unbound objects. */
    if (obj->gtt_space == NULL)
        return -EINVAL;

    if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
        return 0;

    ret = i915_gem_object_flush_gpu_write_domain(obj);
    if (ret)
        return ret;

    if (obj->pending_gpu_write || write) {
        ret = i915_gem_object_wait_rendering(obj);
        if (ret)
            return ret;
    }

    i915_gem_object_flush_cpu_write_domain(obj);

    old_write_domain = obj->base.write_domain;
    old_read_domains = obj->base.read_domains;

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
    obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
    if (write) {
        obj->base.read_domains = I915_GEM_DOMAIN_GTT;
        obj->base.write_domain = I915_GEM_DOMAIN_GTT;
        obj->dirty = 1;
    }

    trace_i915_gem_object_change_domain(obj,
                                        old_read_domains,
                                        old_write_domain);

    return 0;
}

#if 0
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level)
{
    int ret;

    if (obj->cache_level == cache_level)
        return 0;

    if (obj->pin_count) {
        DRM_DEBUG("can not change the cache level of pinned objects\n");
        return -EBUSY;
    }

    if (obj->gtt_space) {
        ret = i915_gem_object_finish_gpu(obj);
        if (ret)
            return ret;

        i915_gem_object_finish_gtt(obj);

        /* Before SandyBridge, you could not use tiling or fence
         * registers with snooped memory, so relinquish any fences
         * currently pointing to our region in the aperture.
         */
        if (INTEL_INFO(obj->base.dev)->gen < 6) {
            ret = i915_gem_object_put_fence(obj);
            if (ret)
                return ret;
        }

        i915_gem_gtt_rebind_object(obj, cache_level);
    }

    if (cache_level == I915_CACHE_NONE) {
        u32 old_read_domains, old_write_domain;

        /* If we're coming from LLC cached, then we haven't
         * actually been tracking whether the data is in the
         * CPU cache or not, since we only allow one bit set
         * in obj->write_domain and have been skipping the clflushes.
         * Just set it to the CPU cache for now.
         */
        WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
        WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);

        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;

        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
    }

    obj->cache_level = cache_level;
    return 0;
}
#endif

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 *
 * For the display plane, we want to be in the GTT but out of any write
 * domains. So in many ways this looks like set_to_gtt_domain() apart from the
 * ability to pipeline the waits, pinning and any additional subtleties
 * that may differentiate the display plane from ordinary buffers.
 */
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                     u32 alignment,
                     struct intel_ring_buffer *pipelined)
{
    u32 old_read_domains, old_write_domain;
    int ret;

    ret = i915_gem_object_flush_gpu_write_domain(obj);
    if (ret)
        return ret;

    if (pipelined != obj->ring) {
        ret = i915_gem_object_wait_rendering(obj);
        if (ret == -ERESTARTSYS)
            return ret;
    }

    /* The display engine is not coherent with the LLC cache on gen6. As
     * a result, we make sure that the pinning that is about to occur is
     * done with uncached PTEs. This is lowest common denominator for all
     * chipsets.
     *
     * However for gen6+, we could do better by using the GFDT bit instead
     * of uncaching, which would allow us to flush all the LLC-cached data
     * with that bit in the PTE to main memory with just one PIPE_CONTROL.
     */
//    ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
//    if (ret)
//        return ret;

    /* As the user may map the buffer once pinned in the display plane
     * (e.g. libkms for the bootup splash), we have to ensure that we
     * always use map_and_fenceable for all scanout buffers.
     */
    ret = i915_gem_object_pin(obj, alignment, true);
    if (ret)
        return ret;

    i915_gem_object_flush_cpu_write_domain(obj);

    old_write_domain = obj->base.write_domain;
    old_read_domains = obj->base.read_domains;

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
    obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

    trace_i915_gem_object_change_domain(obj,
                        old_read_domains,
                        old_write_domain);

    return 0;
}
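/* Illustrative call pattern (an assumption, not taken from this file): a
 * pageflip path pins the framebuffer through the helper above, passing the
 * ring that will execute the flip so the waits can be pipelined. The helper
 * name and the 4096-byte alignment are placeholders.
 */
#if 0
static int pin_fb_for_flip(struct drm_i915_gem_object *fb,
               struct intel_ring_buffer *ring)
{
    return i915_gem_object_pin_to_display_plane(fb, 4096, ring);
}
#endif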

int
i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
    int ret;

    if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
        return 0;

    if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
        ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
        if (ret)
            return ret;
    }

    /* Ensure that we invalidate the GPU's caches and TLBs. */
    obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;

    return i915_gem_object_wait_rendering(obj);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
static int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
    uint32_t old_write_domain, old_read_domains;
    int ret;

    if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
        return 0;

    ret = i915_gem_object_flush_gpu_write_domain(obj);
    if (ret)
        return ret;

    ret = i915_gem_object_wait_rendering(obj);
    if (ret)
        return ret;

    i915_gem_object_flush_gtt_write_domain(obj);

    /* If we have a partially-valid cache of the object in the CPU,
     * finish invalidating it and free the per-page flags.
     */
    i915_gem_object_set_to_full_cpu_read_domain(obj);

    old_write_domain = obj->base.write_domain;
    old_read_domains = obj->base.read_domains;

    /* Flush the CPU cache if it's still invalid. */
    if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
        i915_gem_clflush_object(obj);

        obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
    }

    /* It should now be out of any other write domains, and we can update
     * the domain values for our changes.
     */
    BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

    /* If we're writing through the CPU, then the GPU read domains will
     * need to be invalidated at next use.
     */
    if (write) {
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
    }

    trace_i915_gem_object_change_domain(obj,
                        old_read_domains,
                        old_write_domain);

    return 0;
}
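/* Sketch of the expected call order for a CPU readback (the helper is
 * hypothetical; the domain move is real, the mapping step is elided).
 * Moving to the CPU domain first guarantees pending GPU and GTT writes are
 * flushed and the pages clflushed before the CPU dereferences them.
 */
#if 0
static int begin_cpu_read(struct drm_i915_gem_object *obj)
{
    int ret = i915_gem_object_set_to_cpu_domain(obj, false);
    if (ret)
        return ret;
    /* ... map the object's pages and read them here ... */
    return 0;
}
#endif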

/**
 * Moves the object from a partially CPU read to a full one.
 *
 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
 */
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
    if (!obj->page_cpu_valid)
        return;

    /* If we're partially in the CPU read domain, finish moving it in. */
    if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
        /* No per-page flush is performed here; the branch is
         * kept only for structure. */
    }

    /* Free the page_cpu_valid mappings which are now stale, whether
     * or not we've got I915_GEM_DOMAIN_CPU.
     */
    kfree(obj->page_cpu_valid);
    obj->page_cpu_valid = NULL;
}
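/* Assumed shape of the per-page tracking torn down above: page_cpu_valid is
 * a byte-per-page map where a nonzero entry marks a page already flushed
 * into the CPU read domain. The accessor below is hypothetical.
 */
#if 0
static bool page_is_cpu_valid(struct drm_i915_gem_object *obj, int page)
{
    return obj->page_cpu_valid != NULL && obj->page_cpu_valid[page] != 0;
}
#endif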

int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
            uint32_t alignment,
            bool map_and_fenceable)
{
    struct drm_device *dev = obj->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int ret;

    BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);

#if 0
    if (obj->gtt_space != NULL) {
        if ((alignment && obj->gtt_offset & (alignment - 1)) ||
            (map_and_fenceable && !obj->map_and_fenceable)) {
            WARN(obj->pin_count,
                 "bo is already pinned with incorrect alignment:"
                 " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
                 " obj->map_and_fenceable=%d\n",
                 obj->gtt_offset, alignment,
                 map_and_fenceable,
                 obj->map_and_fenceable);
            ret = i915_gem_object_unbind(obj);
            if (ret)
                return ret;
        }
    }
#endif

    if (obj->gtt_space == NULL) {
        ret = i915_gem_object_bind_to_gtt(obj, alignment,
                          map_and_fenceable);
        if (ret)
            return ret;
    }

    if (obj->pin_count++ == 0) {
        if (!obj->active)
            list_move_tail(&obj->mm_list,
                       &dev_priv->mm.pinned_list);
    }
    obj->pin_mappable |= map_and_fenceable;

    return 0;
}

void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;

    BUG_ON(obj->pin_count == 0);
    BUG_ON(obj->gtt_space == NULL);

    if (--obj->pin_count == 0) {
        if (!obj->active)
            list_move_tail(&obj->mm_list,
                       &dev_priv->mm.inactive_list);
        obj->pin_mappable = false;
    }
}
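/* Assumed pairing discipline, shown as a sketch: every successful pin must
 * be matched by exactly one unpin once the mapping is no longer needed,
 * since pin_count drives the pinned/inactive list moves above. The helper
 * and the 4096-byte alignment are illustrative only.
 */
#if 0
static int use_pinned(struct drm_i915_gem_object *obj)
{
    int ret = i915_gem_object_pin(obj, 4096, true);
    if (ret)
        return ret;
    /* ... access the object through obj->gtt_offset ... */
    i915_gem_object_unpin(obj);
    return 0;
}
#endif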
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                          size_t size)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_gem_object *obj;

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (obj == NULL)
        return NULL;

    if (drm_gem_object_init(dev, &obj->base, size) != 0) {
        kfree(obj);
        return NULL;
    }

    i915_gem_info_add_obj(dev_priv, size);

    obj->base.write_domain = I915_GEM_DOMAIN_CPU;
    obj->base.read_domains = I915_GEM_DOMAIN_CPU;

    if (IS_GEN6(dev) || IS_GEN7(dev)) {
        /* On Gen6, we can have the GPU use the LLC (the CPU
         * cache) for about a 10% performance improvement
         * compared to uncached. Graphics requests other than
         * display scanout are coherent with the CPU in
         * accessing this cache. This means in this mode we
         * don't need to clflush on the CPU side, and on the
         * GPU side we only need to flush internal caches to
         * get data visible to the CPU.
         *
         * However, we maintain the display planes as UC, and so
         * need to rebind when first used as such.
         */
        obj->cache_level = I915_CACHE_LLC;
    } else
        obj->cache_level = I915_CACHE_NONE;

    obj->base.driver_private = NULL;
    obj->fence_reg = I915_FENCE_REG_NONE;
    INIT_LIST_HEAD(&obj->mm_list);
    INIT_LIST_HEAD(&obj->gtt_list);
    INIT_LIST_HEAD(&obj->ring_list);
    INIT_LIST_HEAD(&obj->exec_list);
    INIT_LIST_HEAD(&obj->gpu_write_list);
    obj->madv = I915_MADV_WILLNEED;
    /* Avoid an unnecessary call to unbind on the first bind. */
    obj->map_and_fenceable = true;

    return obj;
}
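/* Hypothetical allocation sketch: objects created here start CPU-domain and
 * unbound; release goes through the GEM refcount, which ultimately reaches
 * i915_gem_free_object() below. The helper name and size are placeholders.
 */
#if 0
static struct drm_i915_gem_object *alloc_scratch(struct drm_device *dev)
{
    return i915_gem_alloc_object(dev, 4096);
}
#endif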

int i915_gem_init_object(struct drm_gem_object *obj)
{
    BUG();

    return 0;
}

static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
    struct drm_device *dev = obj->base.dev;
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret;

    ret = i915_gem_object_unbind(obj);
    if (ret == -ERESTARTSYS) {
        list_move(&obj->mm_list,
              &dev_priv->mm.deferred_free_list);
        return;
    }

    trace_i915_gem_object_destroy(obj);

//    if (obj->base.map_list.map)
//        drm_gem_free_mmap_offset(&obj->base);

    drm_gem_object_release(&obj->base);
    i915_gem_info_remove_obj(dev_priv, obj->base.size);

    kfree(obj->page_cpu_valid);
    kfree(obj->bit_17);
    kfree(obj);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
    struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
    struct drm_device *dev = obj->base.dev;

    while (obj->pin_count > 0)
        i915_gem_object_unpin(obj);

//    if (obj->phys_obj)
//        i915_gem_detach_phys_object(dev, obj);

    i915_gem_free_object_tail(obj);
}

int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret;

    ret = intel_init_render_ring_buffer(dev);
    if (ret)
        return ret;

    if (HAS_BSD(dev)) {
        ret = intel_init_bsd_ring_buffer(dev);
        if (ret)
            goto cleanup_render_ring;
    }

    if (HAS_BLT(dev)) {
        ret = intel_init_blt_ring_buffer(dev);
        if (ret)
            goto cleanup_bsd_ring;
    }

    dev_priv->next_seqno = 1;

    return 0;

cleanup_bsd_ring:
    intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
    intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
    return ret;
}

#if 0
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int i;

    for (i = 0; i < I915_NUM_RINGS; i++)
        intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}

int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    drm_i915_private_t *dev_priv = dev->dev_private;
    int ret, i;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return 0;

    if (atomic_read(&dev_priv->mm.wedged)) {
        DRM_ERROR("Reenabling wedged hardware, good luck\n");
        atomic_set(&dev_priv->mm.wedged, 0);
    }

    mutex_lock(&dev->struct_mutex);
    dev_priv->mm.suspended = 0;

    ret = i915_gem_init_ringbuffer(dev);
    if (ret != 0) {
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }

    BUG_ON(!list_empty(&dev_priv->mm.active_list));
    BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
    BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
    for (i = 0; i < I915_NUM_RINGS; i++) {
        BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
        BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
    }
    mutex_unlock(&dev->struct_mutex);

    ret = drm_irq_install(dev);
    if (ret)
        goto cleanup_ringbuffer;

    return 0;

cleanup_ringbuffer:
    mutex_lock(&dev->struct_mutex);
    i915_gem_cleanup_ringbuffer(dev);
    dev_priv->mm.suspended = 1;
    mutex_unlock(&dev->struct_mutex);

    return ret;
}

int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
               struct drm_file *file_priv)
{
    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return 0;

    drm_irq_uninstall(dev);
    return i915_gem_idle(dev);
}

void
i915_gem_lastclose(struct drm_device *dev)
{
    int ret;

    if (drm_core_check_feature(dev, DRIVER_MODESET))
        return;

    ret = i915_gem_idle(dev);
    if (ret)
        DRM_ERROR("failed to idle hardware: %d\n", ret);
}
#endif

static void
init_ring_lists(struct intel_ring_buffer *ring)
{
    INIT_LIST_HEAD(&ring->active_list);
    INIT_LIST_HEAD(&ring->request_list);
    INIT_LIST_HEAD(&ring->gpu_write_list);
}

void
i915_gem_load(struct drm_device *dev)
{
    int i;
    drm_i915_private_t *dev_priv = dev->dev_private;

    INIT_LIST_HEAD(&dev_priv->mm.active_list);
    INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
    INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
    INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
    INIT_LIST_HEAD(&dev_priv->mm.fence_list);
    INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
    INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
    for (i = 0; i < I915_NUM_RINGS; i++)
        init_ring_lists(&dev_priv->ring[i]);
    for (i = 0; i < I915_MAX_NUM_FENCES; i++)
        INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);

    /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
    if (IS_GEN3(dev)) {
        u32 tmp = I915_READ(MI_ARB_STATE);
        if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
            /* arb state is a masked write, so set bit + bit in mask */
            tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
            I915_WRITE(MI_ARB_STATE, tmp);
        }
    }

    dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

    if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
        dev_priv->num_fence_regs = 16;
    else
        dev_priv->num_fence_regs = 8;

    /* Initialize fence registers to zero */
    for (i = 0; i < dev_priv->num_fence_regs; i++) {
        i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
    }

    i915_gem_detect_bit_6_swizzle(dev);

    dev_priv->mm.interruptible = true;

//    dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
//    dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
//    register_shrinker(&dev_priv->mm.inactive_shrinker);
}
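/* Sketch of the masked-write convention used for MI_ARB_STATE above (this
 * helper is an assumption, not part of this file): the new bit value goes in
 * the low half of the register and the write-enable mask in the high half,
 * so unrelated bits are left untouched by the write.
 */
#if 0
static inline u32 masked_bit_enable(u32 bit)
{
    return bit | (bit << MI_ARB_MASK_SHIFT);
}
#endif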