/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse
 *    Thomas Hellstrom
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			__atomic_add_fetch(&rdev->gtt_usage.counter, size, __ATOMIC_RELAXED);
		else
			__atomic_sub_fetch(&rdev->gtt_usage.counter, size, __ATOMIC_RELAXED);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			__atomic_add_fetch(&rdev->vram_usage.counter, size, __ATOMIC_RELAXED);
		else
			__atomic_sub_fetch(&rdev->vram_usage.counter, size, __ATOMIC_RELAXED);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
						TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
						TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality. 512KB was measured as the
	 * optimal threshold.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

//	printf("%s rdev->flags %x bo->flags %x\n",
//	       __FUNCTION__, rdev->flags, bo->flags);

	/* this port always clears the write-combine flag */
	if (flags & RADEON_GEM_GTT_WC)
		bo->flags &= ~RADEON_GEM_GTT_WC;

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
//	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
//	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* report the VRAM configuration, then bring up TTM */
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
//	radeon_ttm_fini(rdev);
//	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

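/* Wait (or just poll, when @no_wait is set) for all pending GPU work on
 * the BO to finish, optionally returning its current memory type.
 */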
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}