Rev 6661
Rev | Author | Line No. | Line |
---|---|---|---|
1404 | serge | 1 | /* |
2 | * Copyright 2009 Jerome Glisse. |
3 | * All Rights Reserved. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the |
7 | * "Software"), to deal in the Software without restriction, including |
8 | * without limitation the rights to use, copy, modify, merge, publish, |
9 | * distribute, sub license, and/or sell copies of the Software, and to |
10 | * permit persons to whom the Software is furnished to do so, subject to |
11 | * the following conditions: |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. |
20 | * |
21 | * The above copyright notice and this permission notice (including the |
22 | * next paragraph) shall be included in all copies or substantial portions |
23 | * of the Software. |
24 | * |
25 | */ |
26 | /* |
27 | * Authors: |
28 | * Jerome Glisse |
29 | * Thomas Hellstrom |
30 | * Dave Airlie |
31 | */ |
32 | #include |
33 | #include |
34 | #include |
35 | #include |
2997 | Serge | 36 | #include |
1404 | serge | 37 | #include |
38 | #include |
39 | #include |
2997 | Serge | 40 | #include |
1404 | serge | 41 | #include "radeon_reg.h" |
42 | #include "radeon.h" |
43 | |
44 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) |
45 | |
46 | static int radeon_ttm_debugfs_init(struct radeon_device *rdev); |
5078 | serge | 47 | static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); |
1404 | serge | 48 | |
49 | static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) |
50 | { |
51 | struct radeon_mman *mman; |
52 | struct radeon_device *rdev; |
53 | |
54 | mman = container_of(bdev, struct radeon_mman, bdev); |
55 | rdev = container_of(mman, struct radeon_device, mman); |
56 | return rdev; |
57 | } |
58 | |
59 | |
60 | /* |
61 | * Global memory. |
62 | */ |
2997 | Serge | 63 | static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) |
1404 | serge | 64 | { |
65 | return ttm_mem_global_init(ref->object); |
66 | } |
67 | |
2997 | Serge | 68 | static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) |
1404 | serge | 69 | { |
70 | ttm_mem_global_release(ref->object); |
71 | } |
72 | |
73 | static int radeon_ttm_global_init(struct radeon_device *rdev) |
74 | { |
2997 | Serge | 75 | struct drm_global_reference *global_ref; |
1404 | serge | 76 | int r; |
77 | |
78 | rdev->mman.mem_global_referenced = false; |
79 | global_ref = &rdev->mman.mem_global_ref; |
2997 | Serge | 80 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; |
1404 | serge | 81 | global_ref->size = sizeof(struct ttm_mem_global); |
82 | global_ref->init = &radeon_ttm_mem_global_init; |
83 | global_ref->release = &radeon_ttm_mem_global_release; |
2997 | Serge | 84 | r = drm_global_item_ref(global_ref); |
1404 | serge | 85 | if (r != 0) { |
86 | DRM_ERROR("Failed setting up TTM memory accounting " |
87 | "subsystem.\n"); |
88 | return r; |
89 | } |
90 | |
91 | rdev->mman.bo_global_ref.mem_glob = |
92 | rdev->mman.mem_global_ref.object; |
93 | global_ref = &rdev->mman.bo_global_ref.ref; |
2997 | Serge | 94 | global_ref->global_type = DRM_GLOBAL_TTM_BO; |
1404 | serge | 95 | global_ref->size = sizeof(struct ttm_bo_global); |
96 | global_ref->init = &ttm_bo_global_init; |
97 | global_ref->release = &ttm_bo_global_release; |
2997 | Serge | 98 | r = drm_global_item_ref(global_ref); |
1404 | serge | 99 | if (r != 0) { |
100 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); |
2997 | Serge | 101 | drm_global_item_unref(&rdev->mman.mem_global_ref); |
1404 | serge | 102 | return r; |
103 | } |
104 | |
105 | rdev->mman.mem_global_referenced = true; |
106 | return 0; |
107 | } |
108 | |
109 | |
2997 | Serge | 110 | static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
111 | { |
112 | return 0; |
113 | } |
1404 | serge | 114 | |
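| /* radeon_init_mem_type() describes each TTM memory pool to the core: plain |
| * system RAM, the GART aperture (TTM_PL_TT, GPU offset rdev->mc.gtt_start) |
| * and on-board VRAM (TTM_PL_VRAM, GPU offset rdev->mc.vram_start), along |
| * with the caching modes each pool supports. */ |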
115 | static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
116 | struct ttm_mem_type_manager *man) |
117 | { |
118 | struct radeon_device *rdev; |
119 | |
120 | rdev = radeon_get_rdev(bdev); |
121 | |
122 | switch (type) { |
123 | case TTM_PL_SYSTEM: |
124 | /* System memory */ |
125 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
126 | man->available_caching = TTM_PL_MASK_CACHING; |
127 | man->default_caching = TTM_PL_FLAG_CACHED; |
128 | break; |
129 | case TTM_PL_TT: |
2997 | Serge | 130 | man->func = &ttm_bo_manager_func; |
131 | man->gpu_offset = rdev->mc.gtt_start; |
1404 | serge | 132 | man->available_caching = TTM_PL_MASK_CACHING; |
133 | man->default_caching = TTM_PL_FLAG_CACHED; |
134 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
6104 | serge | 135 | #if IS_ENABLED(CONFIG_AGP) |
1404 | serge | 136 | if (rdev->flags & RADEON_IS_AGP) { |
5078 | serge | 137 | if (!rdev->ddev->agp) { |
1404 | serge | 138 | DRM_ERROR("AGP is not enabled for memory type %u\n", |
139 | (unsigned)type); |
140 | return -EINVAL; |
141 | } |
142 | if (!rdev->ddev->agp->cant_use_aperture) |
2997 | Serge | 143 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
1404 | serge | 144 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
145 | TTM_PL_FLAG_WC; |
146 | man->default_caching = TTM_PL_FLAG_WC; |
2997 | Serge | 147 | } |
1404 | serge | 148 | #endif |
149 | break; |
150 | case TTM_PL_VRAM: |
151 | /* "On-card" video ram */ |
2997 | Serge | 152 | man->func = &ttm_bo_manager_func; |
153 | man->gpu_offset = rdev->mc.vram_start; |
1404 | serge | 154 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
155 | TTM_MEMTYPE_FLAG_MAPPABLE; |
156 | man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; |
157 | man->default_caching = TTM_PL_FLAG_WC; |
158 | break; |
159 | default: |
160 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); |
161 | return -EINVAL; |
162 | } |
163 | return 0; |
164 | } |
165 | |
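| /* radeon_evict_flags() decides where an evicted BO should go: VRAM BOs |
| * move to GTT (or to the CPU domain when no copy ring is ready), all |
| * other placements are evicted to CPU-cached system memory. */ |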
3764 | Serge | 166 | static void radeon_evict_flags(struct ttm_buffer_object *bo, |
167 | struct ttm_placement *placement) |
168 | { |
5271 | serge | 169 | static struct ttm_place placements = { |
170 | .fpfn = 0, |
171 | .lpfn = 0, |
172 | .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM |
173 | }; |
174 | |
3764 | Serge | 175 | struct radeon_bo *rbo; |
176 | |
177 | if (!radeon_ttm_bo_is_radeon_bo(bo)) { |
178 | placement->placement = &placements; |
179 | placement->busy_placement = &placements; |
180 | placement->num_placement = 1; |
181 | placement->num_busy_placement = 1; |
182 | return; |
183 | } |
184 | rbo = container_of(bo, struct radeon_bo, tbo); |
185 | switch (bo->mem.mem_type) { |
186 | case TTM_PL_VRAM: |
5271 | serge | 187 | if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false) |
3764 | Serge | 188 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
5271 | serge | 189 | else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size && |
190 | bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) { |
191 | unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
192 | int i; |
193 | |
194 | /* Try evicting to the CPU inaccessible part of VRAM |
195 | * first, but only set GTT as busy placement, so this |
196 | * BO will be evicted to GTT rather than causing other |
197 | * BOs to be evicted from VRAM |
198 | */ |
199 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM | |
200 | RADEON_GEM_DOMAIN_GTT); |
201 | rbo->placement.num_busy_placement = 0; |
202 | for (i = 0; i < rbo->placement.num_placement; i++) { |
203 | if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) { |
204 | if (rbo->placements[i].fpfn < fpfn) |
205 | rbo->placements[i].fpfn = fpfn; |
206 | } else { |
207 | rbo->placement.busy_placement = |
208 | &rbo->placements[i]; |
209 | rbo->placement.num_busy_placement = 1; |
210 | } |
211 | } |
212 | } else |
3764 | Serge | 213 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
214 | break; |
215 | case TTM_PL_TT: |
216 | default: |
217 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
218 | } |
219 | *placement = rbo->placement; |
220 | } |
221 | |
222 | static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) |
223 | { |
224 | return 0; |
225 | } |
226 | |
227 | static void radeon_move_null(struct ttm_buffer_object *bo, |
228 | struct ttm_mem_reg *new_mem) |
229 | { |
230 | struct ttm_mem_reg *old_mem = &bo->mem; |
231 | |
232 | BUG_ON(old_mem->mm_node != NULL); |
233 | *old_mem = *new_mem; |
234 | new_mem->mm_node = NULL; |
235 | } |
236 | |
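| /* radeon_move_blit() turns both placements into GPU linear addresses by |
| * adding rdev->mc.vram_start/gtt_start to the page offset, converts the |
| * size into GPU pages, and schedules an asynchronous blit on the copy |
| * ring; the returned fence is handed to ttm_bo_move_accel_cleanup() to |
| * complete the move. */ |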
5078 | serge | 237 | static int radeon_move_blit(struct ttm_buffer_object *bo, |
238 | bool evict, bool no_wait_gpu, |
239 | struct ttm_mem_reg *new_mem, |
240 | struct ttm_mem_reg *old_mem) |
241 | { |
242 | struct radeon_device *rdev; |
243 | uint64_t old_start, new_start; |
244 | struct radeon_fence *fence; |
5271 | serge | 245 | unsigned num_pages; |
5078 | serge | 246 | int r, ridx; |
247 | |
248 | rdev = radeon_get_rdev(bo->bdev); |
249 | ridx = radeon_copy_ring_index(rdev); |
6938 | serge | 250 | old_start = old_mem->start << PAGE_SHIFT; |
251 | new_start = new_mem->start << PAGE_SHIFT; |
5078 | serge | 252 | |
253 | switch (old_mem->mem_type) { |
254 | case TTM_PL_VRAM: |
255 | old_start += rdev->mc.vram_start; |
256 | break; |
257 | case TTM_PL_TT: |
258 | old_start += rdev->mc.gtt_start; |
259 | break; |
260 | default: |
261 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); |
262 | return -EINVAL; |
263 | } |
264 | switch (new_mem->mem_type) { |
265 | case TTM_PL_VRAM: |
266 | new_start += rdev->mc.vram_start; |
267 | break; |
268 | case TTM_PL_TT: |
269 | new_start += rdev->mc.gtt_start; |
270 | break; |
271 | default: |
272 | DRM_ERROR("Unknown placement %d\n", new_mem->mem_type); |
273 | return -EINVAL; |
274 | } |
275 | if (!rdev->ring[ridx].ready) { |
276 | DRM_ERROR("Trying to move memory with ring turned off.\n"); |
277 | return -EINVAL; |
278 | } |
279 | |
280 | BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); |
281 | |
5271 | serge | 282 | num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); |
283 | fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); |
284 | if (IS_ERR(fence)) |
285 | return PTR_ERR(fence); |
286 | |
287 | r = ttm_bo_move_accel_cleanup(bo, &fence->base, |
5078 | serge | 288 | evict, no_wait_gpu, new_mem); |
289 | radeon_fence_unref(&fence); |
290 | return r; |
291 | } |
292 | |
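| /* VRAM -> system moves happen in two hops: blit into a temporary GTT |
| * placement the GPU can address, then let ttm_bo_move_ttm() unbind that |
| * into plain system pages. */ |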
293 | static int radeon_move_vram_ram(struct ttm_buffer_object *bo, |
294 | bool evict, bool interruptible, |
295 | bool no_wait_gpu, |
296 | struct ttm_mem_reg *new_mem) |
297 | { |
298 | struct radeon_device *rdev; |
299 | struct ttm_mem_reg *old_mem = &bo->mem; |
300 | struct ttm_mem_reg tmp_mem; |
5271 | serge | 301 | struct ttm_place placements; |
5078 | serge | 302 | struct ttm_placement placement; |
303 | int r; |
304 | |
305 | rdev = radeon_get_rdev(bo->bdev); |
306 | tmp_mem = *new_mem; |
307 | tmp_mem.mm_node = NULL; |
308 | placement.num_placement = 1; |
309 | placement.placement = &placements; |
310 | placement.num_busy_placement = 1; |
311 | placement.busy_placement = &placements; |
5271 | serge | 312 | placements.fpfn = 0; |
313 | placements.lpfn = 0; |
314 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
5078 | serge | 315 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
316 | interruptible, no_wait_gpu); |
317 | if (unlikely(r)) { |
318 | return r; |
319 | } |
320 | |
321 | r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); |
322 | if (unlikely(r)) { |
323 | goto out_cleanup; |
324 | } |
325 | |
326 | r = ttm_tt_bind(bo->ttm, &tmp_mem); |
327 | if (unlikely(r)) { |
328 | goto out_cleanup; |
329 | } |
330 | r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); |
331 | if (unlikely(r)) { |
332 | goto out_cleanup; |
333 | } |
334 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); |
335 | out_cleanup: |
336 | ttm_bo_mem_put(bo, &tmp_mem); |
337 | return r; |
338 | } |
339 | |
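| /* The mirror of the above: bind the system pages into a temporary GTT |
| * placement first, then blit them up into VRAM. */ |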
340 | static int radeon_move_ram_vram(struct ttm_buffer_object *bo, |
341 | bool evict, bool interruptible, |
342 | bool no_wait_gpu, |
343 | struct ttm_mem_reg *new_mem) |
344 | { |
345 | struct radeon_device *rdev; |
346 | struct ttm_mem_reg *old_mem = &bo->mem; |
347 | struct ttm_mem_reg tmp_mem; |
348 | struct ttm_placement placement; |
5271 | serge | 349 | struct ttm_place placements; |
5078 | serge | 350 | int r; |
351 | |
352 | rdev = radeon_get_rdev(bo->bdev); |
353 | tmp_mem = *new_mem; |
354 | tmp_mem.mm_node = NULL; |
355 | placement.num_placement = 1; |
356 | placement.placement = &placements; |
357 | placement.num_busy_placement = 1; |
358 | placement.busy_placement = &placements; |
5271 | serge | 359 | placements.fpfn = 0; |
360 | placements.lpfn = 0; |
361 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; |
5078 | serge | 362 | r = ttm_bo_mem_space(bo, &placement, &tmp_mem, |
363 | interruptible, no_wait_gpu); |
364 | if (unlikely(r)) { |
365 | return r; |
366 | } |
367 | r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); |
368 | if (unlikely(r)) { |
369 | goto out_cleanup; |
370 | } |
371 | r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); |
372 | if (unlikely(r)) { |
373 | goto out_cleanup; |
374 | } |
375 | out_cleanup: |
376 | ttm_bo_mem_put(bo, &tmp_mem); |
377 | return r; |
378 | } |
379 | |
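| /* radeon_bo_move() is the top-level dispatcher: system<->TT transitions |
| * only need a (re)bind, GPU blits handle the rest while a copy ring is |
| * available, and ttm_bo_move_memcpy() is the fallback when the blit path |
| * fails or no copy engine exists. */ |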
380 | static int radeon_bo_move(struct ttm_buffer_object *bo, |
381 | bool evict, bool interruptible, |
382 | bool no_wait_gpu, |
383 | struct ttm_mem_reg *new_mem) |
384 | { |
385 | struct radeon_device *rdev; |
386 | struct ttm_mem_reg *old_mem = &bo->mem; |
387 | int r; |
388 | |
389 | rdev = radeon_get_rdev(bo->bdev); |
390 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { |
391 | radeon_move_null(bo, new_mem); |
392 | return 0; |
393 | } |
394 | if ((old_mem->mem_type == TTM_PL_TT && |
395 | new_mem->mem_type == TTM_PL_SYSTEM) || |
396 | (old_mem->mem_type == TTM_PL_SYSTEM && |
397 | new_mem->mem_type == TTM_PL_TT)) { |
398 | /* bind is enough */ |
399 | radeon_move_null(bo, new_mem); |
400 | return 0; |
401 | } |
402 | if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || |
403 | rdev->asic->copy.copy == NULL) { |
404 | /* use memcpy */ |
405 | goto memcpy; |
406 | } |
407 | |
408 | if (old_mem->mem_type == TTM_PL_VRAM && |
409 | new_mem->mem_type == TTM_PL_SYSTEM) { |
410 | r = radeon_move_vram_ram(bo, evict, interruptible, |
411 | no_wait_gpu, new_mem); |
412 | } else if (old_mem->mem_type == TTM_PL_SYSTEM && |
413 | new_mem->mem_type == TTM_PL_VRAM) { |
414 | r = radeon_move_ram_vram(bo, evict, interruptible, |
415 | no_wait_gpu, new_mem); |
416 | } else { |
417 | r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); |
418 | } |
419 | |
420 | if (r) { |
421 | memcpy: |
422 | r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
423 | if (r) { |
424 | return r; |
425 | } |
426 | } |
427 | |
428 | /* update statistics */ |
429 | // atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); |
430 | return 0; |
431 | } |
432 | |
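| /* radeon_ttm_io_mem_reserve() fills in the bus address a CPU mapping |
| * should use: nothing for system RAM, the AGP aperture for TT (when AGP |
| * is active), and the PCI aperture for VRAM, rejecting offsets beyond |
| * the CPU-visible VRAM window. */ |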
433 | static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
434 | { |
435 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
436 | struct radeon_device *rdev = radeon_get_rdev(bdev); |
437 | |
438 | mem->bus.addr = NULL; |
439 | mem->bus.offset = 0; |
440 | mem->bus.size = mem->num_pages << PAGE_SHIFT; |
441 | mem->bus.base = 0; |
442 | mem->bus.is_iomem = false; |
443 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
444 | return -EINVAL; |
445 | switch (mem->mem_type) { |
446 | case TTM_PL_SYSTEM: |
447 | /* system memory */ |
448 | return 0; |
449 | case TTM_PL_TT: |
6104 | serge | 450 | #if IS_ENABLED(CONFIG_AGP) |
5078 | serge | 451 | if (rdev->flags & RADEON_IS_AGP) { |
452 | /* RADEON_IS_AGP is set only if AGP is active */ |
453 | mem->bus.offset = mem->start << PAGE_SHIFT; |
454 | mem->bus.base = rdev->mc.agp_base; |
455 | mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; |
456 | } |
457 | #endif |
458 | break; |
459 | case TTM_PL_VRAM: |
460 | mem->bus.offset = mem->start << PAGE_SHIFT; |
461 | /* check if it's visible */ |
462 | if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size) |
463 | return -EINVAL; |
464 | mem->bus.base = rdev->mc.aper_base; |
465 | mem->bus.is_iomem = true; |
466 | #ifdef __alpha__ |
467 | /* |
468 | * Alpha: use bus.addr to hold the ioremap() return, |
469 | * so we can modify bus.base below. |
470 | */ |
471 | if (mem->placement & TTM_PL_FLAG_WC) |
472 | mem->bus.addr = |
473 | ioremap_wc(mem->bus.base + mem->bus.offset, |
474 | mem->bus.size); |
475 | else |
476 | mem->bus.addr = |
477 | ioremap_nocache(mem->bus.base + mem->bus.offset, |
478 | mem->bus.size); |
479 | |
480 | /* |
481 | * Alpha: Use just the bus offset plus |
482 | * the hose/domain memory base for bus.base. |
483 | * It then can be used to build PTEs for VRAM |
484 | * access, as done in ttm_bo_vm_fault(). |
485 | */ |
486 | mem->bus.base = (mem->bus.base & 0x0ffffffffUL) + |
487 | rdev->ddev->hose->dense_mem_base; |
488 | #endif |
489 | break; |
490 | default: |
491 | return -EINVAL; |
492 | } |
493 | return 0; |
494 | } |
495 | |
3764 | Serge | 496 | static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
497 | { |
498 | } |
499 | |
500 | /* |
501 | * TTM backend functions. |
502 | */ |
503 | struct radeon_ttm_tt { |
504 | struct ttm_dma_tt ttm; |
505 | struct radeon_device *rdev; |
506 | u64 offset; |
5271 | serge | 507 | |
508 | uint64_t userptr; |
509 | struct mm_struct *usermm; |
510 | uint32_t userflags; |
3764 | Serge | 511 | }; |
512 | |
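| /* radeon_ttm_backend_bind() writes the ttm_tt's pages into the GART at the |
| * BO's GPU offset; cached pages additionally get the SNOOP bit so the GPU |
| * snoops the CPU cache for them. */ |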
513 | static int radeon_ttm_backend_bind(struct ttm_tt *ttm, |
514 | struct ttm_mem_reg *bo_mem) |
515 | { |
516 | struct radeon_ttm_tt *gtt = (void*)ttm; |
5078 | serge | 517 | uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ | |
518 | RADEON_GART_PAGE_WRITE; |
3764 | Serge | 519 | int r; |
520 | |
521 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
522 | if (!ttm->num_pages) { |
523 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
524 | ttm->num_pages, bo_mem, ttm); |
525 | } |
5078 | serge | 526 | if (ttm->caching_state == tt_cached) |
527 | flags |= RADEON_GART_PAGE_SNOOP; |
528 | r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages, |
529 | ttm->pages, gtt->ttm.dma_address, flags); |
3764 | Serge | 530 | if (r) { |
531 | DRM_ERROR("failed to bind %lu pages at 0x%08X\n", |
532 | ttm->num_pages, (unsigned)gtt->offset); |
533 | return r; |
534 | } |
535 | return 0; |
536 | } |
537 | |
538 | static int radeon_ttm_backend_unbind(struct ttm_tt *ttm) |
539 | { |
540 | struct radeon_ttm_tt *gtt = (void *)ttm; |
541 | |
542 | radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages); |
543 | return 0; |
544 | } |
545 | |
546 | static void radeon_ttm_backend_destroy(struct ttm_tt *ttm) |
547 | { |
548 | struct radeon_ttm_tt *gtt = (void *)ttm; |
549 | |
5078 | serge | 550 | // ttm_dma_tt_fini(&gtt->ttm); |
3764 | Serge | 551 | kfree(gtt); |
552 | } |
553 | |
554 | static struct ttm_backend_func radeon_backend_func = { |
555 | .bind = &radeon_ttm_backend_bind, |
556 | .unbind = &radeon_ttm_backend_unbind, |
557 | .destroy = &radeon_ttm_backend_destroy, |
558 | }; |
559 | |
560 | static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev, |
561 | unsigned long size, uint32_t page_flags, |
562 | struct page *dummy_read_page) |
563 | { |
564 | struct radeon_device *rdev; |
565 | struct radeon_ttm_tt *gtt; |
566 | |
567 | rdev = radeon_get_rdev(bdev); |
6104 | serge | 568 | #if IS_ENABLED(CONFIG_AGP) |
3764 | Serge | 569 | if (rdev->flags & RADEON_IS_AGP) { |
570 | return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge, |
571 | size, page_flags, dummy_read_page); |
572 | } |
573 | #endif |
574 | |
575 | gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL); |
576 | if (gtt == NULL) { |
577 | return NULL; |
578 | } |
579 | gtt->ttm.ttm.func = &radeon_backend_func; |
580 | gtt->rdev = rdev; |
581 | if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) { |
582 | kfree(gtt); |
583 | return NULL; |
584 | } |
585 | return &gtt->ttm.ttm; |
586 | } |
587 | |
5271 | serge | 588 | static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm) |
589 | { |
590 | if (!ttm || ttm->func != &radeon_backend_func) |
591 | return NULL; |
592 | return (struct radeon_ttm_tt *)ttm; |
593 | } |
594 | |
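| /* radeon_ttm_tt_populate() allocates backing pages: dma-buf importers take |
| * theirs from the sg table, AGP and swiotlb have dedicated helpers, and |
| * the default path pulls pages from the TTM pool and maps each one for |
| * DMA with pci_map_page(). */ |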
5078 | serge | 595 | static int radeon_ttm_tt_populate(struct ttm_tt *ttm) |
596 | { |
5271 | serge | 597 | struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); |
5078 | serge | 598 | struct radeon_device *rdev; |
599 | unsigned i; |
600 | int r; |
601 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
602 | |
603 | if (ttm->state != tt_unpopulated) |
604 | return 0; |
605 | |
606 | if (slave && ttm->sg) { |
607 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
608 | gtt->ttm.dma_address, ttm->num_pages); |
609 | ttm->state = tt_unbound; |
610 | return 0; |
611 | } |
612 | |
613 | rdev = radeon_get_rdev(ttm->bdev); |
6104 | serge | 614 | #if IS_ENABLED(CONFIG_AGP) |
5078 | serge | 615 | if (rdev->flags & RADEON_IS_AGP) { |
616 | return ttm_agp_tt_populate(ttm); |
617 | } |
618 | #endif |
619 | |
620 | #ifdef CONFIG_SWIOTLB |
621 | if (swiotlb_nr_tbl()) { |
622 | return ttm_dma_populate(&gtt->ttm, rdev->dev); |
623 | } |
624 | #endif |
625 | |
626 | r = ttm_pool_populate(ttm); |
627 | if (r) { |
628 | return r; |
629 | } |
630 | |
631 | for (i = 0; i < ttm->num_pages; i++) { |
632 | gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i], |
633 | 0, PAGE_SIZE, |
634 | PCI_DMA_BIDIRECTIONAL); |
635 | |
636 | } |
637 | return 0; |
638 | } |
639 | |
640 | static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm) |
641 | { |
642 | struct radeon_device *rdev; |
5271 | serge | 643 | struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm); |
5078 | serge | 644 | unsigned i; |
645 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
646 | |
647 | if (slave) |
648 | return; |
649 | |
650 | rdev = radeon_get_rdev(ttm->bdev); |
6104 | serge | 651 | #if IS_ENABLED(CONFIG_AGP) |
5078 | serge | 652 | if (rdev->flags & RADEON_IS_AGP) { |
653 | ttm_agp_tt_unpopulate(ttm); |
654 | return; |
655 | } |
656 | #endif |
657 | |
658 | #ifdef CONFIG_SWIOTLB |
659 | if (swiotlb_nr_tbl()) { |
660 | ttm_dma_unpopulate(&gtt->ttm, rdev->dev); |
661 | return; |
662 | } |
663 | #endif |
664 | |
665 | |
666 | ttm_pool_unpopulate(ttm); |
667 | } |
668 | |
1404 | serge | 669 | static struct ttm_bo_driver radeon_bo_driver = { |
3764 | Serge | 670 | .ttm_tt_create = &radeon_ttm_tt_create, |
5078 | serge | 671 | .ttm_tt_populate = &radeon_ttm_tt_populate, |
672 | .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, |
673 | .invalidate_caches = &radeon_invalidate_caches, |
3764 | Serge | 674 | .init_mem_type = &radeon_init_mem_type, |
5078 | serge | 675 | .evict_flags = &radeon_evict_flags, |
676 | .move = &radeon_bo_move, |
677 | .verify_access = &radeon_verify_access, |
678 | .move_notify = &radeon_bo_move_notify, |
3764 | Serge | 679 | // .fault_reserve_notify = &radeon_bo_fault_reserve_notify, |
5078 | serge | 680 | .io_mem_reserve = &radeon_ttm_io_mem_reserve, |
681 | .io_mem_free = &radeon_ttm_io_mem_free, |
1404 | serge | 682 | }; |
683 | |
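| /* radeon_ttm_init() brings the stack up in order: global TTM state, the |
| * BO device, a VRAM heap sized to real_vram_size (then clamped to the |
| * CPU-visible window), a pinned 16 MiB VRAM buffer (stollen_vga_memory), |
| * and finally the GTT heap. */ |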
684 | int radeon_ttm_init(struct radeon_device *rdev) |
685 | { |
686 | int r; |
687 | |
688 | r = radeon_ttm_global_init(rdev); |
689 | if (r) { |
690 | return r; |
691 | } |
692 | /* No other users of address space so set it to 0 */ |
693 | r = ttm_bo_device_init(&rdev->mman.bdev, |
694 | rdev->mman.bo_global_ref.ref.object, |
5078 | serge | 695 | &radeon_bo_driver, |
696 | NULL, |
697 | DRM_FILE_PAGE_OFFSET, |
1404 | serge | 698 | rdev->need_dma32); |
699 | if (r) { |
700 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
701 | return r; |
702 | } |
703 | rdev->mman.initialized = true; |
704 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
705 | rdev->mc.real_vram_size >> PAGE_SHIFT); |
706 | if (r) { |
707 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
708 | return r; |
709 | } |
5078 | serge | 710 | /* Change the size here instead of the init above so only lpfn is affected */ |
711 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
3764 | Serge | 712 | |
5271 | serge | 713 | r = radeon_bo_create(rdev, 16 * 1024 * 1024, PAGE_SIZE, true, |
714 | RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
5078 | serge | 715 | NULL, &rdev->stollen_vga_memory); |
716 | if (r) { |
717 | return r; |
718 | } |
719 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
720 | if (r) |
721 | return r; |
722 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); |
723 | radeon_bo_unreserve(rdev->stollen_vga_memory); |
724 | if (r) { |
725 | radeon_bo_unref(&rdev->stollen_vga_memory); |
726 | return r; |
727 | } |
1404 | serge | 728 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
5078 | serge | 729 | (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); |
1404 | serge | 730 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
731 | rdev->mc.gtt_size >> PAGE_SHIFT); |
732 | if (r) { |
733 | DRM_ERROR("Failed initializing GTT heap.\n"); |
734 | return r; |
735 | } |
736 | DRM_INFO("radeon: %uM of GTT memory ready.\n", |
737 | (unsigned)(rdev->mc.gtt_size / (1024 * 1024))); |
738 | |
3764 | Serge | 739 | return 0; |
1404 | serge | 740 | } |
741 | |
742 | |
3764 | Serge | 743 | /* this should only be called at bootup or when userspace |
744 | * isn't running */ |
745 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) |
746 | { |
747 | struct ttm_mem_type_manager *man; |
1404 | serge | 748 | |
3764 | Serge | 749 | if (!rdev->mman.initialized) |
750 | return; |
1404 | serge | 751 | |
3764 | Serge | 752 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; |
753 | /* this just adjusts TTM size idea, which sets lpfn to the correct value */ |
754 | man->size = size >> PAGE_SHIFT; |
755 | } |
1404 | serge | 756 | |
3764 | Serge | 757 | static struct vm_operations_struct radeon_ttm_vm_ops; |
758 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
1404 | serge | 759 | |
3764 | Serge | 760 | #if 0 |
1404 | serge | 761 | |
3764 | Serge | 762 | radeon_bo_init |
763 | { |
764 | <6>[drm] Detected VRAM RAM=1024M, BAR=256M |
765 | <6>[drm] RAM width 128bits DDR |
1404 | serge | 766 | |
3764 | Serge | 767 | radeon_ttm_init |
768 | { |
769 | radeon_ttm_global_init |
770 | { |
771 | radeon_ttm_mem_global_init |
1404 | serge | 772 | |
3764 | Serge | 773 | ttm_bo_global_init |
774 | } |
1404 | serge | 775 | |
3764 | Serge | 776 | ttm_bo_device_init |
777 | { |
778 | ttm_bo_init_mm |
779 | { |
780 | radeon_init_mem_type |
781 | }; |
782 | } |
1404 | serge | 783 | |
3764 | Serge | 784 | ttm_bo_init_mm |
785 | { |
786 | radeon_init_mem_type |
1404 | serge | 787 | |
3764 | Serge | 788 | ttm_bo_man_init |
789 | } |
1404 | serge | 790 | |
3764 | Serge | 791 | <6>[drm] radeon: 1024M of VRAM memory ready |
1404 | serge | 792 | |
3764 | Serge | 793 | ttm_bo_init_mm |
794 | { |
795 | radeon_init_mem_type |
1404 | serge | 796 | |
3764 | Serge | 797 | ttm_bo_man_init |
798 | } |
1404 | serge | 799 | |
3764 | Serge | 800 | <6>[drm] radeon: 512M of GTT memory ready. |
801 | } |
802 | }; |
1404 | serge | 803 | |
3764 | Serge | 804 | #endif |
1404 | serge | 805 | |
806 | |
807 | |
808 | |
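| /* drm_prime_sg_to_page_addr_arrays() (provided here rather than by the |
| * DRM core) expands every contiguous scatterlist entry into per-page |
| * struct page pointers and DMA addresses, failing if the arrays would |
| * overflow max_pages. */ |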
5078 | serge | 809 | int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, |
810 | dma_addr_t *addrs, int max_pages) |
811 | { |
812 | unsigned count; |
813 | struct scatterlist *sg; |
814 | struct page *page; |
815 | u32 len; |
816 | int pg_index; |
817 | dma_addr_t addr; |
818 | |
819 | pg_index = 0; |
820 | for_each_sg(sgt->sgl, sg, sgt->nents, count) { |
821 | len = sg->length; |
822 | page = sg_page(sg); |
823 | addr = sg_dma_address(sg); |
824 | |
825 | while (len > 0) { |
826 | if (WARN_ON(pg_index >= max_pages)) |
827 | return -1; |
828 | pages[pg_index] = page; |
829 | if (addrs) |
830 | addrs[pg_index] = addr; |
831 | |
832 | page++; |
833 | addr += PAGE_SIZE; |
834 | len -= PAGE_SIZE; |
835 | pg_index++; |
836 | } |
837 | } |
838 | return 0; |
839 | } |