/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
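
/*
 * Local stand-in for the Linux pci_alloc_consistent(): going by the body
 * below, it rounds the request up to a 32 KiB multiple, allocates whole
 * 4 KiB physical pages (hence size >> 12) and maps them write-enabled and
 * uncached, returning the CPU pointer while the physical address is passed
 * back through *dma_handle.
 */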
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                     addr_t *dma_handle)
{
    size = (size + 0x7FFF) & ~0x7FFF;

    *dma_handle = AllocPages(size >> 12);
    return (void *)MapIoMem(*dma_handle, size, PG_SW + PG_NOCACHE);
}

/*
 * GART
 * The GART (Graphics Address Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and the internal GART can be used at the same time,
 * but that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
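
/*
 * Two page sizes are in play throughout this file: the CPU page size
 * (PAGE_SIZE) and the GPU page size (RADEON_GPU_PAGE_SIZE). One CPU page
 * covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE GPU pages, which is why the
 * bind/unbind helpers below turn a byte offset into a GPU page index t
 * and a CPU page index p:
 *
 *     t = offset / RADEON_GPU_PAGE_SIZE;
 *     p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 *
 * On a typical x86 build both sizes are 4 KiB and the ratio is 1.
 */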

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
    void *ptr;

    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                               &rdev->gart.table_addr);
    if (ptr == NULL) {
        return -ENOMEM;
    }
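    /*
     * On these IGP asics the GPU evidently fetches the table from system
     * RAM without snooping the CPU caches, so the kernel mapping is
     * switched to uncached below to keep CPU-side updates visible.
     */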
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_uc((unsigned long)ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = ptr;
    memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
    return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
    if (rdev->gart.ptr == NULL) {
        return;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_wb((unsigned long)rdev->gart.ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = NULL;
    rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        r = radeon_bo_create(rdev, rdev->gart.table_size,
                             PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             NULL, &rdev->gart.robj);
        if (r) {
            return r;
        }
    }
    return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
    uint64_t gpu_addr;
    int r;

    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (unlikely(r != 0))
        return r;
    r = radeon_bo_pin(rdev->gart.robj,
                      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r) {
        radeon_bo_unreserve(rdev->gart.robj);
        return r;
    }
    r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
    if (r)
        radeon_bo_unpin(rdev->gart.robj);
    radeon_bo_unreserve(rdev->gart.robj);
    rdev->gart.table_addr = gpu_addr;
    return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        return;
    }
    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (likely(r == 0)) {
        radeon_bo_kunmap(rdev->gart.robj);
        radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.ptr = NULL;
    }
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
    if (rdev->gart.robj == NULL) {
        return;
    }
    radeon_gart_table_vram_unpin(rdev);
    radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
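
/*
 * Typical call flow, sketched from the helpers in this file (the asic
 * code enables the aperture and sets gart.ready in between):
 *
 *     radeon_gart_init(rdev);              // bookkeeping + dummy page
 *     radeon_gart_table_ram_alloc(rdev);   // or _vram_alloc + _vram_pin
 *     ...asic gart enable, rdev->gart.ready = true...
 *     radeon_gart_bind(rdev, offset, pages, pagelist, dma_addr);
 *     radeon_gart_unbind(rdev, offset, pages);
 */
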
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
    unsigned t;
    unsigned p;
    int i, j;
    u64 page_base;

    if (!rdev->gart.ready) {
        WARN(1, "trying to unbind memory from uninitialized GART !\n");
        return;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    for (i = 0; i < pages; i++, p++) {
        if (rdev->gart.pages[p]) {
//          pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
//                         PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
            rdev->gart.pages[p] = NULL;
            rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                if (rdev->gart.ptr) {
                    radeon_gart_set_page(rdev, t, page_base);
                }
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
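    /*
     * Make the CPU's table updates globally visible before asking the
     * GPU to drop its stale TLB entries.
     */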
    mb();
    radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, u32 *pagelist, dma_addr_t *dma_addr)
{
    unsigned t;
    unsigned p;
    uint64_t page_base;
    int i, j;

//  dbgprintf("offset %x pages %d list %x\n",
//            offset, pages, pagelist);

    if (!rdev->gart.ready) {
        WARN(1, "trying to bind memory to uninitialized GART !\n");
        return -EINVAL;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

    for (i = 0; i < pages; i++, p++) {
        rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
        rdev->gart.pages[p] = pagelist[i];
        if (rdev->gart.ptr) {
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                radeon_gart_set_page(rdev, t, page_base);
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
    return 0;
}

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
    int i, j, t;
    u64 page_base;

    if (!rdev->gart.ptr) {
        return;
    }
    for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
        page_base = rdev->gart.pages_addr[i];
        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
            radeon_gart_set_page(rdev, t, page_base);
            page_base += RADEON_GPU_PAGE_SIZE;
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
    int r, i;

    if (rdev->gart.pages) {
        return 0;
    }
    /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
    if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
    r = radeon_dummy_page_init(rdev);
    if (r)
        return r;
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
             rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
    rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
    if (rdev->gart.pages == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
                                    rdev->gart.num_cpu_pages);
    if (rdev->gart.pages_addr == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    /* set GART entry to point to the dummy page by default */
    for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
        rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
    }
    return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
    if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
        /* unbind pages */
        radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    }
    rdev->gart.ready = false;
    vfree(rdev->gart.pages);
    vfree(rdev->gart.pages_addr);
    rdev->gart.pages = NULL;
    rdev->gart.pages_addr = NULL;
}

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

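/*
 * A quick sizing sketch, assuming the usual RADEON_VM_BLOCK_SIZE of 9:
 * the page directory has max_pfn >> 9 entries of 8 bytes each, and every
 * directory entry points at a page table of RADEON_VM_PTE_COUNT (1 << 9)
 * 8-byte PTEs, so each page table spans 512 GPU pages of address space.
 * radeon_vm_num_pdes() and radeon_vm_directory_size() below encode
 * exactly this layout.
 */
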
/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
    return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
    return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
    struct radeon_vm *vm;
    struct radeon_bo_va *bo_va;
    int r;
    unsigned size;

    if (!rdev->vm_manager.enabled) {
        /* allocate enough for 2 full VM pts */
        size = radeon_vm_directory_size(rdev);
        size += rdev->vm_manager.max_pfn * 8;
        size *= 2;
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
                                      RADEON_GPU_PAGE_ALIGN(size),
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
            dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
                    (rdev->vm_manager.max_pfn * 8) >> 10);
            return r;
        }

        r = radeon_asic_vm_init(rdev);
        if (r)
            return r;

        rdev->vm_manager.enabled = true;

        r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
        if (r)
            return r;
    }

    /* restore page table */
    list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
        if (vm->page_directory == NULL)
            continue;

        list_for_each_entry(bo_va, &vm->va, vm_list) {
            bo_va->valid = false;
        }
    }
    return 0;
}

/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
                              struct radeon_vm *vm)
{
    struct radeon_bo_va *bo_va;
    int i;

    if (!vm->page_directory)
        return;

    list_del_init(&vm->list);
    radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

    list_for_each_entry(bo_va, &vm->va, vm_list) {
        bo_va->valid = false;
    }

    if (vm->page_tables == NULL)
        return;

    for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
        radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

    kfree(vm->page_tables);
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
    struct radeon_vm *vm, *tmp;
    int i;

    if (!rdev->vm_manager.enabled)
        return;

    mutex_lock(&rdev->vm_manager.lock);
    /* free all allocated page tables */
    list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
        mutex_lock(&vm->mutex);
        radeon_vm_free_pt(rdev, vm);
        mutex_unlock(&vm->mutex);
    }
    for (i = 0; i < RADEON_NUM_VM; ++i) {
        radeon_fence_unref(&rdev->vm_manager.active[i]);
    }
    radeon_asic_vm_fini(rdev);
    mutex_unlock(&rdev->vm_manager.lock);

    radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
    radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
    rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
    struct radeon_vm *vm_evict;

    if (list_empty(&rdev->vm_manager.lru_vm))
        return -ENOMEM;

    vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
                                struct radeon_vm, list);
    if (vm_evict == vm)
        return -ENOMEM;

    mutex_lock(&vm_evict->mutex);
    radeon_vm_free_pt(rdev, vm_evict);
    mutex_unlock(&vm_evict->mutex);
    return 0;
}

/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
    unsigned pd_size, pts_size;
    u64 *pd_addr;
    int r;

    if (vm == NULL) {
        return -EINVAL;
    }

    if (vm->page_directory != NULL) {
        return 0;
    }

retry:
    pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
    r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                         &vm->page_directory, pd_size,
                         RADEON_GPU_PAGE_SIZE, false);
    if (r == -ENOMEM) {
        r = radeon_vm_evict(rdev, vm);
        if (r)
            return r;
        goto retry;

    } else if (r) {
        return r;
    }

    vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

    /* Initially clear the page directory */
    pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
    memset(pd_addr, 0, pd_size);

    pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
    vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

    if (vm->page_tables == NULL) {
        DRM_ERROR("Cannot allocate memory for page table array\n");
        radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
        return -ENOMEM;
    }

    return 0;
}

/**
 * radeon_vm_add_to_lru - add a VM's page table to the LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
    list_del_init(&vm->list);
    list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
{
    struct radeon_fence *best[RADEON_NUM_RINGS] = {};
    unsigned choices[2] = {};
    unsigned i;

    /* check if the id is still valid */
    if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
        return NULL;

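    /*
     * Otherwise a VMID has to be recycled. The loop below tracks two
     * candidates by oldest last-use fence: choices[0], last used on @ring
     * (cheap to reuse, no cross-ring wait), and choices[1], last used on
     * another ring (the returned fence must be synced to first).
     */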
    /* we definitely need to flush */
    radeon_fence_unref(&vm->last_flush);

    /* skip over VMID 0, since it is the system VM */
    for (i = 1; i < rdev->vm_manager.nvm; ++i) {
        struct radeon_fence *fence = rdev->vm_manager.active[i];

        if (fence == NULL) {
            /* found a free one */
            vm->id = i;
            return NULL;
        }

        if (radeon_fence_is_earlier(fence, best[fence->ring])) {
            best[fence->ring] = fence;
            choices[fence->ring == ring ? 0 : 1] = i;
        }
    }

    for (i = 0; i < 2; ++i) {
        if (choices[i]) {
            vm->id = choices[i];
            return rdev->vm_manager.active[choices[i]];
        }
    }

    /* should never happen */
    BUG();
    return NULL;
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
    radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
    rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
                                       struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    list_for_each_entry(bo_va, &bo->va, bo_list) {
        if (bo_va->vm == vm) {
            return bo_va;
        }
    }
    return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns the newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
                                      struct radeon_vm *vm,
                                      struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
    if (bo_va == NULL) {
        return NULL;
    }
    bo_va->vm = vm;
    bo_va->bo = bo;
    bo_va->soffset = 0;
    bo_va->eoffset = 0;
    bo_va->flags = 0;
    bo_va->valid = false;
    bo_va->ref_count = 1;
    INIT_LIST_HEAD(&bo_va->bo_list);
    INIT_LIST_HEAD(&bo_va->vm_list);

    mutex_lock(&vm->mutex);
    list_add(&bo_va->vm_list, &vm->va);
    list_add_tail(&bo_va->bo_list, &bo->va);
    mutex_unlock(&vm->mutex);

    return bo_va;
}

/**
 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set the offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t soffset,
                          uint32_t flags)
{
    uint64_t size = radeon_bo_size(bo_va->bo);
    uint64_t eoffset, last_offset = 0;
    struct radeon_vm *vm = bo_va->vm;
    struct radeon_bo_va *tmp;
    struct list_head *head;
    unsigned last_pfn;

    if (soffset) {
        /* make sure the object fits at this offset */
        eoffset = soffset + size;
        if (soffset >= eoffset) {
            return -EINVAL;
        }

        last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
        if (last_pfn > rdev->vm_manager.max_pfn) {
            dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                    last_pfn, rdev->vm_manager.max_pfn);
            return -EINVAL;
        }

    } else {
        eoffset = last_pfn = 0;
    }

    mutex_lock(&vm->mutex);
    head = &vm->va;
    last_offset = 0;
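    /*
     * The va list is kept sorted by offset, so a single linear walk both
     * finds the insertion point (head) and checks the requested range
     * [soffset, eoffset) against every existing mapping for overlap.
     */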
    list_for_each_entry(tmp, &vm->va, vm_list) {
        if (bo_va == tmp) {
            /* skip over currently modified bo */
            continue;
        }

        if (soffset >= last_offset && eoffset <= tmp->soffset) {
            /* bo can be added before this one */
            break;
        }
        if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
            /* bo and tmp overlap, invalid offset */
            dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
                    bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
                    (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
            mutex_unlock(&vm->mutex);
            return -EINVAL;
        }
        last_offset = tmp->eoffset;
        head = &tmp->vm_list;
    }

    bo_va->soffset = soffset;
    bo_va->eoffset = eoffset;
    bo_va->flags = flags;
    bo_va->valid = false;
    list_move(&bo_va->vm_list, head);

    mutex_unlock(&vm->mutex);
    return 0;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
    uint64_t result;

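    /*
     * Worked example, assuming 4 KiB CPU pages: for addr = 0x12345 the
     * backing page is pages_addr[0x12] and the low bits 0x345 carry the
     * offset within it, so result = pages_addr[0x12] | 0x345.
     */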
    /* page table offset */
    result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

    /* in case cpu page size != gpu page size */
    result |= addr & (~PAGE_MASK);

    return result;
}

/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_pdes(struct radeon_device *rdev,
                                 struct radeon_vm *vm,
                                 uint64_t start, uint64_t end)
{
    static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

    uint64_t last_pde = ~0, last_pt = ~0;
    unsigned count = 0;
    uint64_t pt_idx;
    int r;

    start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
    end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

    /* walk over the address space and update the page directory */
    for (pt_idx = start; pt_idx <= end; ++pt_idx) {
        uint64_t pde, pt;

        if (vm->page_tables[pt_idx])
            continue;

retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                             &vm->page_tables[pt_idx],
                             RADEON_VM_PTE_COUNT * 8,
                             RADEON_GPU_PAGE_SIZE, false);

        if (r == -ENOMEM) {
            r = radeon_vm_evict(rdev, vm);
            if (r)
                return r;
            goto retry;
        } else if (r) {
            return r;
        }

        pde = vm->pd_gpu_addr + pt_idx * 8;

        pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);

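        /*
         * Batch contiguous updates: while successive PDE slots advance by
         * 8 bytes and the page tables they point at advance by incr, just
         * grow the run, then emit one radeon_asic_vm_set_page() per run.
         */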
        if (((last_pde + 8 * count) != pde) ||
            ((last_pt + incr * count) != pt)) {

            if (count) {
                radeon_asic_vm_set_page(rdev, last_pde,
                                        last_pt, count, incr,
                                        RADEON_VM_PAGE_VALID);
            }

            count = 1;
            last_pde = pde;
            last_pt = pt;
        } else {
            ++count;
        }
    }

    if (count) {
        radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
                                incr, RADEON_VM_PAGE_VALID);
    }

    return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                  struct radeon_vm *vm,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
    static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

    uint64_t last_pte = ~0, last_dst = ~0;
    unsigned count = 0;
    uint64_t addr;

    start = start / RADEON_GPU_PAGE_SIZE;
    end = end / RADEON_GPU_PAGE_SIZE;

    /* walk over the address space and update the page tables */
    for (addr = start; addr < end; ) {
        uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
        unsigned nptes;
        uint64_t pte;

        if ((addr & ~mask) == (end & ~mask))
            nptes = end - addr;
        else
            nptes = RADEON_VM_PTE_COUNT - (addr & mask);

        pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
        pte += (addr & mask) * 8;

        if ((last_pte + 8 * count) != pte) {

            if (count) {
                radeon_asic_vm_set_page(rdev, last_pte,
                                        last_dst, count,
                                        RADEON_GPU_PAGE_SIZE,
                                        flags);
            }

            count = nptes;
            last_pte = pte;
            last_dst = dst;
        } else {
            count += nptes;
        }

        addr += nptes;
        dst += nptes * RADEON_GPU_PAGE_SIZE;
    }

    if (count) {
        radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
                                RADEON_GPU_PAGE_SIZE, flags);
    }
}

/**
 * radeon_vm_bo_update_pte - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and global and local mutex must be locked!
 */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                            struct radeon_vm *vm,
                            struct radeon_bo *bo,
                            struct ttm_mem_reg *mem)
{
    unsigned ridx = rdev->asic->vm.pt_ring_index;
    struct radeon_ring *ring = &rdev->ring[ridx];
    struct radeon_semaphore *sem = NULL;
    struct radeon_bo_va *bo_va;
    unsigned nptes, npdes, ndw;
    uint64_t addr;
    int r;

    /* nothing to do if vm isn't bound */
    if (vm->page_directory == NULL)
        return 0;

    bo_va = radeon_vm_bo_find(vm, bo);
    if (bo_va == NULL) {
        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
        return -EINVAL;
    }

    if (!bo_va->soffset) {
        dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
                bo, vm);
        return -EINVAL;
    }

    if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
        return 0;

    bo_va->flags &= ~RADEON_VM_PAGE_VALID;
    bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
    if (mem) {
        addr = mem->start << PAGE_SHIFT;
        if (mem->mem_type != TTM_PL_SYSTEM) {
            bo_va->flags |= RADEON_VM_PAGE_VALID;
            bo_va->valid = true;
        }
        if (mem->mem_type == TTM_PL_TT) {
            bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
        } else {
            addr += rdev->vm_manager.vram_base_offset;
        }
    } else {
        addr = 0;
        bo_va->valid = false;
    }

    if (vm->fence && radeon_fence_signaled(vm->fence)) {
        radeon_fence_unref(&vm->fence);
    }

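    /*
     * If the page tables were last touched from a different ring, a
     * semaphore is needed to order this update behind that earlier work.
     */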
    if (vm->fence && vm->fence->ring != ridx) {
        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
            return r;
        }
    }

    nptes = radeon_bo_ngpu_pages(bo);

    /* assume two extra pdes in case the mapping overlaps the borders */
    npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;

    /* estimate number of dw needed */
    /* semaphore, fence and padding */
    ndw = 32;

    if (RADEON_VM_BLOCK_SIZE > 11)
        /* reserve space for one header for every 2k dwords */
        ndw += (nptes >> 11) * 4;
    else
        /* reserve space for one header for
           every (1 << BLOCK_SIZE) entries */
        ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

    /* reserve space for pte addresses */
    ndw += nptes * 2;

    /* reserve space for one header for every 2k dwords */
    ndw += (npdes >> 11) * 4;

    /* reserve space for pde addresses */
    ndw += npdes * 2;

    r = radeon_ring_lock(rdev, ring, ndw);
    if (r) {
        return r;
    }

    if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
        radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
        radeon_fence_note_sync(vm->fence, ridx);
    }

    r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
    if (r) {
        radeon_ring_unlock_undo(rdev, ring);
        return r;
    }

    radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
                          addr, bo_va->flags);

    radeon_fence_unref(&vm->fence);
    r = radeon_fence_emit(rdev, &vm->fence, ridx);
    if (r) {
        radeon_ring_unlock_undo(rdev, ring);
        return r;
    }
    radeon_ring_unlock_commit(rdev, ring);
    radeon_semaphore_free(rdev, &sem, vm->fence);
    radeon_fence_unref(&vm->last_flush);

    return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_bo_va *bo_va)
{
    int r;

    mutex_lock(&rdev->vm_manager.lock);
    mutex_lock(&bo_va->vm->mutex);
    r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
    mutex_unlock(&rdev->vm_manager.lock);
    list_del(&bo_va->vm_list);
    mutex_unlock(&bo_va->vm->mutex);
    list_del(&bo_va->bo_list);

    kfree(bo_va);
    return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid in every vm it is mapped into (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    BUG_ON(!atomic_read(&bo->tbo.reserved));
    list_for_each_entry(bo_va, &bo->va, bo_list) {
        bo_va->valid = false;
    }
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
    vm->id = 0;
    vm->fence = NULL;
    mutex_init(&vm->mutex);
    INIT_LIST_HEAD(&vm->list);
    INIT_LIST_HEAD(&vm->va);
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
    struct radeon_bo_va *bo_va, *tmp;
    int r;

    mutex_lock(&rdev->vm_manager.lock);
    mutex_lock(&vm->mutex);
    radeon_vm_free_pt(rdev, vm);
    mutex_unlock(&rdev->vm_manager.lock);

    if (!list_empty(&vm->va)) {
        dev_err(rdev->dev, "still active bo inside vm\n");
    }
    list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
        list_del_init(&bo_va->vm_list);
        r = radeon_bo_reserve(bo_va->bo, false);
        if (!r) {
            list_del_init(&bo_va->bo_list);
            radeon_bo_unreserve(bo_va->bo);
            kfree(bo_va);
        }
    }
    radeon_fence_unref(&vm->fence);
    radeon_fence_unref(&vm->last_flush);
    mutex_unlock(&vm->mutex);
}