/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
32 | |||
1631 | serge | 33 | |
34 | static inline void * |
||
35 | pci_alloc_consistent(struct pci_dev *hwdev, size_t size, |
||
36 | addr_t *dma_handle) |
||
37 | { |
||
38 | |||
39 | size = (size + 0x7FFF) & ~0x7FFF; |
||
40 | |||
41 | *dma_handle = AllocPages(size >> 12); |
||
42 | return (void*)MapIoMem(*dma_handle, size, PG_SW+PG_NOCACHE); |
||
43 | } |
||
44 | |||
/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
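
/*
 * Illustrative sketch (not part of the driver): how a byte offset into
 * the GART aperture decomposes into a GPU page index and a CPU page
 * index.  The same arithmetic is used by radeon_gart_bind()/unbind()
 * below; RADEON_GPU_PAGE_SIZE is 4KB while PAGE_SIZE may be larger,
 * so one CPU page can back several GPU pages.
 */
#if 0
static void gart_index_example(unsigned offset)
{
    unsigned t = offset / RADEON_GPU_PAGE_SIZE;          /* GPU page index */
    unsigned p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); /* CPU page index */

    DRM_INFO("offset %u -> gpu page %u, cpu page %u\n", offset, t, p);
}
#endif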
64 | |||
65 | /* |
||
1120 | serge | 66 | * Common GART table functions. |
67 | */ |
||
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
    void *ptr;

    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                               &rdev->gart.table_addr);
    if (ptr == NULL) {
        return -ENOMEM;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_uc((unsigned long)ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = ptr;
    memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
    return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
    if (rdev->gart.ptr == NULL) {
        return;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_wb((unsigned long)rdev->gart.ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = NULL;
    rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        r = radeon_bo_create(rdev, rdev->gart.table_size,
                             PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             NULL, &rdev->gart.robj);
        if (r) {
            return r;
        }
    }
    return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
    uint64_t gpu_addr;
    int r;

    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (unlikely(r != 0))
        return r;
    r = radeon_bo_pin(rdev->gart.robj,
                      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r) {
        radeon_bo_unreserve(rdev->gart.robj);
        return r;
    }
    r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
    if (r)
        radeon_bo_unpin(rdev->gart.robj);
    radeon_bo_unreserve(rdev->gart.robj);
    rdev->gart.table_addr = gpu_addr;
    return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        return;
    }
    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (likely(r == 0)) {
        radeon_bo_kunmap(rdev->gart.robj);
        radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.ptr = NULL;
    }
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
    if (rdev->gart.robj == NULL) {
        return;
    }
    radeon_gart_table_vram_unpin(rdev);
    radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
    unsigned t;
    unsigned p;
    int i, j;
    u64 page_base;

    if (!rdev->gart.ready) {
        WARN(1, "trying to unbind memory from uninitialized GART !\n");
        return;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    for (i = 0; i < pages; i++, p++) {
        if (rdev->gart.pages[p]) {
            rdev->gart.pages[p] = NULL;
            rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                if (rdev->gart.ptr) {
                    radeon_gart_set_page(rdev, t, page_base);
                }
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, u32 *pagelist, dma_addr_t *dma_addr)
{
    unsigned t;
    unsigned p;
    uint64_t page_base;
    int i, j;

//    dbgprintf("offset %x pages %d list %x\n",
//              offset, pages, pagelist);

    if (!rdev->gart.ready) {
        WARN(1, "trying to bind memory to uninitialized GART !\n");
        return -EINVAL;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

    for (i = 0; i < pages; i++, p++) {
        rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
        rdev->gart.pages[p] = pagelist[i];
        if (rdev->gart.ptr) {
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                radeon_gart_set_page(rdev, t, page_base);
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
    return 0;
}

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
    int i, j, t;
    u64 page_base;

    if (!rdev->gart.ptr) {
        return;
    }
    for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
        page_base = rdev->gart.pages_addr[i];
        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
            radeon_gart_set_page(rdev, t, page_base);
            page_base += RADEON_GPU_PAGE_SIZE;
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
    int r, i;

    if (rdev->gart.pages) {
        return 0;
    }
    /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
    if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
    r = radeon_dummy_page_init(rdev);
    if (r)
        return r;
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
             rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
    rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
    if (rdev->gart.pages == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
                                    rdev->gart.num_cpu_pages);
    if (rdev->gart.pages_addr == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    /* set GART entry to point to the dummy page by default */
    for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
        rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
    }
    return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
    if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
        /* unbind pages */
        radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    }
    rdev->gart.ready = false;
    vfree(rdev->gart.pages);
    vfree(rdev->gart.pages_addr);
    rdev->gart.pages = NULL;
    rdev->gart.pages_addr = NULL;
}

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
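
/*
 * Illustrative sketch (not part of the driver): how a GPU virtual
 * address splits into a page directory index and a page table entry
 * index in the two-level layout managed below.  The constants mirror
 * the ones used by radeon_vm_update_pdes()/radeon_vm_update_ptes().
 */
#if 0
static void vm_index_example(uint64_t va)
{
    uint64_t pfn    = va / RADEON_GPU_PAGE_SIZE;       /* GPU page frame */
    uint64_t pd_idx = pfn >> RADEON_VM_BLOCK_SIZE;     /* page directory entry */
    uint64_t pt_idx = pfn & (RADEON_VM_PTE_COUNT - 1); /* entry inside the page table */

    DRM_INFO("va 0x%llx -> pde %llu, pte %llu\n",
             (unsigned long long)va, (unsigned long long)pd_idx,
             (unsigned long long)pt_idx);
}
#endif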
426 | |||
427 | /* |
||
428 | * vm helpers |
||
429 | * |
||
430 | * TODO bind a default page at vm initialization for default address |
||
431 | */ |
||
432 | |||
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
    return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
444 | |||
445 | /** |
||
446 | * radeon_vm_directory_size - returns the size of the page directory in bytes |
||
447 | * |
||
448 | * @rdev: radeon_device pointer |
||
449 | * |
||
450 | * Calculate the size of the page directory in bytes (cayman+). |
||
451 | */ |
||
452 | static unsigned radeon_vm_directory_size(struct radeon_device *rdev) |
||
453 | { |
||
454 | return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8); |
||
455 | } |
||
456 | |||
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
    struct radeon_vm *vm;
    struct radeon_bo_va *bo_va;
    int r;
    unsigned size;

    if (!rdev->vm_manager.enabled) {
        /* allocate enough for 2 full VM pts */
        size = radeon_vm_directory_size(rdev);
        size += rdev->vm_manager.max_pfn * 8;
        size *= 2;
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
                                      RADEON_GPU_PAGE_ALIGN(size),
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
            dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
                    (rdev->vm_manager.max_pfn * 8) >> 10);
            return r;
        }

        r = radeon_asic_vm_init(rdev);
        if (r)
            return r;

        rdev->vm_manager.enabled = true;

        r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
        if (r)
            return r;
    }

    /* restore page table */
    list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
        if (vm->page_directory == NULL)
            continue;

        list_for_each_entry(bo_va, &vm->va, vm_list) {
            bo_va->valid = false;
        }
    }
    return 0;
}

/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
                              struct radeon_vm *vm)
{
    struct radeon_bo_va *bo_va;
    int i;

    if (!vm->page_directory)
        return;

    list_del_init(&vm->list);
    radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

    list_for_each_entry(bo_va, &vm->va, vm_list) {
        bo_va->valid = false;
    }

    if (vm->page_tables == NULL)
        return;

    for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
        radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

    kfree(vm->page_tables);
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
    struct radeon_vm *vm, *tmp;
    int i;

    if (!rdev->vm_manager.enabled)
        return;

    mutex_lock(&rdev->vm_manager.lock);
    /* free all allocated page tables */
    list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
        mutex_lock(&vm->mutex);
        radeon_vm_free_pt(rdev, vm);
        mutex_unlock(&vm->mutex);
    }
    for (i = 0; i < RADEON_NUM_VM; ++i) {
        radeon_fence_unref(&rdev->vm_manager.active[i]);
    }
    radeon_asic_vm_fini(rdev);
    mutex_unlock(&rdev->vm_manager.lock);

    radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
    radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
    rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
    struct radeon_vm *vm_evict;

    if (list_empty(&rdev->vm_manager.lru_vm))
        return -ENOMEM;

    vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
                                struct radeon_vm, list);
    if (vm_evict == vm)
        return -ENOMEM;

    mutex_lock(&vm_evict->mutex);
    radeon_vm_free_pt(rdev, vm_evict);
    mutex_unlock(&vm_evict->mutex);
    return 0;
}

/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
    unsigned pd_size, pts_size;
    u64 *pd_addr;
    int r;

    if (vm == NULL) {
        return -EINVAL;
    }

    if (vm->page_directory != NULL) {
        return 0;
    }

retry:
    pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
    r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                         &vm->page_directory, pd_size,
                         RADEON_GPU_PAGE_SIZE, false);
    if (r == -ENOMEM) {
        r = radeon_vm_evict(rdev, vm);
        if (r)
            return r;
        goto retry;

    } else if (r) {
        return r;
    }

    vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

    /* Initially clear the page directory */
    pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
    memset(pd_addr, 0, pd_size);

    pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
    vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

    if (vm->page_tables == NULL) {
        DRM_ERROR("Cannot allocate memory for page table array\n");
        radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
        return -ENOMEM;
    }

    return 0;
}

/**
 * radeon_vm_add_to_lru - add VM's page table to LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
    list_del_init(&vm->list);
    list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
{
    struct radeon_fence *best[RADEON_NUM_RINGS] = {};
    unsigned choices[2] = {};
    unsigned i;

    /* check if the id is still valid */
    if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
        return NULL;

    /* we definitely need to flush */
    radeon_fence_unref(&vm->last_flush);

    /* skip over VMID 0, since it is the system VM */
    for (i = 1; i < rdev->vm_manager.nvm; ++i) {
        struct radeon_fence *fence = rdev->vm_manager.active[i];

        if (fence == NULL) {
            /* found a free one */
            vm->id = i;
            return NULL;
        }

        if (radeon_fence_is_earlier(fence, best[fence->ring])) {
            best[fence->ring] = fence;
            choices[fence->ring == ring ? 0 : 1] = i;
        }
    }

    for (i = 0; i < 2; ++i) {
        if (choices[i]) {
            vm->id = choices[i];
            return rdev->vm_manager.active[choices[i]];
        }
    }

    /* should never happen */
    BUG();
    return NULL;
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
    radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
    rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
                                       struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    list_for_each_entry(bo_va, &bo->va, bo_list) {
        if (bo_va->vm == vm) {
            return bo_va;
        }
    }
    return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
                                      struct radeon_vm *vm,
                                      struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
    if (bo_va == NULL) {
        return NULL;
    }
    bo_va->vm = vm;
    bo_va->bo = bo;
    bo_va->soffset = 0;
    bo_va->eoffset = 0;
    bo_va->flags = 0;
    bo_va->valid = false;
    bo_va->ref_count = 1;
    INIT_LIST_HEAD(&bo_va->bo_list);
    INIT_LIST_HEAD(&bo_va->vm_list);

    mutex_lock(&vm->mutex);
    list_add(&bo_va->vm_list, &vm->va);
    list_add_tail(&bo_va->bo_list, &bo->va);
    mutex_unlock(&vm->mutex);

    return bo_va;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t soffset,
                          uint32_t flags)
{
    uint64_t size = radeon_bo_size(bo_va->bo);
    uint64_t eoffset, last_offset = 0;
    struct radeon_vm *vm = bo_va->vm;
    struct radeon_bo_va *tmp;
    struct list_head *head;
    unsigned last_pfn;

    if (soffset) {
        /* make sure object fit at this offset */
        eoffset = soffset + size;
        if (soffset >= eoffset) {
            return -EINVAL;
        }

        last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
        if (last_pfn > rdev->vm_manager.max_pfn) {
            dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                    last_pfn, rdev->vm_manager.max_pfn);
            return -EINVAL;
        }

    } else {
        eoffset = last_pfn = 0;
    }

    mutex_lock(&vm->mutex);
    head = &vm->va;
    last_offset = 0;
    list_for_each_entry(tmp, &vm->va, vm_list) {
        if (bo_va == tmp) {
            /* skip over currently modified bo */
            continue;
        }

        if (soffset >= last_offset && eoffset <= tmp->soffset) {
            /* bo can be added before this one */
            break;
        }
        if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
            /* bo and tmp overlap, invalid offset */
            dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
                    bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
                    (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
            mutex_unlock(&vm->mutex);
            return -EINVAL;
        }
        last_offset = tmp->eoffset;
        head = &tmp->vm_list;
    }

    bo_va->soffset = soffset;
    bo_va->eoffset = eoffset;
    bo_va->flags = flags;
    bo_va->valid = false;
    list_move(&bo_va->vm_list, head);

    mutex_unlock(&vm->mutex);
    return 0;
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
    uint64_t result;

    /* page table offset */
    result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

    /* in case cpu page size != gpu page size*/
    result |= addr & (~PAGE_MASK);

    return result;
}

/**
 * radeon_vm_update_pdes - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_pdes(struct radeon_device *rdev,
                                 struct radeon_vm *vm,
                                 struct radeon_ib *ib,
                                 uint64_t start, uint64_t end)
{
    static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

    uint64_t last_pde = ~0, last_pt = ~0;
    unsigned count = 0;
    uint64_t pt_idx;
    int r;

    start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
    end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

    /* walk over the address space and update the page directory */
    for (pt_idx = start; pt_idx <= end; ++pt_idx) {
        uint64_t pde, pt;

        if (vm->page_tables[pt_idx])
            continue;

retry:
        r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                             &vm->page_tables[pt_idx],
                             RADEON_VM_PTE_COUNT * 8,
                             RADEON_GPU_PAGE_SIZE, false);

        if (r == -ENOMEM) {
            r = radeon_vm_evict(rdev, vm);
            if (r)
                return r;
            goto retry;
        } else if (r) {
            return r;
        }

        pde = vm->pd_gpu_addr + pt_idx * 8;

        pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);

        if (((last_pde + 8 * count) != pde) ||
            ((last_pt + incr * count) != pt)) {

            if (count) {
                radeon_asic_vm_set_page(rdev, ib, last_pde,
                                        last_pt, count, incr,
                                        RADEON_VM_PAGE_VALID);
            }

            count = 1;
            last_pde = pde;
            last_pt = pt;
        } else {
            ++count;
        }
    }

    if (count) {
        radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
                                incr, RADEON_VM_PAGE_VALID);
    }

    return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                  struct radeon_vm *vm,
                                  struct radeon_ib *ib,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
    static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

    uint64_t last_pte = ~0, last_dst = ~0;
    unsigned count = 0;
    uint64_t addr;

    start = start / RADEON_GPU_PAGE_SIZE;
    end = end / RADEON_GPU_PAGE_SIZE;

    /* walk over the address space and update the page tables */
    for (addr = start; addr < end; ) {
        uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
        unsigned nptes;
        uint64_t pte;

        if ((addr & ~mask) == (end & ~mask))
            nptes = end - addr;
        else
            nptes = RADEON_VM_PTE_COUNT - (addr & mask);

        pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
        pte += (addr & mask) * 8;

        if ((last_pte + 8 * count) != pte) {

            if (count) {
                radeon_asic_vm_set_page(rdev, ib, last_pte,
                                        last_dst, count,
                                        RADEON_GPU_PAGE_SIZE,
                                        flags);
            }

            count = nptes;
            last_pte = pte;
            last_dst = dst;
        } else {
            count += nptes;
        }

        addr += nptes;
        dst += nptes * RADEON_GPU_PAGE_SIZE;
    }

    if (count) {
        radeon_asic_vm_set_page(rdev, ib, last_pte,
                                last_dst, count,
                                RADEON_GPU_PAGE_SIZE, flags);
    }
}

/**
 * radeon_vm_bo_update_pte - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and global and local mutex must be locked!
 */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
                            struct radeon_vm *vm,
                            struct radeon_bo *bo,
                            struct ttm_mem_reg *mem)
{
    unsigned ridx = rdev->asic->vm.pt_ring_index;
    struct radeon_ib ib;
    struct radeon_bo_va *bo_va;
    unsigned nptes, npdes, ndw;
    uint64_t addr;
    int r;

    /* nothing to do if vm isn't bound */
    if (vm->page_directory == NULL)
        return 0;

    bo_va = radeon_vm_bo_find(vm, bo);
    if (bo_va == NULL) {
        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
        return -EINVAL;
    }

    if (!bo_va->soffset) {
        dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
                bo, vm);
        return -EINVAL;
    }

    if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
        return 0;

    bo_va->flags &= ~RADEON_VM_PAGE_VALID;
    bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
    if (mem) {
        addr = mem->start << PAGE_SHIFT;
        if (mem->mem_type != TTM_PL_SYSTEM) {
            bo_va->flags |= RADEON_VM_PAGE_VALID;
            bo_va->valid = true;
        }
        if (mem->mem_type == TTM_PL_TT) {
            bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
        } else {
            addr += rdev->vm_manager.vram_base_offset;
        }
    } else {
        addr = 0;
        bo_va->valid = false;
    }

    nptes = radeon_bo_ngpu_pages(bo);

    /* assume two extra pdes in case the mapping overlaps the borders */
    npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;

    /* padding, etc. */
    ndw = 64;

    if (RADEON_VM_BLOCK_SIZE > 11)
        /* reserve space for one header for every 2k dwords */
        ndw += (nptes >> 11) * 4;
    else
        /* reserve space for one header for
           every (1 << BLOCK_SIZE) entries */
        ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

    /* reserve space for pte addresses */
    ndw += nptes * 2;

    /* reserve space for one header for every 2k dwords */
    ndw += (npdes >> 11) * 4;

    /* reserve space for pde addresses */
    ndw += npdes * 2;

    /* update too big for an IB */
    if (ndw > 0xfffff)
        return -ENOMEM;

    r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
    if (r)
        return r;
    ib.length_dw = 0;

    r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
    if (r) {
        radeon_ib_free(rdev, &ib);
        return r;
    }

    radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
                          addr, bo_va->flags);

    radeon_ib_sync_to(&ib, vm->fence);
    r = radeon_ib_schedule(rdev, &ib, NULL);
    if (r) {
        radeon_ib_free(rdev, &ib);
        return r;
    }
    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(ib.fence);
    radeon_ib_free(rdev, &ib);
    radeon_fence_unref(&vm->last_flush);

    return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_bo_va *bo_va)
{
    int r = 0;

    mutex_lock(&rdev->vm_manager.lock);
    mutex_lock(&bo_va->vm->mutex);
    if (bo_va->soffset) {
        r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
    }
    mutex_unlock(&rdev->vm_manager.lock);
    list_del(&bo_va->vm_list);
    mutex_unlock(&bo_va->vm->mutex);
    list_del(&bo_va->bo_list);

    kfree(bo_va);
    return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    list_for_each_entry(bo_va, &bo->va, bo_list) {
        bo_va->valid = false;
    }
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
    vm->id = 0;
    vm->fence = NULL;
    mutex_init(&vm->mutex);
    INIT_LIST_HEAD(&vm->list);
    INIT_LIST_HEAD(&vm->va);
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
    struct radeon_bo_va *bo_va, *tmp;
    int r;

    mutex_lock(&rdev->vm_manager.lock);
    mutex_lock(&vm->mutex);
    radeon_vm_free_pt(rdev, vm);
    mutex_unlock(&rdev->vm_manager.lock);

    if (!list_empty(&vm->va)) {
        dev_err(rdev->dev, "still active bo inside vm\n");
    }
    list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
        list_del_init(&bo_va->vm_list);
        r = radeon_bo_reserve(bo_va->bo, false);
        if (!r) {
            list_del_init(&bo_va->bo_list);
            radeon_bo_unreserve(bo_va->bo);
            kfree(bo_va);
        }
    }
    radeon_fence_unref(&vm->fence);
    radeon_fence_unref(&vm->last_flush);
    mutex_unlock(&vm->mutex);
}