/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
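
/*
 * Minimal stand-in for the Linux pci_alloc_consistent() helper: the size
 * is rounded up to a 32 KiB multiple, AllocPages() (assumed here to hand
 * back a physically contiguous range) provides the bus address stored in
 * *dma_handle, and MapIoMem() returns an uncached CPU mapping of it.
 */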

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                     addr_t *dma_handle)
{
    size = (size + 0x7FFF) & ~0x7FFF;

    *dma_handle = AllocPages(size >> 12);
    return (void *)MapIoMem(*dma_handle, size, PG_SW + PG_NOCACHE);
}

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space. System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective. A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP. AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
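
/*
 * Worked example (illustration only, assuming 4 KiB CPU pages and the
 * 4 KiB RADEON_GPU_PAGE_SIZE): a buffer bound at GART offset 0x10000
 * starts at GPU page table entry t = offset / RADEON_GPU_PAGE_SIZE = 16,
 * backed by CPU page slot p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE) = 16;
 * each bound CPU page fills PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive
 * GPU page table entries, as radeon_gart_bind() below does.
 */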

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
    void *ptr;

    ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                               &rdev->gart.table_addr);
    if (ptr == NULL) {
        return -ENOMEM;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_uc((unsigned long)ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = ptr;
    memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
    return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
    if (rdev->gart.ptr == NULL) {
        return;
    }
#ifdef CONFIG_X86
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
        rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
        set_memory_wb((unsigned long)rdev->gart.ptr,
                      rdev->gart.table_size >> PAGE_SHIFT);
    }
#endif
    rdev->gart.ptr = NULL;
    rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        r = radeon_bo_create(rdev, rdev->gart.table_size,
                             PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                             NULL, &rdev->gart.robj);
        if (r) {
            return r;
        }
    }
    return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+). These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
    uint64_t gpu_addr;
    int r;

    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (unlikely(r != 0))
        return r;
    r = radeon_bo_pin(rdev->gart.robj,
                      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
    if (r) {
        radeon_bo_unreserve(rdev->gart.robj);
        return r;
    }
    r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
    if (r)
        radeon_bo_unpin(rdev->gart.robj);
    radeon_bo_unreserve(rdev->gart.robj);
    rdev->gart.table_addr = gpu_addr;
    return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.robj == NULL) {
        return;
    }
    r = radeon_bo_reserve(rdev->gart.robj, false);
    if (likely(r == 0)) {
        radeon_bo_kunmap(rdev->gart.robj);
        radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.ptr = NULL;
    }
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+). These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
    if (rdev->gart.robj == NULL) {
        return;
    }
    radeon_gart_table_vram_unpin(rdev);
    radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
    unsigned t;
    unsigned p;
    int i, j;
    u64 page_base;

    if (!rdev->gart.ready) {
        WARN(1, "trying to unbind memory from uninitialized GART !\n");
        return;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
    for (i = 0; i < pages; i++, p++) {
        if (rdev->gart.pages[p]) {
            rdev->gart.pages[p] = NULL;
            rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                if (rdev->gart.ptr) {
                    radeon_gart_set_page(rdev, t, page_base);
                }
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}
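
/*
 * Note: unbound slots are pointed at rdev->dummy_page rather than left
 * stale, presumably so that any late GPU access through the GART hits a
 * harmless scratch page instead of memory that has been freed or reused.
 */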

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, u32 *pagelist, dma_addr_t *dma_addr)
{
    unsigned t;
    unsigned p;
    uint64_t page_base;
    int i, j;

//    dbgprintf("offset %x pages %d list %x\n",
//              offset, pages, pagelist);
    if (!rdev->gart.ready) {
        WARN(1, "trying to bind memory to uninitialized GART !\n");
        return -EINVAL;
    }
    t = offset / RADEON_GPU_PAGE_SIZE;
    p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

    for (i = 0; i < pages; i++, p++) {
        rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
        rdev->gart.pages[p] = pagelist[i];
        if (rdev->gart.ptr) {
            page_base = rdev->gart.pages_addr[p];
            for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                radeon_gart_set_page(rdev, t, page_base);
                page_base += RADEON_GPU_PAGE_SIZE;
            }
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
    return 0;
}

/**
 * radeon_gart_restore - bind all pages in the gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Binds all pages in the gart page table (all asics).
 * Used to rebuild the gart table on device startup or resume.
 */
void radeon_gart_restore(struct radeon_device *rdev)
{
    int i, j, t;
    u64 page_base;

    if (!rdev->gart.ptr) {
        return;
    }
    for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
        page_base = rdev->gart.pages_addr[i];
        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
            radeon_gart_set_page(rdev, t, page_base);
            page_base += RADEON_GPU_PAGE_SIZE;
        }
    }
    mb();
    radeon_gart_tlb_flush(rdev);
}

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
    int r, i;

    if (rdev->gart.pages) {
        return 0;
    }
    /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
    if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
        DRM_ERROR("Page size is smaller than GPU page size!\n");
        return -EINVAL;
    }
    r = radeon_dummy_page_init(rdev);
    if (r)
        return r;
    /* Compute table size */
    rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
    rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
    DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
             rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
    /* Allocate pages table */
    rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
    if (rdev->gart.pages == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
                                    rdev->gart.num_cpu_pages);
    if (rdev->gart.pages_addr == NULL) {
        radeon_gart_fini(rdev);
        return -ENOMEM;
    }
    /* set GART entry to point to the dummy page by default */
    for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
        rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
    }
    return 0;
}
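
/*
 * Sizing illustration (assumes a 256 MiB GTT and 4 KiB pages on both
 * sides): num_cpu_pages = num_gpu_pages = 256 MiB / 4 KiB = 65536, so the
 * pages and pages_addr arrays above cost 65536 * sizeof(void *) and
 * 65536 * sizeof(dma_addr_t) bytes respectively.
 */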

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
    if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
        /* unbind pages */
        radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
    }
    rdev->gart.ready = false;
    vfree(rdev->gart.pages);
    vfree(rdev->gart.pages_addr);
    rdev->gart.pages = NULL;
    rdev->gart.pages_addr = NULL;
}

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
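
/*
 * Rough size sketch (illustration, assuming the usual 4 KiB GPU pages and
 * an 8-byte entry): the page directory holds
 * radeon_vm_num_pdes() = max_pfn >> RADEON_VM_BLOCK_SIZE entries, each
 * covering 2^RADEON_VM_BLOCK_SIZE GPU pages, and
 * radeon_vm_directory_size() below is simply that entry count * 8 bytes
 * rounded up to a GPU page.
 */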

/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
    return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
    return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
    struct radeon_vm *vm;
    struct radeon_bo_va *bo_va;
    int r;
    unsigned size;

    if (!rdev->vm_manager.enabled) {
        /* allocate enough for 2 full VM pts */
        size = radeon_vm_directory_size(rdev);
        size += rdev->vm_manager.max_pfn * 8;
        size *= 2;
        r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
                                      RADEON_GPU_PAGE_ALIGN(size),
                                      RADEON_GEM_DOMAIN_VRAM);
        if (r) {
            dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
                    (rdev->vm_manager.max_pfn * 8) >> 10);
            return r;
        }

        r = radeon_asic_vm_init(rdev);
        if (r)
            return r;

        rdev->vm_manager.enabled = true;

        r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
        if (r)
            return r;
    }

    /* restore page table */
    list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
        if (vm->page_directory == NULL)
            continue;

        list_for_each_entry(bo_va, &vm->va, vm_list) {
            bo_va->valid = false;
        }
    }
    return 0;
}

/**
 * radeon_vm_free_pt - free the page table for a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm to unbind
 *
 * Free the page table of a specific vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_free_pt(struct radeon_device *rdev,
                              struct radeon_vm *vm)
{
    struct radeon_bo_va *bo_va;
    int i;

    if (!vm->page_directory)
        return;

    list_del_init(&vm->list);
    radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);

    list_for_each_entry(bo_va, &vm->va, vm_list) {
        bo_va->valid = false;
    }

    if (vm->page_tables == NULL)
        return;

    for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
        radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);

    kfree(vm->page_tables);
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
    struct radeon_vm *vm, *tmp;
    int i;

    if (!rdev->vm_manager.enabled)
        return;

    mutex_lock(&rdev->vm_manager.lock);
    /* free all allocated page tables */
    list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
        mutex_lock(&vm->mutex);
        radeon_vm_free_pt(rdev, vm);
        mutex_unlock(&vm->mutex);
    }
    for (i = 0; i < RADEON_NUM_VM; ++i) {
        radeon_fence_unref(&rdev->vm_manager.active[i]);
    }
    radeon_asic_vm_fini(rdev);
    mutex_unlock(&rdev->vm_manager.lock);

    radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
    radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
    rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_evict - evict page table to make room for new one
 *
 * @rdev: radeon_device pointer
 * @vm: VM we want to allocate something for
 *
 * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
 * Returns 0 for success, -ENOMEM for failure.
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
    struct radeon_vm *vm_evict;

    if (list_empty(&rdev->vm_manager.lru_vm))
        return -ENOMEM;

    vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
                                struct radeon_vm, list);
    if (vm_evict == vm)
        return -ENOMEM;

    mutex_lock(&vm_evict->mutex);
    radeon_vm_free_pt(rdev, vm_evict);
    mutex_unlock(&vm_evict->mutex);
    return 0;
}

/**
 * radeon_vm_alloc_pt - allocates a page table for a VM
 *
 * @rdev: radeon_device pointer
 * @vm: vm to bind
 *
 * Allocate a page table for the requested vm (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
    unsigned pd_size, pts_size;
    u64 *pd_addr;
    int r;

    if (vm == NULL) {
        return -EINVAL;
    }

    if (vm->page_directory != NULL) {
        return 0;
    }

retry:
    pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
    r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
                         &vm->page_directory, pd_size,
                         RADEON_GPU_PAGE_SIZE, false);
    if (r == -ENOMEM) {
        r = radeon_vm_evict(rdev, vm);
        if (r)
            return r;
        goto retry;

    } else if (r) {
        return r;
    }

    vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);

    /* Initially clear the page directory */
    pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
    memset(pd_addr, 0, pd_size);

    pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
    vm->page_tables = kzalloc(pts_size, GFP_KERNEL);

    if (vm->page_tables == NULL) {
        DRM_ERROR("Cannot allocate memory for page table array\n");
        radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
        return -ENOMEM;
    }

    return 0;
}

/**
 * radeon_vm_add_to_lru - add VMs page table to LRU list
 *
 * @rdev: radeon_device pointer
 * @vm: vm to add to LRU
 *
 * Add the allocated page table to the LRU list (cayman+).
 *
 * Global mutex must be locked!
 */
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
    list_del_init(&vm->list);
    list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
{
    struct radeon_fence *best[RADEON_NUM_RINGS] = {};
    unsigned choices[2] = {};
    unsigned i;

    /* check if the id is still valid */
    if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
        return NULL;

    /* we definitely need to flush */
    radeon_fence_unref(&vm->last_flush);

    /* skip over VMID 0, since it is the system VM */
    for (i = 1; i < rdev->vm_manager.nvm; ++i) {
        struct radeon_fence *fence = rdev->vm_manager.active[i];

        if (fence == NULL) {
            /* found a free one */
            vm->id = i;
            return NULL;
        }

        if (radeon_fence_is_earlier(fence, best[fence->ring])) {
            best[fence->ring] = fence;
            choices[fence->ring == ring ? 0 : 1] = i;
        }
    }

    for (i = 0; i < 2; ++i) {
        if (choices[i]) {
            vm->id = choices[i];
            return rdev->vm_manager.active[choices[i]];
        }
    }

    /* should never happen */
    BUG();
    return NULL;
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
    radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
    rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
                                       struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    list_for_each_entry(bo_va, &bo->va, bo_list) {
        if (bo_va->vm == vm) {
            return bo_va;
        }
    }
    return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
                                      struct radeon_vm *vm,
                                      struct radeon_bo *bo)
{
    struct radeon_bo_va *bo_va;

    bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
    if (bo_va == NULL) {
        return NULL;
    }
    bo_va->vm = vm;
    bo_va->bo = bo;
    bo_va->soffset = 0;
    bo_va->eoffset = 0;
    bo_va->flags = 0;
    bo_va->valid = false;
    bo_va->ref_count = 1;
    INIT_LIST_HEAD(&bo_va->bo_list);
    INIT_LIST_HEAD(&bo_va->vm_list);

    mutex_lock(&vm->mutex);
    list_add(&bo_va->vm_list, &vm->va);
    list_add_tail(&bo_va->bo_list, &bo->va);
    mutex_unlock(&vm->mutex);

    return bo_va;
}
824 | 822 | ||
825 | /** |
823 | /** |
826 | * radeon_vm_bo_set_addr - set bos virtual address inside a vm |
824 | * radeon_vm_bo_set_addr - set bos virtual address inside a vm |
827 | * |
825 | * |
828 | * @rdev: radeon_device pointer |
826 | * @rdev: radeon_device pointer |
829 | * @bo_va: bo_va to store the address |
827 | * @bo_va: bo_va to store the address |
830 | * @soffset: requested offset of the buffer in the VM address space |
828 | * @soffset: requested offset of the buffer in the VM address space |
831 | * @flags: attributes of pages (read/write/valid/etc.) |
829 | * @flags: attributes of pages (read/write/valid/etc.) |
832 | * |
830 | * |
833 | * Set offset of @bo_va (cayman+). |
831 | * Set offset of @bo_va (cayman+). |
834 | * Validate and set the offset requested within the vm address space. |
832 | * Validate and set the offset requested within the vm address space. |
835 | * Returns 0 for success, error for failure. |
833 | * Returns 0 for success, error for failure. |
836 | * |
834 | * |
837 | * Object has to be reserved! |
835 | * Object has to be reserved! |
838 | */ |
836 | */ |
839 | int radeon_vm_bo_set_addr(struct radeon_device *rdev, |
837 | int radeon_vm_bo_set_addr(struct radeon_device *rdev, |
840 | struct radeon_bo_va *bo_va, |
838 | struct radeon_bo_va *bo_va, |
841 | uint64_t soffset, |
839 | uint64_t soffset, |
842 | uint32_t flags) |
840 | uint32_t flags) |
843 | { |
841 | { |
844 | uint64_t size = radeon_bo_size(bo_va->bo); |
842 | uint64_t size = radeon_bo_size(bo_va->bo); |
845 | uint64_t eoffset, last_offset = 0; |
843 | uint64_t eoffset, last_offset = 0; |
846 | struct radeon_vm *vm = bo_va->vm; |
844 | struct radeon_vm *vm = bo_va->vm; |
847 | struct radeon_bo_va *tmp; |
845 | struct radeon_bo_va *tmp; |
848 | struct list_head *head; |
846 | struct list_head *head; |
849 | unsigned last_pfn; |
847 | unsigned last_pfn; |
850 | 848 | ||
851 | if (soffset) { |
849 | if (soffset) { |
852 | /* make sure object fit at this offset */ |
850 | /* make sure object fit at this offset */ |
853 | eoffset = soffset + size; |
851 | eoffset = soffset + size; |
854 | if (soffset >= eoffset) { |
852 | if (soffset >= eoffset) { |
855 | return -EINVAL; |
853 | return -EINVAL; |
856 | } |
854 | } |
857 | 855 | ||
858 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; |
856 | last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; |
859 | if (last_pfn > rdev->vm_manager.max_pfn) { |
857 | if (last_pfn > rdev->vm_manager.max_pfn) { |
860 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", |
858 | dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", |
861 | last_pfn, rdev->vm_manager.max_pfn); |
859 | last_pfn, rdev->vm_manager.max_pfn); |
862 | return -EINVAL; |
860 | return -EINVAL; |
863 | } |
861 | } |
864 | 862 | ||
865 | } else { |
863 | } else { |
866 | eoffset = last_pfn = 0; |
864 | eoffset = last_pfn = 0; |
867 | } |
865 | } |
868 | 866 | ||
869 | mutex_lock(&vm->mutex); |
867 | mutex_lock(&vm->mutex); |
870 | head = &vm->va; |
868 | head = &vm->va; |
871 | last_offset = 0; |
869 | last_offset = 0; |
872 | list_for_each_entry(tmp, &vm->va, vm_list) { |
870 | list_for_each_entry(tmp, &vm->va, vm_list) { |
873 | if (bo_va == tmp) { |
871 | if (bo_va == tmp) { |
874 | /* skip over currently modified bo */ |
872 | /* skip over currently modified bo */ |
875 | continue; |
873 | continue; |
876 | } |
874 | } |
877 | 875 | ||
878 | if (soffset >= last_offset && eoffset <= tmp->soffset) { |
876 | if (soffset >= last_offset && eoffset <= tmp->soffset) { |
879 | /* bo can be added before this one */ |
877 | /* bo can be added before this one */ |
880 | break; |
878 | break; |
881 | } |
879 | } |
882 | if (eoffset > tmp->soffset && soffset < tmp->eoffset) { |
880 | if (eoffset > tmp->soffset && soffset < tmp->eoffset) { |
883 | /* bo and tmp overlap, invalid offset */ |
881 | /* bo and tmp overlap, invalid offset */ |
884 | dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", |
882 | dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n", |
885 | bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, |
883 | bo_va->bo, (unsigned)bo_va->soffset, tmp->bo, |
886 | (unsigned)tmp->soffset, (unsigned)tmp->eoffset); |
884 | (unsigned)tmp->soffset, (unsigned)tmp->eoffset); |
887 | mutex_unlock(&vm->mutex); |
885 | mutex_unlock(&vm->mutex); |
888 | return -EINVAL; |
886 | return -EINVAL; |
889 | } |
887 | } |
890 | last_offset = tmp->eoffset; |
888 | last_offset = tmp->eoffset; |
891 | head = &tmp->vm_list; |
889 | head = &tmp->vm_list; |
892 | } |
890 | } |
893 | 891 | ||
894 | bo_va->soffset = soffset; |
892 | bo_va->soffset = soffset; |
895 | bo_va->eoffset = eoffset; |
893 | bo_va->eoffset = eoffset; |
896 | bo_va->flags = flags; |
894 | bo_va->flags = flags; |
897 | bo_va->valid = false; |
895 | bo_va->valid = false; |
898 | list_move(&bo_va->vm_list, head); |
896 | list_move(&bo_va->vm_list, head); |
899 | 897 | ||
900 | mutex_unlock(&vm->mutex); |
898 | mutex_unlock(&vm->mutex); |
901 | return 0; |
899 | return 0; |
902 | } |
900 | } |
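radeon_vm_bo_set_addr walks the VM's mappings, which are kept sorted by start offset, to find an insertion point and to reject any range that would overlap an existing one. The sketch below reproduces just that interval logic in userspace; struct range and range_fits are illustrative names, and the half-open [soffset, eoffset) convention is taken from the code above.

/* Userspace sketch of the range checks radeon_vm_bo_set_addr performs:
 * walk mappings sorted by start offset, find the insertion point, and
 * reject any [soffset, eoffset) that overlaps an existing range. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct range { uint64_t soffset, eoffset; };

/* returns true if [s, e) fits without overlapping any existing range */
static bool range_fits(const struct range *sorted, int n, uint64_t s, uint64_t e)
{
    uint64_t last = 0;
    for (int i = 0; i < n; i++) {
        if (s >= last && e <= sorted[i].soffset)
            return true;                  /* fits in the gap before entry i */
        if (e > sorted[i].soffset && s < sorted[i].eoffset)
            return false;                 /* overlap, same test as the driver */
        last = sorted[i].eoffset;
    }
    return true;                          /* fits after the last range */
}

int main(void)
{
    struct range va[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };
    printf("%d\n", range_fits(va, 2, 0x4000, 0x5000)); /* 1: falls in a gap */
    printf("%d\n", range_fits(va, 2, 0x2000, 0x4000)); /* 0: overlaps first */
    return 0;
}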
903 | 901 | ||
904 | /** |
902 | /** |
905 | * radeon_vm_map_gart - get the physical address of a gart page |
903 | * radeon_vm_map_gart - get the physical address of a gart page |
906 | * |
904 | * |
907 | * @rdev: radeon_device pointer |
905 | * @rdev: radeon_device pointer |
908 | * @addr: the unmapped addr |
906 | * @addr: the unmapped addr |
909 | * |
907 | * |
910 | * Look up the physical address of the page that the pte resolves |
908 | * Look up the physical address of the page that the pte resolves |
911 | * to (cayman+). |
909 | * to (cayman+). |
912 | * Returns the physical address of the page. |
910 | * Returns the physical address of the page. |
913 | */ |
911 | */ |
914 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) |
912 | uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr) |
915 | { |
913 | { |
916 | uint64_t result; |
914 | uint64_t result; |
917 | 915 | ||
918 | /* page table offset */ |
916 | /* page table offset */ |
919 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; |
917 | result = rdev->gart.pages_addr[addr >> PAGE_SHIFT]; |
920 | 918 | ||
921 | /* in case cpu page size != gpu page size */ |
919 | /* in case cpu page size != gpu page size */ |
922 | result |= addr & (~PAGE_MASK); |
920 | result |= addr & (~PAGE_MASK); |
923 | 921 | ||
924 | return result; |
922 | return result; |
925 | } |
923 | } |
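radeon_vm_map_gart composes the physical address from the system-page array and the low bits of the GPU address, which matters when the CPU page is larger than the 4 KiB GPU page. A small standalone sketch, assuming 4 KiB CPU pages and a made-up pages_addr array:

/* Userspace sketch of the address split radeon_vm_map_gart performs: the
 * upper bits index the system-page array, the low bits are kept as an
 * offset into that page. */
#include <stdio.h>
#include <stdint.h>

#define CPU_PAGE_SHIFT 12            /* assumed 4 KiB CPU pages */
#define CPU_PAGE_MASK  (~((1ULL << CPU_PAGE_SHIFT) - 1))

static uint64_t map_gart(const uint64_t *pages_addr, uint64_t addr)
{
    uint64_t result = pages_addr[addr >> CPU_PAGE_SHIFT]; /* physical page base */
    result |= addr & ~CPU_PAGE_MASK;                       /* keep offset in page */
    return result;
}

int main(void)
{
    /* pretend the first two GART pages are backed by these physical pages */
    uint64_t pages_addr[2] = { 0x80000000ULL, 0x9abcd000ULL };
    /* 0x1234 -> page 1, offset 0x234 -> 0x9abcd234 */
    printf("0x%llx\n", (unsigned long long)map_gart(pages_addr, 0x1234));
    return 0;
}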
926 | 924 | ||
927 | /** |
925 | /** |
928 | * radeon_vm_update_pdes - make sure that page directory is valid |
926 | * radeon_vm_update_pdes - make sure that page directory is valid |
929 | * |
927 | * |
930 | * @rdev: radeon_device pointer |
928 | * @rdev: radeon_device pointer |
931 | * @vm: requested vm |
929 | * @vm: requested vm |
932 | * @start: start of GPU address range |
930 | * @start: start of GPU address range |
933 | * @end: end of GPU address range |
931 | * @end: end of GPU address range |
934 | * |
932 | * |
935 | * Allocates new page tables if necessary |
933 | * Allocates new page tables if necessary |
936 | * and updates the page directory (cayman+). |
934 | * and updates the page directory (cayman+). |
937 | * Returns 0 for success, error for failure. |
935 | * Returns 0 for success, error for failure. |
938 | * |
936 | * |
939 | * Global and local mutex must be locked! |
937 | * Global and local mutex must be locked! |
940 | */ |
938 | */ |
941 | static int radeon_vm_update_pdes(struct radeon_device *rdev, |
939 | static int radeon_vm_update_pdes(struct radeon_device *rdev, |
942 | struct radeon_vm *vm, |
940 | struct radeon_vm *vm, |
- | 941 | struct radeon_ib *ib, |
|
943 | uint64_t start, uint64_t end) |
942 | uint64_t start, uint64_t end) |
944 | { |
943 | { |
945 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; |
944 | static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; |
946 | 945 | ||
947 | uint64_t last_pde = ~0, last_pt = ~0; |
946 | uint64_t last_pde = ~0, last_pt = ~0; |
948 | unsigned count = 0; |
947 | unsigned count = 0; |
949 | uint64_t pt_idx; |
948 | uint64_t pt_idx; |
950 | int r; |
949 | int r; |
951 | 950 | ||
952 | start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; |
951 | start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; |
953 | end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; |
952 | end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE; |
954 | 953 | ||
955 | /* walk over the address space and update the page directory */ |
954 | /* walk over the address space and update the page directory */ |
956 | for (pt_idx = start; pt_idx <= end; ++pt_idx) { |
955 | for (pt_idx = start; pt_idx <= end; ++pt_idx) { |
957 | uint64_t pde, pt; |
956 | uint64_t pde, pt; |
958 | 957 | ||
959 | if (vm->page_tables[pt_idx]) |
958 | if (vm->page_tables[pt_idx]) |
960 | continue; |
959 | continue; |
961 | 960 | ||
962 | retry: |
961 | retry: |
963 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
962 | r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, |
964 | &vm->page_tables[pt_idx], |
963 | &vm->page_tables[pt_idx], |
965 | RADEON_VM_PTE_COUNT * 8, |
964 | RADEON_VM_PTE_COUNT * 8, |
966 | RADEON_GPU_PAGE_SIZE, false); |
965 | RADEON_GPU_PAGE_SIZE, false); |
967 | 966 | ||
968 | if (r == -ENOMEM) { |
967 | if (r == -ENOMEM) { |
969 | r = radeon_vm_evict(rdev, vm); |
968 | r = radeon_vm_evict(rdev, vm); |
970 | if (r) |
969 | if (r) |
971 | return r; |
970 | return r; |
972 | goto retry; |
971 | goto retry; |
973 | } else if (r) { |
972 | } else if (r) { |
974 | return r; |
973 | return r; |
975 | } |
974 | } |
976 | 975 | ||
977 | pde = vm->pd_gpu_addr + pt_idx * 8; |
976 | pde = vm->pd_gpu_addr + pt_idx * 8; |
978 | 977 | ||
979 | pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); |
978 | pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); |
980 | 979 | ||
981 | if (((last_pde + 8 * count) != pde) || |
980 | if (((last_pde + 8 * count) != pde) || |
982 | ((last_pt + incr * count) != pt)) { |
981 | ((last_pt + incr * count) != pt)) { |
983 | 982 | ||
984 | if (count) { |
983 | if (count) { |
985 | radeon_asic_vm_set_page(rdev, last_pde, |
984 | radeon_asic_vm_set_page(rdev, ib, last_pde, |
986 | last_pt, count, incr, |
985 | last_pt, count, incr, |
987 | RADEON_VM_PAGE_VALID); |
986 | RADEON_VM_PAGE_VALID); |
988 | } |
987 | } |
989 | 988 | ||
990 | count = 1; |
989 | count = 1; |
991 | last_pde = pde; |
990 | last_pde = pde; |
992 | last_pt = pt; |
991 | last_pt = pt; |
993 | } else { |
992 | } else { |
994 | ++count; |
993 | ++count; |
995 | } |
994 | } |
996 | } |
995 | } |
997 | 996 | ||
998 | if (count) { |
997 | if (count) { |
999 | radeon_asic_vm_set_page(rdev, last_pde, last_pt, count, |
998 | radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count, |
1000 | incr, RADEON_VM_PAGE_VALID); |
999 | incr, RADEON_VM_PAGE_VALID); |
1001 | 1000 | ||
1002 | } |
1001 | } |
1003 | 1002 | ||
1004 | return 0; |
1003 | return 0; |
1005 | } |
1004 | } |
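radeon_vm_update_pdes batches directory writes: as long as successive PDE slots and their page-table addresses stay contiguous it only grows a counter, and it emits a single set-page call when the run breaks. A userspace sketch of that run-length batching, with set_page as a printf stand-in for radeon_asic_vm_set_page and 512 entries per table assumed:

/* Userspace sketch of the batching pattern in radeon_vm_update_pdes:
 * directory entries whose addresses stay contiguous are collected into a
 * single set_page() call instead of one call per entry. */
#include <stdio.h>
#include <stdint.h>

#define PTE_COUNT 512                      /* assumed entries per page table */
static const uint32_t incr = PTE_COUNT * 8;

/* stand-in for radeon_asic_vm_set_page(): just report the run it was given */
static void set_page(uint64_t pde, uint64_t pt, unsigned count)
{
    printf("write %u PDEs starting at 0x%llx -> pt 0x%llx\n",
           count, (unsigned long long)pde, (unsigned long long)pt);
}

int main(void)
{
    /* page-table addresses for four directory slots; slot 2 breaks the run */
    uint64_t pd_base = 0x100000;
    uint64_t pts[4] = { 0x200000, 0x201000, 0x500000, 0x501000 };
    uint64_t last_pde = ~0ULL, last_pt = ~0ULL;
    unsigned count = 0;

    for (int idx = 0; idx < 4; idx++) {
        uint64_t pde = pd_base + idx * 8, pt = pts[idx];
        if (last_pde + 8 * count != pde || last_pt + incr * count != pt) {
            if (count)
                set_page(last_pde, last_pt, count);   /* flush previous run */
            count = 1;
            last_pde = pde;
            last_pt = pt;
        } else {
            ++count;                                  /* extend current run */
        }
    }
    if (count)
        set_page(last_pde, last_pt, count);           /* flush the tail */
    return 0;
}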
1006 | 1005 | ||
1007 | /** |
1006 | /** |
1008 | * radeon_vm_update_ptes - make sure that page tables are valid |
1007 | * radeon_vm_update_ptes - make sure that page tables are valid |
1009 | * |
1008 | * |
1010 | * @rdev: radeon_device pointer |
1009 | * @rdev: radeon_device pointer |
1011 | * @vm: requested vm |
1010 | * @vm: requested vm |
1012 | * @start: start of GPU address range |
1011 | * @start: start of GPU address range |
1013 | * @end: end of GPU address range |
1012 | * @end: end of GPU address range |
1014 | * @dst: destination address to map to |
1013 | * @dst: destination address to map to |
1015 | * @flags: mapping flags |
1014 | * @flags: mapping flags |
1016 | * |
1015 | * |
1017 | * Update the page tables in the range @start - @end (cayman+). |
1016 | * Update the page tables in the range @start - @end (cayman+). |
1018 | * |
1017 | * |
1019 | * Global and local mutex must be locked! |
1018 | * Global and local mutex must be locked! |
1020 | */ |
1019 | */ |
1021 | static void radeon_vm_update_ptes(struct radeon_device *rdev, |
1020 | static void radeon_vm_update_ptes(struct radeon_device *rdev, |
1022 | struct radeon_vm *vm, |
1021 | struct radeon_vm *vm, |
- | 1022 | struct radeon_ib *ib, |
|
1023 | uint64_t start, uint64_t end, |
1023 | uint64_t start, uint64_t end, |
1024 | uint64_t dst, uint32_t flags) |
1024 | uint64_t dst, uint32_t flags) |
1025 | { |
1025 | { |
1026 | static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; |
1026 | static const uint64_t mask = RADEON_VM_PTE_COUNT - 1; |
1027 | 1027 | ||
1028 | uint64_t last_pte = ~0, last_dst = ~0; |
1028 | uint64_t last_pte = ~0, last_dst = ~0; |
1029 | unsigned count = 0; |
1029 | unsigned count = 0; |
1030 | uint64_t addr; |
1030 | uint64_t addr; |
1031 | 1031 | ||
1032 | start = start / RADEON_GPU_PAGE_SIZE; |
1032 | start = start / RADEON_GPU_PAGE_SIZE; |
1033 | end = end / RADEON_GPU_PAGE_SIZE; |
1033 | end = end / RADEON_GPU_PAGE_SIZE; |
1034 | 1034 | ||
1035 | /* walk over the address space and update the page tables */ |
1035 | /* walk over the address space and update the page tables */ |
1036 | for (addr = start; addr < end; ) { |
1036 | for (addr = start; addr < end; ) { |
1037 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; |
1037 | uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE; |
1038 | unsigned nptes; |
1038 | unsigned nptes; |
1039 | uint64_t pte; |
1039 | uint64_t pte; |
1040 | 1040 | ||
1041 | if ((addr & ~mask) == (end & ~mask)) |
1041 | if ((addr & ~mask) == (end & ~mask)) |
1042 | nptes = end - addr; |
1042 | nptes = end - addr; |
1043 | else |
1043 | else |
1044 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); |
1044 | nptes = RADEON_VM_PTE_COUNT - (addr & mask); |
1045 | 1045 | ||
1046 | pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); |
1046 | pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]); |
1047 | pte += (addr & mask) * 8; |
1047 | pte += (addr & mask) * 8; |
1048 | 1048 | ||
1049 | if ((last_pte + 8 * count) != pte) { |
1049 | if ((last_pte + 8 * count) != pte) { |
1050 | 1050 | ||
1051 | if (count) { |
1051 | if (count) { |
1052 | radeon_asic_vm_set_page(rdev, last_pte, |
1052 | radeon_asic_vm_set_page(rdev, ib, last_pte, |
1053 | last_dst, count, |
1053 | last_dst, count, |
1054 | RADEON_GPU_PAGE_SIZE, |
1054 | RADEON_GPU_PAGE_SIZE, |
1055 | flags); |
1055 | flags); |
1056 | } |
1056 | } |
1057 | 1057 | ||
1058 | count = nptes; |
1058 | count = nptes; |
1059 | last_pte = pte; |
1059 | last_pte = pte; |
1060 | last_dst = dst; |
1060 | last_dst = dst; |
1061 | } else { |
1061 | } else { |
1062 | count += nptes; |
1062 | count += nptes; |
1063 | } |
1063 | } |
1064 | 1064 | ||
1065 | addr += nptes; |
1065 | addr += nptes; |
1066 | dst += nptes * RADEON_GPU_PAGE_SIZE; |
1066 | dst += nptes * RADEON_GPU_PAGE_SIZE; |
1067 | } |
1067 | } |
1068 | 1068 | ||
1069 | if (count) { |
1069 | if (count) { |
1070 | radeon_asic_vm_set_page(rdev, last_pte, last_dst, count, |
1070 | radeon_asic_vm_set_page(rdev, ib, last_pte, |
- | 1071 | last_dst, count, |
|
1071 | RADEON_GPU_PAGE_SIZE, flags); |
1072 | RADEON_GPU_PAGE_SIZE, flags); |
1072 | } |
1073 | } |
1073 | } |
1074 | } |
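radeon_vm_update_ptes chops the requested page range at page-table boundaries, so each loop iteration touches entries of exactly one table. The sketch below shows only that chunking arithmetic; the 2^9 entries-per-table value is an assumption for illustration:

/* Userspace sketch of how radeon_vm_update_ptes walks a page range: each
 * iteration handles at most the remainder of the current page table, so a
 * mapping that crosses a table boundary is split into per-table chunks. */
#include <stdio.h>
#include <stdint.h>

#define BLOCK_SIZE 9                           /* assumed: 2^9 PTEs per table */
#define PTE_COUNT  (1u << BLOCK_SIZE)

int main(void)
{
    const uint64_t mask = PTE_COUNT - 1;
    uint64_t start = 500, end = 1100;          /* GPU page numbers */

    for (uint64_t addr = start; addr < end; ) {
        uint64_t pt_idx = addr >> BLOCK_SIZE;  /* which page table */
        unsigned nptes;

        if ((addr & ~mask) == (end & ~mask))
            nptes = end - addr;                /* last chunk, same table */
        else
            nptes = PTE_COUNT - (addr & mask); /* up to the table boundary */

        printf("table %llu: %u ptes from entry %llu\n",
               (unsigned long long)pt_idx, nptes,
               (unsigned long long)(addr & mask));
        addr += nptes;
    }
    return 0;
}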
1074 | 1075 | ||
1075 | /** |
1076 | /** |
1076 | * radeon_vm_bo_update_pte - map a bo into the vm page table |
1077 | * radeon_vm_bo_update_pte - map a bo into the vm page table |
1077 | * |
1078 | * |
1078 | * @rdev: radeon_device pointer |
1079 | * @rdev: radeon_device pointer |
1079 | * @vm: requested vm |
1080 | * @vm: requested vm |
1080 | * @bo: radeon buffer object |
1081 | * @bo: radeon buffer object |
1081 | * @mem: ttm mem |
1082 | * @mem: ttm mem |
1082 | * |
1083 | * |
1083 | * Fill in the page table entries for @bo (cayman+). |
1084 | * Fill in the page table entries for @bo (cayman+). |
1084 | * Returns 0 for success, -EINVAL for failure. |
1085 | * Returns 0 for success, -EINVAL for failure. |
1085 | * |
1086 | * |
1086 | * Object has to be reserved & global and local mutex must be locked! |
1087 | * Object has to be reserved & global and local mutex must be locked! |
1087 | */ |
1088 | */ |
1088 | int radeon_vm_bo_update_pte(struct radeon_device *rdev, |
1089 | int radeon_vm_bo_update_pte(struct radeon_device *rdev, |
1089 | struct radeon_vm *vm, |
1090 | struct radeon_vm *vm, |
1090 | struct radeon_bo *bo, |
1091 | struct radeon_bo *bo, |
1091 | struct ttm_mem_reg *mem) |
1092 | struct ttm_mem_reg *mem) |
1092 | { |
1093 | { |
1093 | unsigned ridx = rdev->asic->vm.pt_ring_index; |
1094 | unsigned ridx = rdev->asic->vm.pt_ring_index; |
1094 | struct radeon_ring *ring = &rdev->ring[ridx]; |
- | |
1095 | struct radeon_semaphore *sem = NULL; |
1095 | struct radeon_ib ib; |
1096 | struct radeon_bo_va *bo_va; |
1096 | struct radeon_bo_va *bo_va; |
1097 | unsigned nptes, npdes, ndw; |
1097 | unsigned nptes, npdes, ndw; |
1098 | uint64_t addr; |
1098 | uint64_t addr; |
1099 | int r; |
1099 | int r; |
1100 | 1100 | ||
1101 | /* nothing to do if vm isn't bound */ |
1101 | /* nothing to do if vm isn't bound */ |
1102 | if (vm->page_directory == NULL) |
1102 | if (vm->page_directory == NULL) |
1103 | return 0; |
1103 | return 0; |
1104 | 1104 | ||
1105 | bo_va = radeon_vm_bo_find(vm, bo); |
1105 | bo_va = radeon_vm_bo_find(vm, bo); |
1106 | if (bo_va == NULL) { |
1106 | if (bo_va == NULL) { |
1107 | dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); |
1107 | dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm); |
1108 | return -EINVAL; |
1108 | return -EINVAL; |
1109 | } |
1109 | } |
1110 | 1110 | ||
1111 | if (!bo_va->soffset) { |
1111 | if (!bo_va->soffset) { |
1112 | dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", |
1112 | dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n", |
1113 | bo, vm); |
1113 | bo, vm); |
1114 | return -EINVAL; |
1114 | return -EINVAL; |
1115 | } |
1115 | } |
1116 | 1116 | ||
1117 | if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) |
1117 | if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL)) |
1118 | return 0; |
1118 | return 0; |
1119 | 1119 | ||
1120 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
1120 | bo_va->flags &= ~RADEON_VM_PAGE_VALID; |
1121 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; |
1121 | bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; |
1122 | if (mem) { |
1122 | if (mem) { |
1123 | addr = mem->start << PAGE_SHIFT; |
1123 | addr = mem->start << PAGE_SHIFT; |
1124 | if (mem->mem_type != TTM_PL_SYSTEM) { |
1124 | if (mem->mem_type != TTM_PL_SYSTEM) { |
1125 | bo_va->flags |= RADEON_VM_PAGE_VALID; |
1125 | bo_va->flags |= RADEON_VM_PAGE_VALID; |
1126 | bo_va->valid = true; |
1126 | bo_va->valid = true; |
1127 | } |
1127 | } |
1128 | if (mem->mem_type == TTM_PL_TT) { |
1128 | if (mem->mem_type == TTM_PL_TT) { |
1129 | bo_va->flags |= RADEON_VM_PAGE_SYSTEM; |
1129 | bo_va->flags |= RADEON_VM_PAGE_SYSTEM; |
1130 | } else { |
1130 | } else { |
1131 | addr += rdev->vm_manager.vram_base_offset; |
1131 | addr += rdev->vm_manager.vram_base_offset; |
1132 | } |
1132 | } |
1133 | } else { |
1133 | } else { |
1134 | addr = 0; |
1134 | addr = 0; |
1135 | bo_va->valid = false; |
1135 | bo_va->valid = false; |
1136 | } |
1136 | } |
1137 | - | ||
1138 | if (vm->fence && radeon_fence_signaled(vm->fence)) { |
- | |
1139 | radeon_fence_unref(&vm->fence); |
- | |
1140 | } |
- | |
1141 | - | ||
1142 | if (vm->fence && vm->fence->ring != ridx) { |
- | |
1143 | r = radeon_semaphore_create(rdev, &sem); |
- | |
1144 | if (r) { |
- | |
1145 | return r; |
- | |
1146 | } |
- | |
1147 | } |
- | |
1148 | 1137 | ||
1149 | nptes = radeon_bo_ngpu_pages(bo); |
1138 | nptes = radeon_bo_ngpu_pages(bo); |
1150 | 1139 | ||
1151 | /* assume two extra pdes in case the mapping overlaps the borders */ |
1140 | /* assume two extra pdes in case the mapping overlaps the borders */ |
1152 | npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; |
1141 | npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; |
1153 | - | ||
1154 | /* estimate number of dw needed */ |
1142 | |
1155 | /* semaphore, fence and padding */ |
1143 | /* padding, etc. */ |
1156 | ndw = 32; |
1144 | ndw = 64; |
1157 | 1145 | ||
1158 | if (RADEON_VM_BLOCK_SIZE > 11) |
1146 | if (RADEON_VM_BLOCK_SIZE > 11) |
1159 | /* reserve space for one header for every 2k dwords */ |
1147 | /* reserve space for one header for every 2k dwords */ |
1160 | ndw += (nptes >> 11) * 4; |
1148 | ndw += (nptes >> 11) * 4; |
1161 | else |
1149 | else |
1162 | /* reserve space for one header for |
1150 | /* reserve space for one header for |
1163 | every (1 << BLOCK_SIZE) entries */ |
1151 | every (1 << BLOCK_SIZE) entries */ |
1164 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; |
1152 | ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4; |
1165 | 1153 | ||
1166 | /* reserve space for pte addresses */ |
1154 | /* reserve space for pte addresses */ |
1167 | ndw += nptes * 2; |
1155 | ndw += nptes * 2; |
1168 | 1156 | ||
1169 | /* reserve space for one header for every 2k dwords */ |
1157 | /* reserve space for one header for every 2k dwords */ |
1170 | ndw += (npdes >> 11) * 4; |
1158 | ndw += (npdes >> 11) * 4; |
1171 | 1159 | ||
1172 | /* reserve space for pde addresses */ |
1160 | /* reserve space for pde addresses */ |
1173 | ndw += npdes * 2; |
1161 | ndw += npdes * 2; |
1174 | 1162 | ||
1175 | r = radeon_ring_lock(rdev, ring, ndw); |
1163 | /* update too big for an IB */ |
1176 | if (r) { |
1164 | if (ndw > 0xfffff) |
1177 | return r; |
- | |
1178 | } |
- | |
1179 | 1165 | return -ENOMEM; |
|
1180 | if (sem && radeon_fence_need_sync(vm->fence, ridx)) { |
1166 | |
1181 | radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx); |
- | |
1182 | radeon_fence_note_sync(vm->fence, ridx); |
1167 | r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4); |
1183 | } |
1168 | ib.length_dw = 0; |
1184 | 1169 | ||
1185 | r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset); |
1170 | r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset); |
1186 | if (r) { |
1171 | if (r) { |
1187 | radeon_ring_unlock_undo(rdev, ring); |
1172 | radeon_ib_free(rdev, &ib); |
1188 | return r; |
1173 | return r; |
1189 | } |
1174 | } |
1190 | 1175 | ||
1191 | radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset, |
1176 | radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset, |
1192 | addr, bo_va->flags); |
1177 | addr, bo_va->flags); |
1193 | 1178 | ||
1194 | radeon_fence_unref(&vm->fence); |
1179 | radeon_ib_sync_to(&ib, vm->fence); |
1195 | r = radeon_fence_emit(rdev, &vm->fence, ridx); |
1180 | r = radeon_ib_schedule(rdev, &ib, NULL); |
1196 | if (r) { |
1181 | if (r) { |
1197 | radeon_ring_unlock_undo(rdev, ring); |
1182 | radeon_ib_free(rdev, &ib); |
1198 | return r; |
1183 | return r; |
- | 1184 | } |
|
1199 | } |
1185 | radeon_fence_unref(&vm->fence); |
1200 | radeon_ring_unlock_commit(rdev, ring); |
1186 | vm->fence = radeon_fence_ref(ib.fence); |
1201 | radeon_semaphore_free(rdev, &sem, vm->fence); |
1187 | radeon_ib_free(rdev, &ib); |
1202 | radeon_fence_unref(&vm->last_flush); |
1188 | radeon_fence_unref(&vm->last_flush); |
1203 | 1189 | ||
1204 | return 0; |
1190 | return 0; |
1205 | } |
1191 | } |
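Before building the update, radeon_vm_bo_update_pte estimates how many dwords the command stream needs: fixed overhead, packet headers, two dwords per PTE and per PDE, plus two spare directory entries for mappings that straddle table borders. A userspace sketch of that estimate, with the table size assumed as 2^9 entries:

/* Userspace sketch of the command-buffer size estimate made before building
 * the page-table update. estimate_ndw is an illustrative helper, not part
 * of the driver. */
#include <stdio.h>

#define BLOCK_SIZE 9                 /* assumed: 2^9 PTEs per page table */

static unsigned estimate_ndw(unsigned nptes)
{
    unsigned npdes = (nptes >> BLOCK_SIZE) + 2; /* +2 in case borders overlap */
    unsigned ndw = 64;                          /* padding, fence, etc. */

    if (BLOCK_SIZE > 11)
        ndw += (nptes >> 11) * 4;               /* one header per 2k dwords */
    else
        ndw += (nptes >> BLOCK_SIZE) * 4;       /* one header per (1 << BLOCK_SIZE) */

    ndw += nptes * 2;                /* two dwords per pte address */
    ndw += (npdes >> 11) * 4;        /* headers for the directory updates */
    ndw += npdes * 2;                /* two dwords per pde address */
    return ndw;
}

int main(void)
{
    /* a 4 MiB buffer is 1024 GPU pages */
    printf("ndw for 1024 ptes: %u\n", estimate_ndw(1024));
    return 0;
}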
1206 | 1192 | ||
1207 | /** |
1193 | /** |
1208 | * radeon_vm_bo_rmv - remove a bo from a specific vm |
1194 | * radeon_vm_bo_rmv - remove a bo from a specific vm |
1209 | * |
1195 | * |
1210 | * @rdev: radeon_device pointer |
1196 | * @rdev: radeon_device pointer |
1211 | * @bo_va: requested bo_va |
1197 | * @bo_va: requested bo_va |
1212 | * |
1198 | * |
1213 | * Remove @bo_va->bo from the requested vm (cayman+). |
1199 | * Remove @bo_va->bo from the requested vm (cayman+). |
1214 | * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and |
1200 | * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and |
1215 | * remove the ptes for @bo_va in the page table. |
1201 | * remove the ptes for @bo_va in the page table. |
1216 | * Returns 0 for success. |
1202 | * Returns 0 for success. |
1217 | * |
1203 | * |
1218 | * Object has to be reserved! |
1204 | * Object has to be reserved! |
1219 | */ |
1205 | */ |
1220 | int radeon_vm_bo_rmv(struct radeon_device *rdev, |
1206 | int radeon_vm_bo_rmv(struct radeon_device *rdev, |
1221 | struct radeon_bo_va *bo_va) |
1207 | struct radeon_bo_va *bo_va) |
1222 | { |
1208 | { |
1223 | int r; |
1209 | int r = 0; |
1224 | 1210 | ||
1225 | mutex_lock(&rdev->vm_manager.lock); |
1211 | mutex_lock(&rdev->vm_manager.lock); |
1226 | mutex_lock(&bo_va->vm->mutex); |
1212 | mutex_lock(&bo_va->vm->mutex); |
- | 1213 | if (bo_va->soffset) { |
|
1227 | r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); |
1214 | r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL); |
- | 1215 | } |
|
1228 | mutex_unlock(&rdev->vm_manager.lock); |
1216 | mutex_unlock(&rdev->vm_manager.lock); |
1229 | list_del(&bo_va->vm_list); |
1217 | list_del(&bo_va->vm_list); |
1230 | mutex_unlock(&bo_va->vm->mutex); |
1218 | mutex_unlock(&bo_va->vm->mutex); |
1231 | list_del(&bo_va->bo_list); |
1219 | list_del(&bo_va->bo_list); |
1232 | 1220 | ||
1233 | kfree(bo_va); |
1221 | kfree(bo_va); |
1234 | return r; |
1222 | return r; |
1235 | } |
1223 | } |
1236 | 1224 | ||
1237 | /** |
1225 | /** |
1238 | * radeon_vm_bo_invalidate - mark the bo as invalid |
1226 | * radeon_vm_bo_invalidate - mark the bo as invalid |
1239 | * |
1227 | * |
1240 | * @rdev: radeon_device pointer |
1228 | * @rdev: radeon_device pointer |
1241 | * @vm: requested vm |
1229 | * @vm: requested vm |
1242 | * @bo: radeon buffer object |
1230 | * @bo: radeon buffer object |
1243 | * |
1231 | * |
1244 | * Mark @bo as invalid (cayman+). |
1232 | * Mark @bo as invalid (cayman+). |
1245 | */ |
1233 | */ |
1246 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, |
1234 | void radeon_vm_bo_invalidate(struct radeon_device *rdev, |
1247 | struct radeon_bo *bo) |
1235 | struct radeon_bo *bo) |
1248 | { |
1236 | { |
1249 | struct radeon_bo_va *bo_va; |
1237 | struct radeon_bo_va *bo_va; |
1250 | 1238 | ||
1251 | list_for_each_entry(bo_va, &bo->va, bo_list) { |
1239 | list_for_each_entry(bo_va, &bo->va, bo_list) { |
1252 | bo_va->valid = false; |
1240 | bo_va->valid = false; |
1253 | } |
1241 | } |
1254 | } |
1242 | } |
1255 | 1243 | ||
1256 | /** |
1244 | /** |
1257 | * radeon_vm_init - initialize a vm instance |
1245 | * radeon_vm_init - initialize a vm instance |
1258 | * |
1246 | * |
1259 | * @rdev: radeon_device pointer |
1247 | * @rdev: radeon_device pointer |
1260 | * @vm: requested vm |
1248 | * @vm: requested vm |
1261 | * |
1249 | * |
1262 | * Init @vm fields (cayman+). |
1250 | * Init @vm fields (cayman+). |
1263 | */ |
1251 | */ |
1264 | void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) |
1252 | void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) |
1265 | { |
1253 | { |
1266 | vm->id = 0; |
1254 | vm->id = 0; |
1267 | vm->fence = NULL; |
1255 | vm->fence = NULL; |
1268 | mutex_init(&vm->mutex); |
1256 | mutex_init(&vm->mutex); |
1269 | INIT_LIST_HEAD(&vm->list); |
1257 | INIT_LIST_HEAD(&vm->list); |
1270 | INIT_LIST_HEAD(&vm->va); |
1258 | INIT_LIST_HEAD(&vm->va); |
1271 | } |
1259 | } |
1272 | 1260 | ||
1273 | /** |
1261 | /** |
1274 | * radeon_vm_fini - tear down a vm instance |
1262 | * radeon_vm_fini - tear down a vm instance |
1275 | * |
1263 | * |
1276 | * @rdev: radeon_device pointer |
1264 | * @rdev: radeon_device pointer |
1277 | * @vm: requested vm |
1265 | * @vm: requested vm |
1278 | * |
1266 | * |
1279 | * Tear down @vm (cayman+). |
1267 | * Tear down @vm (cayman+). |
1280 | * Unbind the VM and remove all bos from the vm bo list |
1268 | * Unbind the VM and remove all bos from the vm bo list |
1281 | */ |
1269 | */ |
1282 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) |
1270 | void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) |
1283 | { |
1271 | { |
1284 | struct radeon_bo_va *bo_va, *tmp; |
1272 | struct radeon_bo_va *bo_va, *tmp; |
1285 | int r; |
1273 | int r; |
1286 | 1274 | ||
1287 | mutex_lock(&rdev->vm_manager.lock); |
1275 | mutex_lock(&rdev->vm_manager.lock); |
1288 | mutex_lock(&vm->mutex); |
1276 | mutex_lock(&vm->mutex); |
1289 | radeon_vm_free_pt(rdev, vm); |
1277 | radeon_vm_free_pt(rdev, vm); |
1290 | mutex_unlock(&rdev->vm_manager.lock); |
1278 | mutex_unlock(&rdev->vm_manager.lock); |
1291 | 1279 | ||
1292 | if (!list_empty(&vm->va)) { |
1280 | if (!list_empty(&vm->va)) { |
1293 | dev_err(rdev->dev, "still active bo inside vm\n"); |
1281 | dev_err(rdev->dev, "still active bo inside vm\n"); |
1294 | } |
1282 | } |
1295 | list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { |
1283 | list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) { |
1296 | list_del_init(&bo_va->vm_list); |
1284 | list_del_init(&bo_va->vm_list); |
1297 | r = radeon_bo_reserve(bo_va->bo, false); |
1285 | r = radeon_bo_reserve(bo_va->bo, false); |
1298 | if (!r) { |
1286 | if (!r) { |
1299 | list_del_init(&bo_va->bo_list); |
1287 | list_del_init(&bo_va->bo_list); |
1300 | radeon_bo_unreserve(bo_va->bo); |
1288 | radeon_bo_unreserve(bo_va->bo); |
1301 | kfree(bo_va); |
1289 | kfree(bo_va); |
1302 | } |
1290 | } |
1303 | } |
1291 | } |
1304 | radeon_fence_unref(&vm->fence); |
1292 | radeon_fence_unref(&vm->fence); |
1305 | radeon_fence_unref(&vm->last_flush); |
1293 | radeon_fence_unref(&vm->last_flush); |
1306 | mutex_unlock(&vm->mutex); |
1294 | mutex_unlock(&vm->mutex); |
1307 | } |
1295 | } |