/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
	TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 4,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

struct ttm_placement vmw_mob_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 1,
	.placement = &mob_placement_flags,
	.busy_placement = &mob_placement_flags
};
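
/*
 * Usage sketch (an assumption, not part of the original file): these
 * placement objects are what the driver hands to TTM when creating or
 * validating a buffer object. Assuming a reserved struct ttm_buffer_object
 * *bo, asking TTM to place it in GMR-or-VRAM memory looks roughly like:
 *
 *	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
 *	if (ret)
 *		return ret;
 *
 * The exact ttm_bo_validate() signature varies between kernel versions;
 * the call above is illustrative only.
 */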

struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct vmw_mob *mob;
	int mem_type;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
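
/*
 * Illustrative usage sketch (an assumption, not code from the original
 * driver): walking every DMA address of a mapped vmw_sg_table with the
 * iterator above. Note that the iterator must be advanced once before it
 * points to a valid page, matching the __sg_page_iter_start convention.
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... program addr into the GMR/MOB page tables ...
 *	}
 */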

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}
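
/*
 * Worked example for the region-counting loop above (illustrative only, the
 * addresses are made up): if the mapped pages have DMA addresses 0x10000,
 * 0x11000, 0x12000, 0x40000 and 0x41000 with 4 KiB pages, they form two
 * physically contiguous runs, so num_regions ends up as 2 and the binding
 * code can describe the buffer with two ranges instead of one entry per page.
 */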

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}


/**
 * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 * Note that the buffer object must be either pinned or reserved before
 * calling this function.
 */
int vmw_bo_map_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return vmw_ttm_map_dma(vmw_tt);
}


/**
 * vmw_bo_unmap_dma - Make sure buffer object pages are visible to the device
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
 * instead of a pointer to a struct vmw_ttm_backend as argument.
 */
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_tt);
}


/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	return &vmw_tt->vsgt;
}
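
/*
 * Usage sketch (an assumption, not part of the original driver): a typical
 * caller reserves or pins the buffer object, makes its pages visible to the
 * device and then walks the resulting vmw_sg_table:
 *
 *	ret = vmw_bo_map_dma(bo);	(bo must be reserved or pinned)
 *	if (ret)
 *		return ret;
 *
 *	vsgt = vmw_bo_sg_table(bo);
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);)
 *		... use vmw_piter_dma_addr(&iter) ...
 *
 *	vmw_bo_unmap_dma(bo);
 */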


static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;
	vmw_be->mem_type = bo_mem->mem_type;

	switch (bo_mem->mem_type) {
	case VMW_PL_GMR:
		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
				    ttm->num_pages, vmw_be->gmr_id);
	case VMW_PL_MOB:
		if (unlikely(vmw_be->mob == NULL)) {
			vmw_be->mob =
				vmw_mob_create(ttm->num_pages);
			if (unlikely(vmw_be->mob == NULL))
				return -ENOMEM;
		}

		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
				    &vmw_be->vsgt, ttm->num_pages,
				    vmw_be->gmr_id);
	default:
		BUG();
	}
	return 0;
}

static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	switch (vmw_be->mem_type) {
	case VMW_PL_GMR:
		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
		break;
	case VMW_PL_MOB:
		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
		break;
	default:
		BUG();
	}

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);

	if (vmw_be->mob)
		vmw_mob_destroy(vmw_be->mob);

	kfree(vmw_be);
}


static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);


	if (vmw_tt->mob) {
		vmw_mob_destroy(vmw_tt->mob);
		vmw_tt->mob = NULL;
	}

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
	vmw_be->mob = NULL;

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 * one slot per bo. There is an upper limit on the number of
		 * slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
//	struct ttm_object_file *tfile =
//		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return 0; //vmw_user_dmabuf_verify_access(bo, tfile);
}

static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
	case VMW_PL_MOB:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{

	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);

}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

/**
 * vmw_move_notify - TTM move_notify callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_mem_reg *mem)
{
	vmw_resource_move_notify(bo, mem);
}


/**
 * vmw_swap_notify - TTM swap_notify callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;

//	spin_lock(&bdev->fence_lock);
//	ttm_bo_wait(bo, false, false, false);
//	spin_unlock(&bdev->fence_lock);
}


struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = vmw_move_notify,
	.swap_notify = vmw_swap_notify,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
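
/*
 * Note (illustrative, not part of the original file): vmw_bo_driver is the
 * table of callbacks TTM invokes for buffer objects belonging to this device.
 * It is registered once at device initialization time by passing it to
 * ttm_bo_device_init() together with the device's struct ttm_bo_device; the
 * exact arguments of that call depend on the kernel version and live in the
 * driver load path outside this file.
 */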


struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}


void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		kfree(sgl);
		sgl = next;
	}

	table->sgl = NULL;
}

void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
}

int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned int max_ents = SG_MAX_SINGLE_ALLOC;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			goto err;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;

err:
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);

	return -ENOMEM;
}


void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}


void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);


int sg_alloc_table_from_pages(struct sg_table *sgt,
			      struct page **pages, unsigned int n_pages,
			      unsigned long offset, unsigned long size,
			      gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
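
/*
 * Usage sketch (illustrative only): the helpers above mirror the kernel's
 * scatterlist API and appear to be provided locally because this port does
 * not link against the kernel's lib/scatterlist.c. Building a table from an
 * array of pages and walking its pages looks roughly like:
 *
 *	struct sg_table sgt;
 *	struct sg_page_iter it;
 *
 *	if (sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
 *				      (unsigned long)n_pages << PAGE_SHIFT,
 *				      GFP_KERNEL) == 0) {
 *		for (__sg_page_iter_start(&it, sgt.sgl, sgt.orig_nents, 0);
 *		     __sg_page_iter_next(&it);)
 *			... use sg_page_iter_page(&it) ...
 *		sg_free_table(&sgt);
 *	}
 */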

int dma_map_sg(struct device *dev, struct scatterlist *sglist,
	       int nelems, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sglist, s, nelems, i) {
		s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}

	return nelems;
}