vmwgfx_buffer.c: diff of Rev 4075 against Rev 4569

Rev 4569 rewrites the vmwgfx TTM backend: the plain page-array GMR binding is replaced
by scatter-gather tables driven through the kernel DMA API (struct ttm_dma_tt,
struct vmw_sg_table and the vmw_piter iterator), and the new VMW_PL_MOB placement is
handled alongside VMW_PL_GMR. Hunks below are shown in unified form: lines prefixed
"+" are added in Rev 4569, lines prefixed "-" are removed, unprefixed lines are
context. Each hunk header gives the starting line in each revision; "Line -" marks a
hunk that only adds lines.
Line 38 | Line 38
 	TTM_PL_FLAG_NO_EVICT;
 
 static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
 static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED |
 	TTM_PL_FLAG_NO_EVICT;
 
+static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB |
+	TTM_PL_FLAG_CACHED;
+
 struct ttm_placement vmw_vram_placement = {
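Editorial note: each placement word above ORs exactly one memory-type flag
(TTM_PL_FLAG_SYSTEM, TTM_PL_FLAG_VRAM, or a driver-private type such as
VMW_PL_FLAG_GMR / VMW_PL_FLAG_MOB) with caching and eviction modifiers, and a
struct ttm_placement then points at one or more such words. A minimal sketch of
the pattern, following the gmr_ne/sys_ne variants above; the vmw_mob_ne_placement
name is our hypothetical example and not part of this diff:

/* Hypothetical illustration only: a no-evict MOB placement built the
 * same way as the sys_ne_/gmr_ne_ variants in this revision. */
static uint32_t mob_ne_placement_flags = VMW_PL_FLAG_MOB |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_mob_ne_placement = {
	.fpfn = 0,                /* no lower page-frame bound */
	.lpfn = 0,                /* no upper page-frame bound */
	.num_placement = 1,
	.placement = &mob_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &mob_ne_placement_flags
};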
Line 114 | Line 121
 	.placement = &sys_placement_flags,
 	.num_busy_placement = 1,
 	.busy_placement = &sys_placement_flags
 };
 
+struct ttm_placement vmw_sys_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.placement = &sys_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_ne_placement_flags
+};
+
 static uint32_t evictable_placement_flags[] = {
 	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
-	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
 };
 
 struct ttm_placement vmw_evictable_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
-	.num_placement = 3,
+	.num_placement = 4,
 	.placement = evictable_placement_flags,
 	.num_busy_placement = 1,
Line 138 | Line 155
 	.num_busy_placement = 2,
 	.placement = &gmr_placement_flags,
 	.busy_placement = gmr_vram_placement_flags
 };
+
+struct ttm_placement vmw_mob_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 1,
+	.placement = &mob_placement_flags,
+	.busy_placement = &mob_placement_flags
+};
 
 struct vmw_ttm_tt {
-	struct ttm_tt ttm;
+	struct ttm_dma_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
+	struct vmw_mob *mob;
+	int mem_type;
+	struct sg_table sgt;
+	struct vmw_sg_table vsgt;
+	uint64_t sg_alloc_size;
+	bool mapped;
 };
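Editorial note: because struct ttm_tt is now embedded as dma_ttm.ttm instead of a
direct ttm member, every callback in this revision recovers the wrapper via
container_of on the nested member path. A standalone sketch of that recovery (our
illustration with hypothetical *_ex stand-in types; the kernel provides the real
container_of in linux/kernel.h):

#include <stddef.h>

/* Simplified container_of, as used throughout the new revision. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt_ex { int dummy; };
struct ttm_dma_tt_ex { struct ttm_tt_ex ttm; };
struct vmw_ttm_tt_ex { struct ttm_dma_tt_ex dma_ttm; int gmr_id; };

static struct vmw_ttm_tt_ex *to_vmw(struct ttm_tt_ex *ttm)
{
	/* offsetof() accepts the nested designator dma_ttm.ttm, so a
	 * single step recovers the outermost wrapper structure. */
	return container_of(ttm, struct vmw_ttm_tt_ex, dma_ttm.ttm);
}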
Line - | Line 180
+
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+	return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+	return __sg_page_iter_next(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+	return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+	return sg_page_iter_page(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+	return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+	return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+	return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+		     unsigned long p_offset)
+{
+	viter->i = p_offset - 1;
+	viter->num_pages = vsgt->num_pages;
+	switch (vsgt->mode) {
+	case vmw_dma_phys:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_phys_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->pages = vsgt->pages;
+		break;
+	case vmw_dma_alloc_coherent:
+		viter->next = &__vmw_piter_non_sg_next;
+		viter->dma_address = &__vmw_piter_dma_addr;
+		viter->page = &__vmw_piter_non_sg_page;
+		viter->addrs = vsgt->addrs;
+		viter->pages = vsgt->pages;
+		break;
+	case vmw_dma_map_populate:
+	case vmw_dma_map_bind:
+		viter->next = &__vmw_piter_sg_next;
+		viter->dma_address = &__vmw_piter_sg_addr;
+		viter->page = &__vmw_piter_sg_page;
+		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+				     vsgt->sgt->orig_nents, p_offset);
+		break;
+	default:
+		BUG();
+	}
+}
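Editorial note: a short usage sketch of the iterator, mirroring the loop that
vmw_ttm_map_dma below actually performs. We assume vmw_piter_next() and
vmw_piter_dma_addr() are the driver's inline wrappers around viter->next() and
viter->dma_address(), which is how the diff itself uses them:

/* Walk all DMA addresses of a mapped vmw_sg_table (illustration only). */
static void example_dump_addrs(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;

	/* vmw_piter_start() leaves the iterator one step before the
	 * first page, so advance before every access. */
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);
		/* ... hand addr to the device, build descriptors, etc. */
		(void)addr;
	}
}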
Line - | Line 289
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+		     DMA_BIDIRECTIONAL);
+	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_backend
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct device *dev = vmw_tt->dev_priv->dev->dev;
+	int ret;
+
+	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+			 DMA_BIDIRECTIONAL);
+	if (unlikely(ret == 0))
+		return -ENOMEM;
+
+	vmw_tt->sgt.nents = ret;
+
+	return 0;
+}
Line - | Line 334
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping function and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+	struct vmw_piter iter;
+	dma_addr_t old;
+	int ret = 0;
+	static size_t sgl_size;
+	static size_t sgt_size;
+
+	if (vmw_tt->mapped)
+		return 0;
+
+	vsgt->mode = dev_priv->map_mode;
+	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+	vsgt->sgt = &vmw_tt->sgt;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		if (unlikely(!sgl_size)) {
+			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+			sgt_size = ttm_round_pot(sizeof(struct sg_table));
+		}
+		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
+					   true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+						vsgt->num_pages, 0,
+						(unsigned long)
+						vsgt->num_pages << PAGE_SHIFT,
+						GFP_KERNEL);
+		if (unlikely(ret != 0))
+			goto out_sg_alloc_fail;
+
+		if (vsgt->num_pages > vmw_tt->sgt.nents) {
+			uint64_t over_alloc =
+				sgl_size * (vsgt->num_pages -
+					    vmw_tt->sgt.nents);
+
+			ttm_mem_global_free(glob, over_alloc);
+			vmw_tt->sg_alloc_size -= over_alloc;
+		}
+
+		ret = vmw_ttm_map_for_dma(vmw_tt);
+		if (unlikely(ret != 0))
+			goto out_map_fail;
+
+		break;
+	default:
+		break;
+	}
+
+	old = ~((dma_addr_t) 0);
+	vmw_tt->vsgt.num_regions = 0;
+	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+		dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+		if (cur != old + PAGE_SIZE)
+			vmw_tt->vsgt.num_regions++;
+		old = cur;
+	}
+
+	vmw_tt->mapped = true;
+	return 0;
+
+out_map_fail:
+	sg_free_table(vmw_tt->vsgt.sgt);
+	vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+	return ret;
+}
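Editorial note: the num_regions loop above counts runs of physically contiguous
pages: whenever the current DMA address is not exactly PAGE_SIZE past the previous
one, a new region starts. A self-contained sketch of the same technique on a plain
address array (our illustration, not driver code):

#include <stdint.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096u

/* Count contiguous runs in a list of page addresses, exactly as the
 * vmw_piter loop in vmw_ttm_map_dma() does. */
static unsigned int count_regions(const uint64_t *addrs, size_t n)
{
	uint64_t old = ~(uint64_t)0;   /* sentinel: never matches */
	unsigned int regions = 0;
	size_t i;

	for (i = 0; i < n; ++i) {
		if (addrs[i] != old + EX_PAGE_SIZE)
			++regions;     /* discontinuity opens a new region */
		old = addrs[i];
	}
	return regions;
}
/* Example: {0x1000, 0x2000, 0x3000, 0x8000} yields 2 regions. */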
Line - | Line 424
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+	if (!vmw_tt->vsgt.sgt)
+		return;
+
+	switch (dev_priv->map_mode) {
+	case vmw_dma_map_bind:
+	case vmw_dma_map_populate:
+		vmw_ttm_unmap_from_dma(vmw_tt);
+		sg_free_table(vmw_tt->vsgt.sgt);
+		vmw_tt->vsgt.sgt = NULL;
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_tt->sg_alloc_size);
+		break;
+	default:
+		break;
+	}
+	vmw_tt->mapped = false;
+}
+
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Tear down device mappings for buffer object pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_backend as argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object should
+ * not be freed after use.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	return &vmw_tt->vsgt;
+}
+
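Editorial note: taken together, the wrappers above give the rest of the driver a
simple lifecycle: reserve or pin the BO, map it, fetch its vmw_sg_table, and unmap
when done. A hedged usage sketch (our illustration; error handling trimmed, and the
surrounding reserve/unreserve calls are assumed rather than shown in this diff):

/* Sketch: making a BO's backing pages device-visible and walking them. */
static int example_use_bo(struct ttm_buffer_object *bo)
{
	const struct vmw_sg_table *vsgt;
	int ret;

	/* bo must already be reserved or pinned here (see comments above). */
	ret = vmw_bo_map_dma(bo);
	if (ret)
		return ret;

	vsgt = vmw_bo_sg_table(bo);   /* owned by the TT; do not free */
	/* ... iterate with vmw_piter_start()/vmw_piter_next() ... */

	vmw_bo_unmap_dma(bo);
	return 0;
}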
Line 149 | Line 513
 static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	int ret;
+
+	ret = vmw_ttm_map_dma(vmw_be);
+	if (unlikely(ret != 0))
+		return ret;
 
 	vmw_be->gmr_id = bo_mem->start;
+	vmw_be->mem_type = bo_mem->mem_type;
 
-	return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
-			    ttm->num_pages, vmw_be->gmr_id);
+	switch (bo_mem->mem_type) {
+	case VMW_PL_GMR:
+		return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+				    ttm->num_pages, vmw_be->gmr_id);
+	case VMW_PL_MOB:
+		if (unlikely(vmw_be->mob == NULL)) {
+			vmw_be->mob =
+				vmw_mob_create(ttm->num_pages);
+			if (unlikely(vmw_be->mob == NULL))
+				return -ENOMEM;
+		}
+
+		return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+				    &vmw_be->vsgt, ttm->num_pages,
+				    vmw_be->gmr_id);
+	default:
+		BUG();
+	}
+	return 0;
 }
Line 159 | Line 547
 static int vmw_ttm_unbind(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
-
-	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	switch (vmw_be->mem_type) {
+	case VMW_PL_GMR:
+		vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+		break;
+	case VMW_PL_MOB:
+		vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+		break;
+	default:
+		BUG();
+	}
+
+	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+		vmw_ttm_unmap_dma(vmw_be);
+
 	return 0;
 }
Line 167 | Line 570
 static void vmw_ttm_destroy(struct ttm_tt *ttm)
 {
-	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
-
-	ttm_tt_fini(ttm);
+	struct vmw_ttm_tt *vmw_be =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+	vmw_ttm_unmap_dma(vmw_be);
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+	else
+		ttm_tt_fini(ttm);
+
+	if (vmw_be->mob)
+		vmw_mob_destroy(vmw_be->mob);
+
 	kfree(vmw_be);
 }

Line - | Line 588
+static int vmw_ttm_populate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt =
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+		ret = ttm_mem_global_alloc(glob, size, false, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		if (unlikely(ret != 0))
+			ttm_mem_global_free(glob, size);
+	} else
+		ret = ttm_pool_populate(ttm);
+
+	return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+						 dma_ttm.ttm);
+	struct vmw_private *dev_priv = vmw_tt->dev_priv;
+	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+	if (vmw_tt->mob) {
+		vmw_mob_destroy(vmw_tt->mob);
+		vmw_tt->mob = NULL;
+	}
+
+	vmw_ttm_unmap_dma(vmw_tt);
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+		size_t size =
+			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ttm_mem_global_free(glob, size);
+	} else
+		ttm_pool_unpopulate(ttm);
+}
Line 174 | Line 638
 
 static struct ttm_backend_func vmw_ttm_func = {
 	.bind = vmw_ttm_bind,
 	.unbind = vmw_ttm_unbind,
 	.destroy = vmw_ttm_destroy,
 };
 
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
-				 unsigned long size, uint32_t page_flags,
-				 struct page *dummy_read_page)
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+					unsigned long size, uint32_t page_flags,
+					struct page *dummy_read_page)
 {
 	struct vmw_ttm_tt *vmw_be;
+	int ret;
 
-	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
+	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
 	if (!vmw_be)
 		return NULL;
 
-	vmw_be->ttm.func = &vmw_ttm_func;
+	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
 	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
+	vmw_be->mob = NULL;
 
-	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
-		kfree(vmw_be);
-		return NULL;
-	}
+	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
+				      dummy_read_page);
+	else
+		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
+				  dummy_read_page);
+	if (unlikely(ret != 0))
+		goto out_no_init;
+
+	return &vmw_be->dma_ttm.ttm;
+out_no_init:
Line 222 | Line 695
 		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_CACHED;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		/*
 		 * "Guest Memory Regions" is an aperture like feature with
 		 * one slot per bo. There is an upper limit of the number of
 		 * slots as well as the bo size.
 		 */

Line 240 | Line 714
 		return -EINVAL;
 	}
 	return 0;
 }
Line 244 | Line 718
 
-void vmw_evict_flags(struct ttm_buffer_object *bo,
-		     struct ttm_placement *placement)
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
+			    struct ttm_placement *placement)
 {
 	*placement = vmw_sys_placement;
Line 269 | Line 743
 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
 		return -EINVAL;
 	switch (mem->mem_type) {
 	case TTM_PL_SYSTEM:
 	case VMW_PL_GMR:
+	case VMW_PL_MOB:
 		return 0;
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = dev_priv->vram_start;
 		mem->bus.is_iomem = true;
Line 328 | Line 803
 			       DRM_VMW_FENCE_FLAG_EXEC,
 			       lazy, interruptible,
 			       VMW_FENCE_WAIT_TIMEOUT);
 }
+
+/**
+ * vmw_move_notify - TTM move_notify callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+			    struct ttm_mem_reg *mem)
+{
+	vmw_resource_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+
+	/* Idle-wait is stubbed out in this port: */
+	// spin_lock(&bdev->fence_lock);
+	// ttm_bo_wait(bo, false, false, false);
+	// spin_unlock(&bdev->fence_lock);
+}
 
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
-	.ttm_tt_populate = &ttm_pool_populate,
-	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
+	.ttm_tt_populate = &vmw_ttm_populate,
+	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.evict_flags = vmw_evict_flags,
 	.move = NULL,
 	.verify_access = vmw_verify_access,
 	.sync_obj_signaled = vmw_sync_obj_signaled,
 	.sync_obj_wait = vmw_sync_obj_wait,
 	.sync_obj_flush = vmw_sync_obj_flush,
 	.sync_obj_unref = vmw_sync_obj_unref,
 	.sync_obj_ref = vmw_sync_obj_ref,
-	.move_notify = NULL,
-	.swap_notify = NULL,
+	.move_notify = vmw_move_notify,
+	.swap_notify = vmw_swap_notify,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
 	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
 	.io_mem_free = &vmw_ttm_io_mem_free,
+};
Line - | Line 860
+
+
+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+	if (sg_is_last(sg))
+		return NULL;
+
+	sg++;
+	if (unlikely(sg_is_chain(sg)))
+		sg = sg_chain_ptr(sg);
+
+	return sg;
+}
+
+
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+		     sg_free_fn *free_fn)
+{
+	struct scatterlist *sgl, *next;
+
+	if (unlikely(!table->sgl))
+		return;
+
+	sgl = table->sgl;
+	while (table->orig_nents) {
+		unsigned int alloc_size = table->orig_nents;
+		unsigned int sg_size;
+
+		/*
+		 * If we have more than max_ents segments left,
+		 * then assign 'next' to the sg table after the current one.
+		 * sg_size is then one less than alloc size, since the last
+		 * element is the chain pointer.
+		 */
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else {
+			sg_size = alloc_size;
+			next = NULL;
+		}
+
+		table->orig_nents -= sg_size;
+		kfree(sgl);
+		sgl = next;
+	}
+
+	table->sgl = NULL;
+}
+
+void sg_free_table(struct sg_table *table)
+{
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+}
+
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+	struct scatterlist *sg, *prv;
+	unsigned int left;
+	unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
+
+#ifndef ARCH_HAS_SG_CHAIN
+	BUG_ON(nents > max_ents);
+#endif
+
+	memset(table, 0, sizeof(*table));
+
+	left = nents;
+	prv = NULL;
+	do {
+		unsigned int sg_size, alloc_size = left;
+
+		if (alloc_size > max_ents) {
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else
+			sg_size = alloc_size;
+
+		left -= sg_size;
+
+		sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
+		if (unlikely(!sg)) {
+			/*
+			 * Adjust entry count to reflect that the last
+			 * entry of the previous table won't be used for
+			 * linkage. Without this, sg_kfree() may get
+			 * confused.
+			 */
+			if (prv)
+				table->nents = ++table->orig_nents;
+
+			goto err;
+		}
+
+		sg_init_table(sg, alloc_size);
+		table->nents = table->orig_nents += sg_size;
+
+		/*
+		 * If this is the first mapping, assign the sg table header.
+		 * If this is not the first mapping, chain previous part.
+		 */
+		if (prv)
+			sg_chain(prv, max_ents, sg);
+		else
+			table->sgl = sg;
+
+		/*
+		 * If no more entries after this one, mark the end
+		 */
+		if (!left)
+			sg_mark_end(&sg[sg_size - 1]);
+
+		prv = sg;
+	} while (left);
+
+	return 0;
+
+err:
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
+
+	return -ENOMEM;
+}
+
+
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+	memset(sgl, 0, sizeof(*sgl) * nents);
+#ifdef CONFIG_DEBUG_SG
+	{
+		unsigned int i;
+		for (i = 0; i < nents; i++)
+			sgl[i].sg_magic = SG_MAGIC;
+	}
+#endif
+	sg_mark_end(&sgl[nents - 1]);
+}
+
+
+void __sg_page_iter_start(struct sg_page_iter *piter,
+			  struct scatterlist *sglist, unsigned int nents,
+			  unsigned long pgoffset)
+{
+	piter->__pg_advance = 0;
+	piter->__nents = nents;
+
+	piter->sg = sglist;
+	piter->sg_pgoffset = pgoffset;
+}
+
+static int sg_page_count(struct scatterlist *sg)
+{
+	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
+}
+
+bool __sg_page_iter_next(struct sg_page_iter *piter)
+{
+	if (!piter->__nents || !piter->sg)
+		return false;
+
+	piter->sg_pgoffset += piter->__pg_advance;
+	piter->__pg_advance = 1;
+
+	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
+		piter->sg_pgoffset -= sg_page_count(piter->sg);
+		piter->sg = sg_next(piter->sg);
+		if (!--piter->__nents || !piter->sg)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__sg_page_iter_next);
+
+
+int sg_alloc_table_from_pages(struct sg_table *sgt,
+			      struct page **pages, unsigned int n_pages,
+			      unsigned long offset, unsigned long size,
+			      gfp_t gfp_mask)
+{
+	unsigned int chunks;
+	unsigned int i;
+	unsigned int cur_page;
+	int ret;
+	struct scatterlist *s;
+
+	/* compute number of contiguous chunks */
+	chunks = 1;
+	for (i = 1; i < n_pages; ++i)
+		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
+			++chunks;
+
+	ret = sg_alloc_table(sgt, chunks, gfp_mask);
+	if (unlikely(ret))
+		return ret;
+
+	/* merging chunks and putting them into the scatterlist */
+	cur_page = 0;
+	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
+		unsigned long chunk_size;
+		unsigned int j;
+
+		/* look for the end of the current chunk */
+		for (j = cur_page + 1; j < n_pages; ++j)
+			if (page_to_pfn(pages[j]) !=
+			    page_to_pfn(pages[j - 1]) + 1)
+				break;
+
+		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
+		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
+		size -= chunk_size;
+		offset = 0;
+		cur_page = j;
+	}
+
+	return 0;
+}
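Editorial note: a brief usage sketch of sg_alloc_table_from_pages, matching the
call made in vmw_ttm_map_dma above (our illustration; the surrounding
dma_map_sg/dma_unmap_sg calls are only hinted at):

/* Sketch: wrap an array of already-allocated pages in an sg_table. */
static int example_build_sgt(struct sg_table *sgt,
			     struct page **pages, unsigned int n_pages)
{
	int ret;

	/* offset 0, total length n_pages << PAGE_SHIFT, as in
	 * vmw_ttm_map_dma(); contiguous pages collapse into one entry. */
	ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
					(unsigned long)n_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* ... dma_map_sg() / use / dma_unmap_sg() ... */

	sg_free_table(sgt);
	return 0;
}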
Line - | Line 1077
+
+int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+	       int nelems, int dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sglist, s, nelems, i) {
+		s->dma_address = (dma_addr_t)sg_phys(s);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		s->dma_length = s->length;
+#endif
+	}
+
+	return nelems;
+}
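Editorial note: this dma_map_sg replacement simply assigns each segment's physical
address as its DMA address, i.e. it assumes a 1:1 bus mapping with no IOMMU and no
bounce buffering. That is exactly the precondition the vmw_ttm_map_for_dma comment
spells out (dma_sync_sg_for_cpu being a NOP). A caller uses it like the kernel's
real dma_map_sg; the guard below is our hedged sketch, since with this shim the
return value always equals the passed-in entry count:

/* Sketch: mapping a table built by sg_alloc_table_from_pages. */
int ret = dma_map_sg(dev, sgt.sgl, sgt.orig_nents, DMA_BIDIRECTIONAL);
if (ret == 0)
	return -ENOMEM;   /* the real kernel API can fail; guard anyway */
sgt.nents = ret;          /* mapped entry count, as vmw_ttm_map_for_dma does */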