40,6 → 40,10 |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM | |
TTM_PL_FLAG_CACHED; |
|
static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM | |
TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_NO_EVICT; |
|
static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR | |
TTM_PL_FLAG_CACHED; |
|
47,6 → 51,9 |
TTM_PL_FLAG_CACHED | |
TTM_PL_FLAG_NO_EVICT; |
|
static uint32_t mob_placement_flags = VMW_PL_FLAG_MOB | |
TTM_PL_FLAG_CACHED; |
|
struct ttm_placement vmw_vram_placement = { |
.fpfn = 0, |
.lpfn = 0, |
116,16 → 123,26 |
.busy_placement = &sys_placement_flags |
}; |
|
struct ttm_placement vmw_sys_ne_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 1, |
.placement = &sys_ne_placement_flags, |
.num_busy_placement = 1, |
.busy_placement = &sys_ne_placement_flags |
}; |
|
static uint32_t evictable_placement_flags[] = { |
TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED, |
TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED, |
VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED, |
VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED |
}; |
|
struct ttm_placement vmw_evictable_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 4, |
.placement = evictable_placement_flags, |
.num_busy_placement = 1, |
.busy_placement = &sys_placement_flags |
140,38 → 157,485 |
.busy_placement = gmr_vram_placement_flags |
}; |
|
struct ttm_placement vmw_mob_placement = { |
.fpfn = 0, |
.lpfn = 0, |
.num_placement = 1, |
.num_busy_placement = 1, |
.placement = &mob_placement_flags, |
.busy_placement = &mob_placement_flags |
}; |
|
struct vmw_ttm_tt { |
struct ttm_dma_tt dma_ttm; |
struct vmw_private *dev_priv; |
int gmr_id; |
struct vmw_mob *mob; |
int mem_type; |
struct sg_table sgt; |
struct vmw_sg_table vsgt; |
uint64_t sg_alloc_size; |
bool mapped; |
}; |
|
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); |
|
/** |
* Helper functions to advance a struct vmw_piter iterator. |
* |
* @viter: Pointer to the iterator. |
* |
* These functions return false if past the end of the list, |
* true otherwise. Functions are selected depending on the current |
* DMA mapping mode. |
*/ |
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter) |
{ |
return ++(viter->i) < viter->num_pages; |
} |
|
static bool __vmw_piter_sg_next(struct vmw_piter *viter) |
{ |
return __sg_page_iter_next(&viter->iter); |
} |
|
|
/** |
* Helper functions to return a pointer to the current page. |
* |
* @viter: Pointer to the iterator |
* |
* These functions return a pointer to the page currently |
* pointed to by @viter. Functions are selected depending on the |
* current mapping mode. |
*/ |
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter) |
{ |
return viter->pages[viter->i]; |
} |
|
static struct page *__vmw_piter_sg_page(struct vmw_piter *viter) |
{ |
return sg_page_iter_page(&viter->iter); |
} |
|
|
/** |
* Helper functions to return the DMA address of the current page. |
* |
* @viter: Pointer to the iterator |
* |
* These functions return the DMA address of the page currently |
* pointed to by @viter. Functions are selected depending on the |
* current mapping mode. |
*/ |
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter) |
{ |
return page_to_phys(viter->pages[viter->i]); |
} |
|
static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter) |
{ |
return viter->addrs[viter->i]; |
} |
|
static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter) |
{ |
return sg_page_iter_dma_address(&viter->iter); |
} |
|
|
/** |
* vmw_piter_start - Initialize a struct vmw_piter. |
* |
* @viter: Pointer to the iterator to initialize |
* @vsgt: Pointer to a struct vmw_sg_table to initialize from |
* |
* Note that we're following the convention of __sg_page_iter_start, so that |
* the iterator doesn't point to a valid page after initialization; it has |
* to be advanced one step first. |
*/ |
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, |
unsigned long p_offset) |
{ |
viter->i = p_offset - 1; |
viter->num_pages = vsgt->num_pages; |
switch (vsgt->mode) { |
case vmw_dma_phys: |
viter->next = &__vmw_piter_non_sg_next; |
viter->dma_address = &__vmw_piter_phys_addr; |
viter->page = &__vmw_piter_non_sg_page; |
viter->pages = vsgt->pages; |
break; |
case vmw_dma_alloc_coherent: |
viter->next = &__vmw_piter_non_sg_next; |
viter->dma_address = &__vmw_piter_dma_addr; |
viter->page = &__vmw_piter_non_sg_page; |
viter->addrs = vsgt->addrs; |
viter->pages = vsgt->pages; |
break; |
case vmw_dma_map_populate: |
case vmw_dma_map_bind: |
viter->next = &__vmw_piter_sg_next; |
viter->dma_address = &__vmw_piter_sg_addr; |
viter->page = &__vmw_piter_sg_page; |
__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl, |
vsgt->sgt->orig_nents, p_offset); |
break; |
default: |
BUG(); |
} |
} |
|
/** |
* vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for |
* TTM pages |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* Used to free dma mappings previously mapped by vmw_ttm_map_for_dma. |
*/ |
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct device *dev = vmw_tt->dev_priv->dev->dev; |
|
dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents, |
DMA_BIDIRECTIONAL); |
vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; |
} |
|
/** |
* vmw_ttm_map_for_dma - map TTM pages to get device addresses |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* This function is used to get device addresses from the kernel DMA layer. |
* However, it violates the DMA API in that when this operation has been |
* performed, it's illegal for the CPU to write to the pages without first |
* unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is |
* therefore only legal to call this function if we know that the function |
* dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most |
* a CPU write buffer flush. |
*/ |
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct device *dev = vmw_tt->dev_priv->dev->dev; |
int ret; |
|
ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents, |
DMA_BIDIRECTIONAL); |
if (unlikely(ret == 0)) |
return -ENOMEM; |
|
vmw_tt->sgt.nents = ret; |
|
return 0; |
} |
|
/** |
* vmw_ttm_map_dma - Make sure TTM pages are visible to the device |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* Select the correct mapping function for the current DMA mode and make |
* sure the TTM pages are visible to the device. Allocate storage for the |
* device mappings. If a mapping has already been performed, indicated by |
* the storage pointer being non-NULL, the function returns success. |
*/ |
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
struct vmw_sg_table *vsgt = &vmw_tt->vsgt; |
struct vmw_piter iter; |
dma_addr_t old; |
int ret = 0; |
static size_t sgl_size; |
static size_t sgt_size; |
|
if (vmw_tt->mapped) |
return 0; |
|
vsgt->mode = dev_priv->map_mode; |
vsgt->pages = vmw_tt->dma_ttm.ttm.pages; |
vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; |
vsgt->addrs = vmw_tt->dma_ttm.dma_address; |
vsgt->sgt = &vmw_tt->sgt; |
|
switch (dev_priv->map_mode) { |
case vmw_dma_map_bind: |
case vmw_dma_map_populate: |
if (unlikely(!sgl_size)) { |
sgl_size = ttm_round_pot(sizeof(struct scatterlist)); |
sgt_size = ttm_round_pot(sizeof(struct sg_table)); |
} |
vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages; |
ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false, |
true); |
if (unlikely(ret != 0)) |
return ret; |
|
ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, |
vsgt->num_pages, 0, |
(unsigned long) |
vsgt->num_pages << PAGE_SHIFT, |
GFP_KERNEL); |
if (unlikely(ret != 0)) |
goto out_sg_alloc_fail; |
|
if (vsgt->num_pages > vmw_tt->sgt.nents) { |
uint64_t over_alloc = |
sgl_size * (vsgt->num_pages - |
vmw_tt->sgt.nents); |
|
ttm_mem_global_free(glob, over_alloc); |
vmw_tt->sg_alloc_size -= over_alloc; |
} |
|
ret = vmw_ttm_map_for_dma(vmw_tt); |
if (unlikely(ret != 0)) |
goto out_map_fail; |
|
break; |
default: |
break; |
} |
|
old = ~((dma_addr_t) 0); |
vmw_tt->vsgt.num_regions = 0; |
for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) { |
dma_addr_t cur = vmw_piter_dma_addr(&iter); |
|
if (cur != old + PAGE_SIZE) |
vmw_tt->vsgt.num_regions++; |
old = cur; |
} |
|
vmw_tt->mapped = true; |
return 0; |
|
out_map_fail: |
sg_free_table(vmw_tt->vsgt.sgt); |
vmw_tt->vsgt.sgt = NULL; |
out_sg_alloc_fail: |
ttm_mem_global_free(glob, vmw_tt->sg_alloc_size); |
return ret; |
} |
|
/** |
* vmw_ttm_unmap_dma - Tear down any TTM page device mappings |
* |
* @vmw_tt: Pointer to a struct vmw_ttm_tt |
* |
* Tear down any previously set up device DMA mappings and free |
* any storage space allocated for them. If there are no mappings set up, |
* this function is a NOP. |
*/ |
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) |
{ |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
|
if (!vmw_tt->vsgt.sgt) |
return; |
|
switch (dev_priv->map_mode) { |
case vmw_dma_map_bind: |
case vmw_dma_map_populate: |
vmw_ttm_unmap_from_dma(vmw_tt); |
sg_free_table(vmw_tt->vsgt.sgt); |
vmw_tt->vsgt.sgt = NULL; |
ttm_mem_global_free(vmw_mem_glob(dev_priv), |
vmw_tt->sg_alloc_size); |
break; |
default: |
break; |
} |
vmw_tt->mapped = false; |
} |
|
|
/** |
* vmw_bo_map_dma - Make sure buffer object pages are visible to the device |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer |
* instead of a pointer to a struct vmw_ttm_tt as argument. |
* Note that the buffer object must be either pinned or reserved before |
* calling this function. |
*/ |
int vmw_bo_map_dma(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
|
return vmw_ttm_map_dma(vmw_tt); |
} |
|
|
/** |
* vmw_bo_unmap_dma - Tear down any device DMA mappings for buffer object pages |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer |
* instead of a pointer to a struct vmw_ttm_tt as argument. |
*/ |
void vmw_bo_unmap_dma(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
|
vmw_ttm_unmap_dma(vmw_tt); |
} |
|
|
/** |
* vmw_bo_sg_table - Return a struct vmw_sg_table object for a |
* TTM buffer object |
* |
* @bo: Pointer to a struct ttm_buffer_object |
* |
* Returns a pointer to a struct vmw_sg_table object. The returned object |
* must not be freed by the caller. |
* Note that for the device addresses to be valid, the buffer object must |
* either be reserved or pinned. |
*/ |
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
|
return &vmw_tt->vsgt; |
} |
|
|
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) |
{ |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
int ret; |
|
ret = vmw_ttm_map_dma(vmw_be); |
if (unlikely(ret != 0)) |
return ret; |
|
vmw_be->gmr_id = bo_mem->start; |
vmw_be->mem_type = bo_mem->mem_type; |
|
switch (bo_mem->mem_type) { |
case VMW_PL_GMR: |
return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt, |
ttm->num_pages, vmw_be->gmr_id); |
case VMW_PL_MOB: |
if (unlikely(vmw_be->mob == NULL)) { |
vmw_be->mob = |
vmw_mob_create(ttm->num_pages); |
if (unlikely(vmw_be->mob == NULL)) |
return -ENOMEM; |
} |
|
return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob, |
&vmw_be->vsgt, ttm->num_pages, |
vmw_be->gmr_id); |
default: |
BUG(); |
} |
return 0; |
} |
|
static int vmw_ttm_unbind(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
|
switch (vmw_be->mem_type) { |
case VMW_PL_GMR: |
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id); |
break; |
case VMW_PL_MOB: |
vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob); |
break; |
default: |
BUG(); |
} |
|
if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind) |
vmw_ttm_unmap_dma(vmw_be); |
|
return 0; |
} |
|
|
static void vmw_ttm_destroy(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_be = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
|
vmw_ttm_unmap_dma(vmw_be); |
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
ttm_dma_tt_fini(&vmw_be->dma_ttm); |
else |
ttm_tt_fini(ttm); |
|
if (vmw_be->mob) |
vmw_mob_destroy(vmw_be->mob); |
|
kfree(vmw_be); |
} |
|
|
static int vmw_ttm_populate(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_tt = |
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
int ret; |
|
if (ttm->state != tt_unpopulated) |
return 0; |
|
if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
size_t size = |
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); |
ret = ttm_mem_global_alloc(glob, size, false, true); |
if (unlikely(ret != 0)) |
return ret; |
|
ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev); |
if (unlikely(ret != 0)) |
ttm_mem_global_free(glob, size); |
} else |
ret = ttm_pool_populate(ttm); |
|
return ret; |
} |
|
static void vmw_ttm_unpopulate(struct ttm_tt *ttm) |
{ |
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, |
dma_ttm.ttm); |
struct vmw_private *dev_priv = vmw_tt->dev_priv; |
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); |
|
|
if (vmw_tt->mob) { |
vmw_mob_destroy(vmw_tt->mob); |
vmw_tt->mob = NULL; |
} |
|
vmw_ttm_unmap_dma(vmw_tt); |
if (dev_priv->map_mode == vmw_dma_alloc_coherent) { |
size_t size = |
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t)); |
|
ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev); |
ttm_mem_global_free(glob, size); |
} else |
ttm_pool_unpopulate(ttm); |
} |
|
static struct ttm_backend_func vmw_ttm_func = { |
.bind = vmw_ttm_bind, |
.unbind = vmw_ttm_unbind, |
178,33 → 642,42 |
.destroy = vmw_ttm_destroy, |
}; |
|
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev, |
unsigned long size, uint32_t page_flags, |
struct page *dummy_read_page) |
{ |
struct vmw_ttm_tt *vmw_be; |
int ret; |
|
vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL); |
if (!vmw_be) |
return NULL; |
|
vmw_be->dma_ttm.ttm.func = &vmw_ttm_func; |
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev); |
vmw_be->mob = NULL; |
|
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) |
ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags, |
dummy_read_page); |
else |
ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags, |
dummy_read_page); |
if (unlikely(ret != 0)) |
goto out_no_init; |
|
return &vmw_be->dma_ttm.ttm; |
out_no_init: |
kfree(vmw_be); |
return NULL; |
} |
|
|
static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
{ |
return 0; |
} |
|
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
struct ttm_mem_type_manager *man) |
{ |
switch (type) { |
224,6 → 697,7 |
man->default_caching = TTM_PL_FLAG_CACHED; |
break; |
case VMW_PL_GMR: |
case VMW_PL_MOB: |
/* |
* "Guest Memory Regions" is an aperture like feature with |
* one slot per bo. There is an upper limit of the number of |
242,7 → 716,7 |
return 0; |
} |
|
static void vmw_evict_flags(struct ttm_buffer_object *bo, |
struct ttm_placement *placement) |
{ |
*placement = vmw_sys_placement; |
271,6 → 745,7 |
switch (mem->mem_type) { |
case TTM_PL_SYSTEM: |
case VMW_PL_GMR: |
case VMW_PL_MOB: |
return 0; |
case TTM_PL_VRAM: |
mem->bus.offset = mem->start << PAGE_SHIFT; |
330,10 → 805,42 |
VMW_FENCE_WAIT_TIMEOUT); |
} |
|
/** |
* vmw_move_notify - TTM move_notify callback |
* |
* @bo: The TTM buffer object about to move. |
* @mem: The struct ttm_mem_reg indicating the memory region to |
* which the move is taking place. |
* |
* Calls move_notify for all subsystems needing it. |
* (currently only resources). |
*/ |
static void vmw_move_notify(struct ttm_buffer_object *bo, |
struct ttm_mem_reg *mem) |
{ |
vmw_resource_move_notify(bo, mem); |
} |
|
|
/** |
* vmw_swap_notify - TTM swap_notify callback |
* |
* @bo: The TTM buffer object about to be swapped out. |
*/ |
static void vmw_swap_notify(struct ttm_buffer_object *bo) |
{ |
struct ttm_bo_device *bdev = bo->bdev; |
|
spin_lock(&bdev->fence_lock); |
ttm_bo_wait(bo, false, false, false); |
spin_unlock(&bdev->fence_lock); |
} |
|
|
struct ttm_bo_driver vmw_bo_driver = { |
.ttm_tt_create = &vmw_ttm_tt_create, |
.ttm_tt_populate = &vmw_ttm_populate, |
.ttm_tt_unpopulate = &vmw_ttm_unpopulate, |
.invalidate_caches = vmw_invalidate_caches, |
.init_mem_type = vmw_init_mem_type, |
.evict_flags = vmw_evict_flags, |
344,9 → 851,243 |
.sync_obj_flush = vmw_sync_obj_flush, |
.sync_obj_unref = vmw_sync_obj_unref, |
.sync_obj_ref = vmw_sync_obj_ref, |
.move_notify = vmw_move_notify, |
.swap_notify = vmw_swap_notify, |
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify, |
.io_mem_reserve = &vmw_ttm_io_mem_reserve, |
.io_mem_free = &vmw_ttm_io_mem_free, |
}; |
|
|
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
if (sg_is_last(sg)) |
return NULL; |
|
sg++; |
if (unlikely(sg_is_chain(sg))) |
sg = sg_chain_ptr(sg); |
|
return sg; |
} |
|
|
void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
sg_free_fn *free_fn) |
{ |
struct scatterlist *sgl, *next; |
|
if (unlikely(!table->sgl)) |
return; |
|
sgl = table->sgl; |
while (table->orig_nents) { |
unsigned int alloc_size = table->orig_nents; |
unsigned int sg_size; |
|
/* |
* If we have more than max_ents segments left, |
* then assign 'next' to the sg table after the current one. |
* sg_size is then one less than alloc size, since the last |
* element is the chain pointer. |
*/ |
if (alloc_size > max_ents) { |
next = sg_chain_ptr(&sgl[max_ents - 1]); |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else { |
sg_size = alloc_size; |
next = NULL; |
} |
|
table->orig_nents -= sg_size; |
kfree(sgl); |
sgl = next; |
} |
|
table->sgl = NULL; |
} |
|
void sg_free_table(struct sg_table *table) |
{ |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
} |
|
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) |
{ |
struct scatterlist *sg, *prv; |
unsigned int left; |
unsigned int max_ents = SG_MAX_SINGLE_ALLOC; |
|
#ifndef ARCH_HAS_SG_CHAIN |
BUG_ON(nents > max_ents); |
#endif |
|
memset(table, 0, sizeof(*table)); |
|
left = nents; |
prv = NULL; |
do { |
unsigned int sg_size, alloc_size = left; |
|
if (alloc_size > max_ents) { |
alloc_size = max_ents; |
sg_size = alloc_size - 1; |
} else |
sg_size = alloc_size; |
|
left -= sg_size; |
|
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask); |
if (unlikely(!sg)) { |
/* |
* Adjust entry count to reflect that the last |
* entry of the previous table won't be used for |
* linkage. Without this, sg_kfree() may get |
* confused. |
*/ |
if (prv) |
table->nents = ++table->orig_nents; |
|
goto err; |
} |
|
sg_init_table(sg, alloc_size); |
table->nents = table->orig_nents += sg_size; |
|
/* |
* If this is the first mapping, assign the sg table header. |
* If this is not the first mapping, chain previous part. |
*/ |
if (prv) |
sg_chain(prv, max_ents, sg); |
else |
table->sgl = sg; |
|
/* |
* If no more entries after this one, mark the end |
*/ |
if (!left) |
sg_mark_end(&sg[sg_size - 1]); |
|
prv = sg; |
} while (left); |
|
return 0; |
|
err: |
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL); |
|
return -ENOMEM; |
} |
|
|
void sg_init_table(struct scatterlist *sgl, unsigned int nents) |
{ |
memset(sgl, 0, sizeof(*sgl) * nents); |
#ifdef CONFIG_DEBUG_SG |
{ |
unsigned int i; |
for (i = 0; i < nents; i++) |
sgl[i].sg_magic = SG_MAGIC; |
} |
#endif |
sg_mark_end(&sgl[nents - 1]); |
} |
|
|
void __sg_page_iter_start(struct sg_page_iter *piter, |
struct scatterlist *sglist, unsigned int nents, |
unsigned long pgoffset) |
{ |
piter->__pg_advance = 0; |
piter->__nents = nents; |
|
piter->sg = sglist; |
piter->sg_pgoffset = pgoffset; |
} |
|
static int sg_page_count(struct scatterlist *sg) |
{ |
return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT; |
} |
|
bool __sg_page_iter_next(struct sg_page_iter *piter) |
{ |
if (!piter->__nents || !piter->sg) |
return false; |
|
piter->sg_pgoffset += piter->__pg_advance; |
piter->__pg_advance = 1; |
|
while (piter->sg_pgoffset >= sg_page_count(piter->sg)) { |
piter->sg_pgoffset -= sg_page_count(piter->sg); |
piter->sg = sg_next(piter->sg); |
if (!--piter->__nents || !piter->sg) |
return false; |
} |
|
return true; |
} |
EXPORT_SYMBOL(__sg_page_iter_next); |
|
|
int sg_alloc_table_from_pages(struct sg_table *sgt, |
struct page **pages, unsigned int n_pages, |
unsigned long offset, unsigned long size, |
gfp_t gfp_mask) |
{ |
unsigned int chunks; |
unsigned int i; |
unsigned int cur_page; |
int ret; |
struct scatterlist *s; |
|
/* compute number of contiguous chunks */ |
chunks = 1; |
for (i = 1; i < n_pages; ++i) |
if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) |
++chunks; |
|
ret = sg_alloc_table(sgt, chunks, gfp_mask); |
if (unlikely(ret)) |
return ret; |
|
/* merging chunks and putting them into the scatterlist */ |
cur_page = 0; |
for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { |
unsigned long chunk_size; |
unsigned int j; |
|
/* look for the end of the current chunk */ |
for (j = cur_page + 1; j < n_pages; ++j) |
if (page_to_pfn(pages[j]) != |
page_to_pfn(pages[j - 1]) + 1) |
break; |
|
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; |
sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); |
size -= chunk_size; |
offset = 0; |
cur_page = j; |
} |
|
return 0; |
} |
|
int dma_map_sg(struct device *dev, struct scatterlist *sglist, |
int nelems, int dir) |
{ |
struct scatterlist *s; |
int i; |
|
for_each_sg(sglist, s, nelems, i) { |
s->dma_address = (dma_addr_t)sg_phys(s); |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
s->dma_length = s->length; |
#endif |
} |
|
return nelems; |
} |
|