@@ -426,8 +426,20 @@
 	sync_obj = driver->sync_obj_ref(bo->sync_obj);
 	spin_unlock(&bdev->fence_lock);
 
-	if (!ret)
-		ww_mutex_unlock(&bo->resv->lock);
+	if (!ret) {
+
+		/*
+		 * Make NO_EVICT bos immediately available to
+		 * shrinkers, now that they are queued for
+		 * destruction.
+		 */
+		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+			ttm_bo_add_to_lru(bo);
+		}
+
+		ww_mutex_unlock(&bo->resv->lock);
+	}
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -944,7 +956,7 @@
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			bool interruptible,
 			bool no_wait_gpu)
@@ -986,8 +998,9 @@
 }
 #endif
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
@@ -994,18 +1007,25 @@
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-		     TTM_PL_MASK_CACHING) &&
-		    (placement->placement[i] & mem->placement &
-		     TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_placement *placement,
 		    bool interruptible,
@@ -1012,6 +1032,7 @@
 		    bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	// BUG_ON(!ttm_bo_is_reserved(bo));
 	/* Check that range is valid */
@@ -1022,8 +1043,7 @@
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		// ret = ttm_bo_move_buffer(bo, placement, interruptible,
 		//			    no_wait_gpu);
 		if (ret)
@@ -1033,7 +1053,7 @@
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
@@ -1103,6 +1123,7 @@
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1368,3 +1389,36 @@
 	return true;
 }
 
+int ttm_bo_wait(struct ttm_buffer_object *bo,
+		bool lazy, bool interruptible, bool no_wait)
+{
+	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
+	void *sync_obj;
+	int ret = 0;
+
+	if (likely(bo->sync_obj == NULL))
+		return 0;
+
+	return 0;
+}
+EXPORT_SYMBOL(ttm_bo_wait);
+
+int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	int ret = 0;
+
+	/*
+	 * Using ttm_bo_reserve makes sure the lru lists are updated.
+	 */
+
+	return ret;
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
+
+void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
+{
+	atomic_dec(&bo->cpu_writers);
+}
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
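
A side note for readers following the ttm_bo_mem_compat() rework above: the sketch below is a small, self-contained illustration of the same two-pass check, first the preferred placements, then the busy_placement fallbacks, reporting the matched flags through *new_flags so the caller can update flags in place instead of moving the buffer. It is not part of the patch; struct mem_reg, struct placement, mem_compat() and the flag values are simplified stand-ins invented for illustration, not the real TTM definitions.

/* Builds standalone with: cc -std=c99 -Wall sketch.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PL_MASK_MEM      0x0000ffffu	/* memory-domain bits (stand-in) */
#define PL_MASK_CACHING  0x00070000u	/* caching bits (stand-in) */

struct mem_reg {
	uint32_t placement;		/* flags the buffer currently has */
};

struct placement {
	unsigned num_placement;
	const uint32_t *placement;	/* preferred placements */
	unsigned num_busy_placement;
	const uint32_t *busy_placement;	/* fallbacks under memory pressure */
};

/*
 * Same contract as the reworked ttm_bo_mem_compat(): true means the
 * current placement is acceptable and *new_flags holds the matched flags.
 */
static bool mem_compat(const struct placement *pl, const struct mem_reg *mem,
		       uint32_t *new_flags)
{
	unsigned i;

	for (i = 0; i < pl->num_placement; i++) {
		*new_flags = pl->placement[i];
		if ((*new_flags & mem->placement & PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & PL_MASK_MEM))
			return true;
	}

	for (i = 0; i < pl->num_busy_placement; i++) {
		*new_flags = pl->busy_placement[i];
		if ((*new_flags & mem->placement & PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & PL_MASK_MEM))
			return true;
	}

	return false;		/* caller must actually move the buffer */
}

int main(void)
{
	/* Hypothetical domain/caching bits, for illustration only. */
	const uint32_t VRAM = 0x1, GTT = 0x2, CACHED = 0x10000, WC = 0x20000;
	const uint32_t preferred[] = { VRAM | WC };
	const uint32_t busy[] = { GTT | CACHED };
	struct placement pl = { 1, preferred, 1, busy };
	struct mem_reg mem = { GTT | CACHED };	/* buffer already sits in GTT */
	uint32_t new_flags = 0;

	if (mem_compat(&pl, &mem, &new_flags))
		printf("compatible, new_flags=0x%x (no move needed)\n",
		       (unsigned)new_flags);
	else
		printf("incompatible, buffer must be moved\n");
	return 0;
}

In the example the preferred placement (VRAM) does not match, but the busy_placement entry (GTT, cached) does, so validation can succeed under memory pressure without forcing a move, which is the behaviour the second loop added by the patch provides.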