@@ -187,7 +187,7 @@
} |
} |
|
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
void **virtual) |
{ |
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
@@ -219,7 +219,7 @@
return 0; |
} |
|
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem, |
void *virtual) |
{ |
struct ttm_mem_type_manager *man; |
@@ -343,20 +343,26 @@
if (ret) |
goto out; |
|
/* |
* Single TTM move. NOP. |
*/ |
if (old_iomap == NULL && new_iomap == NULL) |
goto out2; |
|
/* |
* Don't move nonexistent data. Clear destination instead. |
*/ |
if (old_iomap == NULL && ttm == NULL) |
goto out2; |
|
if (ttm->state == tt_unpopulated) { |
/* |
* TTM might be null for moves within the same region. |
*/ |
if (ttm && ttm->state == tt_unpopulated) { |
ret = ttm->bdev->driver->ttm_tt_populate(ttm); |
if (ret) { |
/* if we fail here don't nuke the mm node |
* as the bo still owns it */ |
old_copy.mm_node = NULL; |
if (ret) |
goto out1; |
} |
} |
|
add = 0; |
dir = 1; |
@@ -381,12 +387,9 @@
prot); |
} else |
ret = ttm_copy_io_page(new_iomap, old_iomap, page); |
if (ret) { |
/* failing here, means keep old copy as-is */ |
old_copy.mm_node = NULL; |
if (ret) |
goto out1; |
} |
} |
mb(); |
out2: |
old_copy = *old_mem; |
@@ -403,6 +406,11 @@
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap); |
out: |
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap); |
|
/* |
* On error, keep the mm node! |
*/ |
if (!ret) |
ttm_bo_mem_put(bo, &old_copy); |
return ret; |
} |
@@ -582,7 +590,7 @@
if (start_page > bo->num_pages) |
return -EINVAL; |
#if 0 |
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC)) |
if (num_pages > 1 && !capable(CAP_SYS_ADMIN)) |
return -EPERM; |
#endif |
(void) ttm_mem_io_lock(man, false); |