/drivers/video/drm/ttm/ttm_bo.c
39,6 → 39,10
#include <linux/mm.h>
#include <linux/module.h>
#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
#define pr_err(fmt, ...) \
    printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
219,7 → 223,6
}
EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
/*
 * Call bo->mutex locked.
 */
230,7 → 233,7
    int ret = 0;
    uint32_t page_flags = 0;
-   // TTM_ASSERT_LOCKED(&bo->mutex);
+   TTM_ASSERT_LOCKED(&bo->mutex);
    bo->ttm = NULL;
    if (bdev->need_dma32)
609,13 → 612,7
    struct ttm_bo_device *bdev = bo->bdev;
    struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
-   write_lock(&bdev->vm_lock);
-   if (likely(bo->vm_node != NULL)) {
-       // rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-       drm_mm_put_block(bo->vm_node);
-       bo->vm_node = NULL;
-   }
-   write_unlock(&bdev->vm_lock);
+   drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
    ttm_mem_io_lock(man, false);
    // ttm_mem_io_free_vm(bo);
    ttm_mem_io_unlock(man);
1125,6 → 1122,7
    bo->resv = &bo->ttm_resv;
    // reservation_object_init(bo->resv);
    atomic_inc(&bo->glob->bo_count);
+   drm_vma_node_reset(&bo->vma_node);
    ret = ttm_bo_check_placement(bo, placement);
1303,6 → 1301,7 |
kfree(glob); |
return ret; |
} |
EXPORT_SYMBOL(ttm_bo_global_init); |
int ttm_bo_device_init(struct ttm_bo_device *bdev, |
1315,7 → 1314,6
    ENTER();
-   // rwlock_init(&bdev->vm_lock);
    bdev->driver = driver;
    memset(bdev->man, 0, sizeof(bdev->man));
1328,9 → 1326,8
    if (unlikely(ret != 0))
        goto out_no_sys;
-   bdev->addr_space_rb = RB_ROOT;
-   drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
+   drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+                   0x10000000);
    // INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
    INIT_LIST_HEAD(&bdev->ddestroy);
    bdev->dev_mapping = NULL;
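For reference, a minimal sketch of how the drm_vma_offset_manager calls introduced above fit together over an object's lifetime (manager init at device setup, node reset at object init, offset add/remove later). The my_dev/my_bo structures and helper names are illustrative only, not TTM code; the drm_vma_manager API is assumed to be the kernel 3.12-era one this port targets.

```c
#include <drm/drm_vma_manager.h>

struct my_dev { struct drm_vma_offset_manager vma_manager; };
struct my_bo  { struct drm_vma_offset_node vma_node; unsigned long num_pages; };

static void my_dev_init(struct my_dev *dev, unsigned long file_page_offset)
{
	/* replaces the old addr_space_rb + drm_mm_init() pair */
	drm_vma_offset_manager_init(&dev->vma_manager, file_page_offset,
				    0x10000000);
}

static int my_bo_init(struct my_dev *dev, struct my_bo *bo)
{
	drm_vma_node_reset(&bo->vma_node);	/* replaces bo->vm_node = NULL */
	/* allocate an mmap offset; replaces the drm_mm block lookup */
	return drm_vma_offset_add(&dev->vma_manager, &bo->vma_node,
				  bo->num_pages);
}

static void my_bo_release(struct my_dev *dev, struct my_bo *bo)
{
	/* replaces the vm_lock / rb_erase / drm_mm_put_block sequence */
	drm_vma_offset_remove(&dev->vma_manager, &bo->vma_node);
}
```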
/drivers/video/drm/ttm/ttm_bo_manager.c
61,28 → 61,25
    lpfn = placement->lpfn;
    if (!lpfn)
        lpfn = man->size;
-   do {
-       ret = drm_mm_pre_get(mm);
-       if (unlikely(ret))
-           return ret;
+   node = kzalloc(sizeof(*node), GFP_KERNEL);
+   if (!node)
+       return -ENOMEM;
    spin_lock(&rman->lock);
-   node = drm_mm_search_free_in_range(mm,
-                   mem->num_pages, mem->page_alignment,
-                   placement->fpfn, lpfn, 1);
-   if (unlikely(node == NULL)) {
-       spin_unlock(&rman->lock);
-       return 0;
-   }
-   node = drm_mm_get_block_atomic_range(node, mem->num_pages,
+   ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
                    mem->page_alignment,
-                   placement->fpfn,
-                   lpfn);
+                   placement->fpfn, lpfn,
+                   DRM_MM_SEARCH_BEST);
    spin_unlock(&rman->lock);
-   } while (node == NULL);
+   if (unlikely(ret)) {
+       kfree(node);
+   } else {
        mem->mm_node = node;
        mem->start = node->start;
+   }
    return 0;
}
93,8 → 90,10
    if (mem->mm_node) {
        spin_lock(&rman->lock);
-       drm_mm_put_block(mem->mm_node);
+       drm_mm_remove_node(mem->mm_node);
        spin_unlock(&rman->lock);
+       kfree(mem->mm_node);
        mem->mm_node = NULL;
    }
}
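The ttm_bo_manager.c change above drops the drm_mm_pre_get()/drm_mm_search_free_in_range()/drm_mm_get_block_atomic_range() sequence in favour of a caller-allocated node passed to drm_mm_insert_node_in_range(). Below is a hedged sketch of that allocate/free pattern in isolation; my_range_alloc/my_range_free are made-up names, and the DRM_MM_SEARCH_BEST-era drm_mm API is assumed.

```c
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <drm/drm_mm.h>

static int my_range_alloc(struct drm_mm *mm, spinlock_t *lock,
			  unsigned long pages, unsigned alignment,
			  unsigned long fpfn, unsigned long lpfn,
			  struct drm_mm_node **out)
{
	struct drm_mm_node *node;
	int ret;

	/* pre-allocate the node outside the spinlock: GFP_KERNEL may sleep */
	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	spin_lock(lock);
	ret = drm_mm_insert_node_in_range(mm, node, pages, alignment,
					  fpfn, lpfn, DRM_MM_SEARCH_BEST);
	spin_unlock(lock);

	if (ret) {
		kfree(node);	/* insertion failed, node was never linked */
		return ret;
	}
	*out = node;
	return 0;
}

static void my_range_free(spinlock_t *lock, struct drm_mm_node *node)
{
	spin_lock(lock);
	drm_mm_remove_node(node);	/* unhook from the allocator */
	spin_unlock(lock);
	kfree(node);			/* caller owns the node memory */
}
```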
/drivers/video/drm/ttm/ttm_bo_util.c
30,6 → 30,7
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
450,7 → 451,7
    INIT_LIST_HEAD(&fbo->lru);
    INIT_LIST_HEAD(&fbo->swap);
    INIT_LIST_HEAD(&fbo->io_reserve_lru);
-   fbo->vm_node = NULL;
+   drm_vma_node_reset(&fbo->vma_node);
    atomic_set(&fbo->cpu_writers, 0);
    spin_lock(&bdev->fence_lock);
/drivers/video/drm/ttm/ttm_page_alloc.c
383,22 → 383,19
    return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-   unsigned i;
-   int total = 0;
-   for (i = 0; i < NUM_POOLS; ++i)
-       total += _manager->pools[i].npages;
-   return total;
-}
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
 * this can deadlock when called with a sc->gfp_mask that is not equal to
 * GFP_KERNEL.
 *
 * This code is crying out for a shrinker per pool....
 */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-                 struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
    static atomic_t start_pool = ATOMIC_INIT(0);
    unsigned i;
405,6 → 402,7
    unsigned pool_offset = atomic_add_return(1, &start_pool);
    struct ttm_page_pool *pool;
    int shrink_pages = sc->nr_to_scan;
+   unsigned long freed = 0;
    pool_offset = pool_offset % NUM_POOLS;
    /* select start pool in round robin fashion */
414,14 → 412,28
            break;
        pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
        shrink_pages = ttm_page_pool_free(pool, nr_free);
+       freed += nr_free - shrink_pages;
    }
-   /* return estimated number of unused pages in pool */
-   return ttm_pool_get_num_unused_pages();
+   return freed;
}

+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+   unsigned i;
+   unsigned long count = 0;
+   for (i = 0; i < NUM_POOLS; ++i)
+       count += _manager->pools[i].npages;
+   return count;
+}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
-   manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+   manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+   manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
    manager->mm_shrink.seeks = 1;
    register_shrinker(&manager->mm_shrink);
}
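The ttm_page_alloc.c hunks above convert the pool shrinker from the single .shrink callback to the split count_objects/scan_objects API. A minimal sketch of that contract follows, assuming a kernel 3.12+ shrinker; my_count/my_scan/my_cache_objects are illustrative names, not TTM code.

```c
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t my_cache_objects = ATOMIC_LONG_INIT(0);

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	/* cheap estimate only; must not sleep or take heavy locks */
	return atomic_long_read(&my_cache_objects);
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* try to drop up to sc->nr_to_scan objects, report how many went away */
	while (freed < sc->nr_to_scan &&
	       atomic_long_read(&my_cache_objects) > 0) {
		atomic_long_dec(&my_cache_objects);	/* stand-in for real freeing */
		freed++;
	}
	return freed ? freed : SHRINK_STOP;	/* SHRINK_STOP: no progress possible */
}

static struct shrinker my_shrinker = {
	.count_objects = my_count,
	.scan_objects  = my_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* registration, e.g. from module init: register_shrinker(&my_shrinker); */
```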