Subversion Repositories: Kolibri OS

Compare Revisions: Rev 4112 → Rev 4569

/drivers/video/drm/ttm/ttm_bo.c
426,8 → 426,20
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
 
if (!ret)
if (!ret) {
 
/*
* Make NO_EVICT bos immediately available to
* shrinkers, now that they are queued for
* destruction.
*/
if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
ttm_bo_add_to_lru(bo);
}
 
ww_mutex_unlock(&bo->resv->lock);
}
 
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
944,7 → 956,7
}
EXPORT_SYMBOL(ttm_bo_mem_space);
 
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
986,8 → 998,9
}
#endif
 
static int ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem)
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_mem_reg *mem,
uint32_t *new_flags)
{
int i;
 
994,18 → 1007,25
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
return -1;
return false;
 
for (i = 0; i < placement->num_placement; i++) {
if ((placement->placement[i] & mem->placement &
TTM_PL_MASK_CACHING) &&
(placement->placement[i] & mem->placement &
TTM_PL_MASK_MEM))
return i;
*new_flags = placement->placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
return -1;
 
for (i = 0; i < placement->num_busy_placement; i++) {
*new_flags = placement->busy_placement[i];
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
(*new_flags & mem->placement & TTM_PL_MASK_MEM))
return true;
}
 
return false;
}
 
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
1012,6 → 1032,7
bool no_wait_gpu)
{
int ret;
uint32_t new_flags;
 
// BUG_ON(!ttm_bo_is_reserved(bo));
/* Check that range is valid */
1022,8 → 1043,7
/*
* Check whether we need to move buffer.
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
// ret = ttm_bo_move_buffer(bo, placement, interruptible,
// no_wait_gpu);
if (ret)
1033,7 → 1053,7
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
ttm_flag_masked(&bo->mem.placement, new_flags,
~TTM_PL_MASK_MEMTYPE);
}
/*
1103,6 → 1123,7
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
1368,3 → 1389,36
return true;
}
 
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
{
struct ttm_bo_driver *driver = bo->bdev->driver;
struct ttm_bo_device *bdev = bo->bdev;
void *sync_obj;
int ret = 0;
 
if (likely(bo->sync_obj == NULL))
return 0;
 
return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
 
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
struct ttm_bo_device *bdev = bo->bdev;
int ret = 0;
 
/*
* Using ttm_bo_reserve makes sure the lru lists are updated.
*/
 
return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
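
The hunk at 994,18 → 1007,25 interleaves removed and added lines, so the reworked compatibility check is hard to read in place. Below is a consolidated sketch of how the function reads after this revision, reconstructed from the hunk; every identifier used here (busy_placement, TTM_PL_MASK_CACHING, TTM_PL_MASK_MEM, new_flags) appears in the diff itself, but the exact upstream body may differ in minor details.

/* Sketch of the reworked check: report whether the current memory
 * region already satisfies the requested placement, and if so hand
 * the matching placement flags back through @new_flags. */
static bool ttm_bo_mem_compat(struct ttm_placement *placement,
			      struct ttm_mem_reg *mem,
			      uint32_t *new_flags)
{
	int i;

	/* Reject regions outside the requested pfn range. */
	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return false;

	/* First pass: the preferred placements. */
	for (i = 0; i < placement->num_placement; i++) {
		*new_flags = placement->placement[i];
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	/* Second pass: the fallback (busy) placements. */
	for (i = 0; i < placement->num_busy_placement; i++) {
		*new_flags = placement->busy_placement[i];
		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
			return true;
	}

	return false;
}
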
/drivers/video/drm/ttm/ttm_bo_util.c
187,7 → 187,7
}
}
 
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
219,7 → 219,7
return 0;
}
 
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
343,20 → 343,26
if (ret)
goto out;
 
/*
* Single TTM move. NOP.
*/
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
 
/*
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL && ttm == NULL)
goto out2;
 
if (ttm->state == tt_unpopulated) {
/*
* TTM might be null for moves within the same region.
*/
if (ttm && ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret) {
/* if we fail here don't nuke the mm node
* as the bo still owns it */
old_copy.mm_node = NULL;
if (ret)
goto out1;
}
}
 
add = 0;
dir = 1;
381,12 → 387,9
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
if (ret) {
/* failing here, means keep old copy as-is */
old_copy.mm_node = NULL;
if (ret)
goto out1;
}
}
mb();
out2:
old_copy = *old_mem;
403,6 → 406,11
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
 
/*
* On error, keep the mm node!
*/
if (!ret)
ttm_bo_mem_put(bo, &old_copy);
return ret;
}
582,7 → 590,7
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
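
Two points in the ttm_bo_move_memcpy() hunk above are easy to miss in the interleaved view: a bound ttm may legitimately be NULL for a move within a single memory region, and on failure the old mm node is now kept (ttm_bo_mem_put() only runs when ret is zero) so the buffer still owns its previous placement. A minimal sketch of the populate-on-demand guard the new code applies; the helper name is hypothetical and not part of the tree.

/* Hypothetical helper, sketching the guard added in this revision:
 * nothing to prepare without a ttm, and an unpopulated ttm must get
 * its backing pages from the driver before they can be copied. */
static int ttm_move_prepare_ttm(struct ttm_tt *ttm)
{
	if (ttm == NULL)
		return 0;	/* move within one region: no pages needed */

	if (ttm->state != tt_unpopulated)
		return 0;	/* pages already present */

	return ttm->bdev->driver->ttm_tt_populate(ttm);
}
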
/drivers/video/drm/ttm/ttm_object.c
1,6 → 1,6
/**************************************************************************
*
* Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
* Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
26,6 → 26,12
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* While no substantial code is shared, the prime code is inspired by
* drm_prime.c, with
* Authors:
* Dave Airlie <airlied@redhat.com>
* Rob Clark <rob.clark@linaro.org>
*/
/** @file ttm_ref_object.c
*
34,6 → 40,7
* and release on file close.
*/
 
 
/**
* struct ttm_object_file
*
51,6 → 58,8
 
#define pr_fmt(fmt) "[TTM] " fmt
 
#include <linux/mutex.h>
 
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
69,7 → 78,7
 
struct ttm_object_file {
struct ttm_object_device *tdev;
rwlock_t lock;
spinlock_t lock;
struct list_head ref_list;
struct drm_open_hash ref_hash[TTM_REF_NUM];
struct kref refcount;
124,6 → 133,8
struct ttm_object_file *tfile;
};
 
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
 
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
206,11 → 217,10
* call_rcu() or ttm_base_object_kfree().
*/
 
if (base->refcount_release) {
ttm_object_file_unref(&base->tfile);
if (base->refcount_release)
base->refcount_release(&base);
}
}
 
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
225,32 → 235,44
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
uint32_t key)
{
struct ttm_object_device *tdev = tfile->tdev;
struct ttm_base_object *base;
struct ttm_base_object *base = NULL;
struct drm_hash_item *hash;
struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
int ret;
 
// rcu_read_lock();
ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
ret = drm_ht_find_item_rcu(ht, key, &hash);
 
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
// rcu_read_unlock();
 
if (unlikely(ret != 0))
return NULL;
return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
 
if (tfile != base->tfile && !base->shareable) {
pr_err("Attempted access of non-shareable object\n");
ttm_base_object_unref(&base);
return NULL;
struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
struct ttm_base_object *base = NULL;
struct drm_hash_item *hash;
struct drm_open_hash *ht = &tdev->object_hash;
int ret;
 
ret = drm_ht_find_item_rcu(ht, key, &hash);
 
if (likely(ret == 0)) {
base = drm_hash_entry(hash, struct ttm_base_object, hash);
if (!kref_get_unless_zero(&base->refcount))
base = NULL;
}
 
return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
266,17 → 288,15
*existed = true;
 
while (ret == -EINVAL) {
read_lock(&tfile->lock);
ret = drm_ht_find_item(ht, base->hash.key, &hash);
ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_get(&ref->kref);
read_unlock(&tfile->lock);
if (!kref_get_unless_zero(&ref->kref)) {
break;
}
}
 
read_unlock(&tfile->lock);
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
293,19 → 313,19
ref->ref_type = ref_type;
kref_init(&ref->kref);
 
write_lock(&tfile->lock);
ret = drm_ht_insert_item(ht, &ref->hash);
spin_lock(&tfile->lock);
ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
if (likely(ret == 0)) {
list_add_tail(&ref->head, &tfile->ref_list);
kref_get(&base->refcount);
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
if (existed != NULL)
*existed = false;
break;
}
 
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
BUG_ON(ret != -EINVAL);
 
ttm_mem_global_free(mem_glob, sizeof(*ref));
326,9 → 346,9
struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
ht = &tfile->ref_hash[ref->ref_type];
(void)drm_ht_remove_item(ht, &ref->hash);
(void)drm_ht_remove_item_rcu(ht, &ref->hash);
list_del(&ref->head);
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
 
if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
base->ref_obj_release(base, ref->ref_type);
336,7 → 356,7
ttm_base_object_unref(&ref->obj);
ttm_mem_global_free(mem_glob, sizeof(*ref));
kfree(ref);
write_lock(&tfile->lock);
spin_lock(&tfile->lock);
}
 
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
347,15 → 367,15
struct drm_hash_item *hash;
int ret;
 
write_lock(&tfile->lock);
spin_lock(&tfile->lock);
ret = drm_ht_find_item(ht, key, &hash);
if (unlikely(ret != 0)) {
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
return -EINVAL;
}
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
kref_put(&ref->kref, ttm_ref_object_release);
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
368,7 → 388,7
struct ttm_object_file *tfile = *p_tfile;
 
*p_tfile = NULL;
write_lock(&tfile->lock);
spin_lock(&tfile->lock);
 
/*
* Since we release the lock within the loop, we have to
384,7 → 404,7
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
 
write_unlock(&tfile->lock);
spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
400,7 → 420,7
if (unlikely(tfile == NULL))
return NULL;
 
rwlock_init(&tfile->lock);
spin_lock_init(&tfile->lock);
tfile->tdev = tdev;
kref_init(&tfile->refcount);
INIT_LIST_HEAD(&tfile->ref_list);
424,9 → 444,10
}
EXPORT_SYMBOL(ttm_object_file_init);
 
struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
*mem_glob,
unsigned int hash_order)
struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
unsigned int hash_order,
const struct dma_buf_ops *ops)
{
struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
int ret;
438,10 → 459,17
spin_lock_init(&tdev->object_lock);
atomic_set(&tdev->object_count, 0);
ret = drm_ht_create(&tdev->object_hash, hash_order);
if (ret != 0)
goto out_no_object_hash;
 
if (likely(ret == 0))
// tdev->ops = *ops;
// tdev->dmabuf_release = tdev->ops.release;
// tdev->ops.release = ttm_prime_dmabuf_release;
// tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
// ttm_round_pot(sizeof(struct file));
return tdev;
 
out_no_object_hash:
kfree(tdev);
return NULL;
}
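
The ttm_object.c changes above all push in one direction: the per-file rwlock becomes a spinlock, hash walks use the _rcu variants, and every lookup takes its reference with kref_get_unless_zero(), so an object whose refcount has already hit zero (and whose memory will be reclaimed via call_rcu(), as the earlier comment notes) is simply reported as not found. A minimal sketch of that lookup pattern, using a hypothetical helper name; in the Kolibri port the rcu_read_lock()/rcu_read_unlock() pair is still stubbed out, as the hunks show.

/* Hypothetical helper illustrating the RCU lookup pattern this
 * revision converges on. */
static struct ttm_base_object *
lookup_base_rcu(struct drm_open_hash *ht, uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;

	rcu_read_lock();
	if (drm_ht_find_item_rcu(ht, key, &hash) == 0) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		/* Fails once the refcount is zero, i.e. the object is
		 * already being released. */
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
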
/drivers/video/drm/ttm/ttm_page_alloc.c
41,7 → 41,7
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/dma-mapping.h>
 
//#include <linux/atomic.h>
 
/drivers/video/drm/ttm/ttm_tt.c
172,9 → 172,8
ttm_tt_unbind(ttm);
}
 
if (likely(ttm->pages != NULL)) {
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
// if (ttm->state == tt_unbound)
// ttm_tt_unpopulate(ttm);
 
// if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
// ttm->swap_storage)
368,7 → 367,7
page_cache_release(to_page);
}
 
ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
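
The final ttm_tt.c hunks route unpopulation through ttm_tt_unpopulate() instead of calling the driver hook directly. A minimal sketch of what such a wrapper amounts to, assuming only the usual state guard; the exact upstream body may carry additional bookkeeping.

/* Sketch only: skip buffers that are already unpopulated, then defer
 * to the driver hook this revision stops calling directly. */
void ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	if (ttm->state == tt_unpopulated)
		return;

	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}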