/drivers/video/drm/radeon/radeon.h |
---|
73,6 → 73,7 |
#include <ttm/ttm_placement.h> |
//#include <ttm/ttm_module.h> |
#include <ttm/ttm_execbuf_util.h> |
#include <linux/rwsem.h> |
#include <drm/drm_gem.h> |
/drivers/video/drm/radeon/radeon_cs.c |
---|
183,14 → 183,9 |
if (p->cs_flags & RADEON_CS_USE_VM) |
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm, |
&p->validated); |
// if (need_mmap_lock) |
// down_read(&current->mm->mmap_sem); |
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring); |
// if (need_mmap_lock) |
// up_read(&current->mm->mmap_sem); |
return r; |
} |
649,9 → 644,9 |
struct radeon_cs_parser parser; |
int r; |
// down_read(&rdev->exclusive_lock); |
down_read(&rdev->exclusive_lock); |
if (!rdev->accel_working) { |
// up_read(&rdev->exclusive_lock); |
up_read(&rdev->exclusive_lock); |
return -EBUSY; |
} |
/* initialize parser */ |
664,7 → 659,7 |
if (r) { |
DRM_ERROR("Failed to initialize parser !\n"); |
radeon_cs_parser_fini(&parser, r, false); |
// up_read(&rdev->exclusive_lock); |
up_read(&rdev->exclusive_lock); |
r = radeon_cs_handle_lockup(rdev, r); |
return r; |
} |
678,7 → 673,7 |
if (r) { |
radeon_cs_parser_fini(&parser, r, false); |
// up_read(&rdev->exclusive_lock); |
up_read(&rdev->exclusive_lock); |
r = radeon_cs_handle_lockup(rdev, r); |
return r; |
} |
695,7 → 690,7 |
} |
out: |
radeon_cs_parser_fini(&parser, r, true); |
// up_read(&rdev->exclusive_lock); |
up_read(&rdev->exclusive_lock); |
r = radeon_cs_handle_lockup(rdev, r); |
return r; |
} |
/drivers/video/drm/radeon/radeon_device.c |
---|
1282,9 → 1282,8 |
mutex_init(&rdev->gpu_clock_mutex); |
mutex_init(&rdev->srbm_mutex); |
mutex_init(&rdev->grbm_idx_mutex); |
// init_rwsem(&rdev->pm.mclk_lock); |
// init_rwsem(&rdev->exclusive_lock); |
init_rwsem(&rdev->pm.mclk_lock); |
init_rwsem(&rdev->exclusive_lock); |
init_waitqueue_head(&rdev->irq.vblank_queue); |
mutex_init(&rdev->mn_lock); |
// hash_init(rdev->mn_hash); |
1456,9 → 1455,13 |
int i, r; |
int resched; |
// down_write(&rdev->exclusive_lock); |
rdev->needs_reset = false; |
down_write(&rdev->exclusive_lock); |
if (!rdev->needs_reset) { |
up_write(&rdev->exclusive_lock); |
return 0; |
} |
radeon_save_bios_scratch_regs(rdev); |
/* block TTM */ |
// resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
1498,7 → 1501,10 |
dev_info(rdev->dev, "GPU reset failed\n"); |
} |
// up_write(&rdev->exclusive_lock); |
rdev->needs_reset = r == -EAGAIN; |
rdev->in_reset = false; |
up_read(&rdev->exclusive_lock); |
return r; |
} |
/drivers/video/drm/radeon/radeon_gem.c |
---|
229,10 → 229,9 |
return r; |
} |
static int radeon_mode_mmap(struct drm_file *filp, |
int radeon_mode_dumb_mmap(struct drm_file *filp, |
struct drm_device *dev, |
uint32_t handle, bool dumb, |
uint64_t *offset_p) |
uint32_t handle, uint64_t *offset_p) |
{ |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
241,14 → 240,6 |
if (gobj == NULL) { |
return -ENOENT; |
} |
/* |
* We don't allow dumb mmaps on objects created using another |
* interface. |
*/ |
WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach), |
"Illegal dumb map of GPU buffer.\n"); |
robj = gem_to_radeon_bo(gobj); |
*offset_p = radeon_bo_mmap_offset(robj); |
drm_gem_object_unreference_unlocked(gobj); |
260,8 → 251,7 |
{ |
struct drm_radeon_gem_mmap *args = data; |
return radeon_mode_mmap(filp, dev, args->handle, false, |
&args->addr_ptr); |
return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); |
} |
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
/drivers/video/drm/radeon/radeon_object.c |
---|
241,11 → 241,11 |
radeon_ttm_placement_from_domain(bo, domain); |
/* Kernel allocation are uninterruptible */ |
// down_read(&rdev->pm.mclk_lock); |
down_read(&rdev->pm.mclk_lock); |
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, |
&bo->placement, page_align, !kernel, NULL, |
acc_size, sg, resv, &radeon_ttm_bo_destroy); |
// up_read(&rdev->pm.mclk_lock); |
up_read(&rdev->pm.mclk_lock); |
if (unlikely(r != 0)) { |
return r; |
} |
488,9 → 488,6 |
u32 current_domain = |
radeon_mem_type_to_domain(bo->tbo.mem.mem_type); |
WARN_ONCE(bo->gem_base.dumb, |
"GPU use of dumb buffer is illegal.\n"); |
/* Check if this buffer will be moved and don't move it |
* if we have moved too many buffers for this IB already. |
* |
/drivers/video/drm/radeon/radeon_pm.c |
---|
251,7 → 251,7 |
return; |
mutex_lock(&rdev->ddev->struct_mutex); |
// down_write(&rdev->pm.mclk_lock); |
down_write(&rdev->pm.mclk_lock); |
mutex_lock(&rdev->ring_lock); |
/* wait for the rings to drain */ |
264,7 → 264,7 |
if (r) { |
/* needs a GPU reset dont reset here */ |
mutex_unlock(&rdev->ring_lock); |
// up_write(&rdev->pm.mclk_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
return; |
} |
300,7 → 300,7 |
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
mutex_unlock(&rdev->ring_lock); |
// up_write(&rdev->pm.mclk_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
} |
872,7 → 872,7 |
} |
mutex_lock(&rdev->ddev->struct_mutex); |
// down_write(&rdev->pm.mclk_lock); |
down_write(&rdev->pm.mclk_lock); |
mutex_lock(&rdev->ring_lock); |
/* update whether vce is active */ |
920,7 → 920,7 |
done: |
mutex_unlock(&rdev->ring_lock); |
// up_write(&rdev->pm.mclk_lock); |
up_write(&rdev->pm.mclk_lock); |
mutex_unlock(&rdev->ddev->struct_mutex); |
} |
/drivers/video/drm/ttm/ttm_page_alloc.c |
---|
43,7 → 43,7 |
#include <linux/slab.h> |
//#include <linux/dma-mapping.h> |
//#include <linux/atomic.h> |
#include <linux/atomic.h> |
#include <drm/ttm/ttm_bo_driver.h> |
#include <drm/ttm/ttm_page_alloc.h> |