787,7 → 787,13 |
} |
EXPORT_SYMBOL(drm_gem_object_free); |
|
|
/** |
* drm_gem_vm_open - vma->ops->open implementation for GEM |
* @vma: VM area structure |
* |
* This function implements the #vm_operations_struct open() callback for GEM |
* drivers. This must be used together with drm_gem_vm_close(). |
*/ |
#if 0 |
void drm_gem_vm_open(struct vm_area_struct *vma) |
{ |
797,19 → 803,135 |
} |
EXPORT_SYMBOL(drm_gem_vm_open); |
|
/** |
* drm_gem_vm_close - vma->ops->close implementation for GEM |
* @vma: VM area structure |
* |
* This function implements the #vm_operations_struct close() callback for GEM |
* drivers. This must be used together with drm_gem_vm_open(). |
*/ |
void drm_gem_vm_close(struct vm_area_struct *vma) |
{ |
struct drm_gem_object *obj = vma->vm_private_data; |
struct drm_device *dev = obj->dev; |
|
mutex_lock(&dev->struct_mutex); |
drm_vm_close_locked(obj->dev, vma); |
drm_gem_object_unreference(obj); |
mutex_unlock(&dev->struct_mutex); |
drm_gem_object_unreference_unlocked(obj); |
} |
EXPORT_SYMBOL(drm_gem_vm_close); |
|
#endif |
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	/* Check for valid size: the VMA must fit within the object. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Without driver-provided vm_ops there is nothing to service faults. */
	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	/* I/O-style mapping: no expansion, no core dumps, PFN-mapped pages. */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
|
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 *
 * Returns 0 on success, -ENODEV if the device was unplugged, -EINVAL if no
 * live object matches the offset, -EACCES if the caller is not allowed to
 * map it, or whatever drm_gem_mmap_obj() returns.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	/* Resolve the fake offset to an offset node under the manager's
	 * lookup lock; the exact-lookup variant requires the whole VMA range
	 * to be covered by the node. */
	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	/* Access control is per-open-file; reject unauthorized mappers. */
	if (!drm_vma_node_is_allowed(node, filp)) {
		drm_gem_object_unreference_unlocked(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	/* drm_gem_mmap_obj() took its own mapping reference on success, so
	 * the lookup reference acquired above is dropped unconditionally. */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
#endif |