Subversion Repositories Kolibri OS

Rev

Rev 4075 | Rev 4246 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *
  26.  */
  27.  
  28. #include <linux/types.h>
  29. #include <linux/slab.h>
  30. #include <linux/mm.h>
  31. #include <linux/module.h>
  32. #include <linux/shmem_fs.h>
  33. #include <linux/err.h>
  34. #include <drm/drmP.h>
  35. #include <drm/drm_vma_manager.h>
  36.  
  37. /** @file drm_gem.c
  38.  *
  39.  * This file provides some of the base ioctls and library routines for
  40.  * the graphics memory manager implemented by each device driver.
  41.  *
  42.  * Because various devices have different requirements in terms of
  43.  * synchronization and migration strategies, implementing that is left up to
  44.  * the driver, and all that the general API provides should be generic --
  45.  * allocating objects, reading/writing data with the cpu, freeing objects.
  46.  * Even there, platform-dependent optimizations for reading/writing data with
  47.  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
  48.  * the DRI2 implementation wants to have at least allocate/mmap be generic.
  49.  *
  50.  * The goal was to have swap-backed object allocation managed through
  51.  * struct file.  However, file descriptors as handles to a struct file have
  52.  * two major failings:
  53.  * - Process limits prevent more than 1024 or so being used at a time by
  54.  *   default.
  55.  * - Inability to allocate high fds will aggravate the X Server's select()
  56.  *   handling, and likely that of many GL client applications as well.
  57.  *
  58.  * This led to a plan of using our own integer IDs (called handles, following
  59.  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  60.  * ioctls.  The objects themselves will still include the struct file so
  61.  * that we can transition to fds if the required kernel infrastructure shows
  62.  * up at a later date, and as our interface with shmfs for memory allocation.
  63.  */
  64.  
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

/* On 64-bit, the fake offsets start one page above the 4GiB boundary
 * (in page units) so they can never collide with real 32-bit file
 * offsets; the window spans 16x that range.  On 32-bit, pgoff itself is
 * only 32 bits wide, so a proportionally smaller 0xFFFFFFF-based window
 * is used instead.
 */
#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
  81.  
  82. /**
  83.  * Initialize the GEM device fields
  84.  */
  85.  
  86. int
  87. drm_gem_init(struct drm_device *dev)
  88. {
  89.         struct drm_gem_mm *mm;
  90.  
  91.         mutex_init(&dev->object_name_lock);
  92.         idr_init(&dev->object_name_idr);
  93.  
  94.         mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
  95.         if (!mm) {
  96.                 DRM_ERROR("out of memory\n");
  97.                 return -ENOMEM;
  98.         }
  99.  
  100.         dev->mm_private = mm;
  101.         drm_vma_offset_manager_init(&mm->vma_manager,
  102.                                     DRM_FILE_PAGE_OFFSET_START,
  103.                     DRM_FILE_PAGE_OFFSET_SIZE);
  104.  
  105.         return 0;
  106. }
  107.  
  108. void
  109. drm_gem_destroy(struct drm_device *dev)
  110. {
  111.         struct drm_gem_mm *mm = dev->mm_private;
  112.  
  113.         drm_vma_offset_manager_destroy(&mm->vma_manager);
  114.         kfree(mm);
  115.         dev->mm_private = NULL;
  116. }
  117.  
  118. /**
  119.  * Initialize an already allocated GEM object of the specified size with
  120.  * shmfs backing store.
  121.  */
  122. int drm_gem_object_init(struct drm_device *dev,
  123.                         struct drm_gem_object *obj, size_t size)
  124. {
  125.         struct file *filp;
  126.  
  127.         filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
  128.         if (IS_ERR(filp))
  129.                 return PTR_ERR(filp);
  130.  
  131.         drm_gem_private_object_init(dev, obj, size);
  132.         obj->filp = filp;
  133.  
  134.         return 0;
  135. }
  136. EXPORT_SYMBOL(drm_gem_object_init);
  137.  
  138. /**
  139.  * Initialize an already allocated GEM object of the specified size with
  140.  * no GEM provided backing store. Instead the caller is responsible for
  141.  * backing the object and handling it.
  142.  */
  143. void drm_gem_private_object_init(struct drm_device *dev,
  144.                         struct drm_gem_object *obj, size_t size)
  145. {
  146.         BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  147.  
  148.         obj->dev = dev;
  149.         obj->filp = NULL;
  150.  
  151.         kref_init(&obj->refcount);
  152.         obj->handle_count = 0;
  153.         obj->size = size;
  154.         drm_vma_node_reset(&obj->vma_node);
  155. }
  156. EXPORT_SYMBOL(drm_gem_private_object_init);
  157.  
  158. /**
  159.  * Allocate a GEM object of the specified size with shmfs backing store
  160.  */
  161. struct drm_gem_object *
  162. drm_gem_object_alloc(struct drm_device *dev, size_t size)
  163. {
  164.         struct drm_gem_object *obj;
  165.  
  166.         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
  167.         if (!obj)
  168.                 goto free;
  169.  
  170.         if (drm_gem_object_init(dev, obj, size) != 0)
  171.                 goto free;
  172.  
  173.         if (dev->driver->gem_init_object != NULL &&
  174.             dev->driver->gem_init_object(obj) != 0) {
  175.                 goto fput;
  176.         }
  177.         return obj;
  178. fput:
  179.         /* Object_init mangles the global counters - readjust them. */
  180.         free(obj->filp);
  181. free:
  182.         kfree(obj);
  183.         return NULL;
  184. }
  185. EXPORT_SYMBOL(drm_gem_object_alloc);
  186.  
/* kref release callback that must never fire.  It is passed to kref_put()
 * at call sites where the reference being dropped is known not to be the
 * last one (another reference is still held), so reaching this indicates
 * broken refcounting.
 */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
        BUG();
}
  191.  
  192. /**
  193.  * Called after the last handle to the object has been closed
  194.  *
  195.  * Removes any name for the object. Note that this must be
  196.  * called before drm_gem_object_free or we'll be touching
  197.  * freed memory
  198.  */
  199. static void drm_gem_object_handle_free(struct drm_gem_object *obj)
  200. {
  201.         struct drm_device *dev = obj->dev;
  202.  
  203.         /* Remove any name for this object */
  204.         if (obj->name) {
  205.                 idr_remove(&dev->object_name_idr, obj->name);
  206.                 obj->name = 0;
  207.                 /*
  208.                  * The object name held a reference to this object, drop
  209.                  * that now.
  210.                 *
  211.                 * This cannot be the last reference, since the handle holds one too.
  212.                  */
  213.                 kref_put(&obj->refcount, drm_gem_object_ref_bug);
  214.         }
  215. }
  216.  
  217.  
/*
 * Drop one handle reference on @obj; when it was the last handle, tear
 * down the object's global name under object_name_lock.
 *
 * Ordering matters here: handle_count is decremented and the name freed
 * while object_name_lock is held, but the underlying object reference is
 * only dropped after the lock is released, so a final free never runs
 * under object_name_lock.
 */
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}
  238.  
/**
 * Removes the mapping from handle to filp for this object.
 *
 * Returns 0 on success, or -EINVAL when @handle is not a live handle in
 * @filp's object table.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
    /* NOTE(review): debug trap for a magic handle value; -2 is converted
     * to u32 by the comparison, so this fires only for 0xfffffffe. */
    if(handle == -2)
        printf("%s handle %d\n", __FUNCTION__, handle);

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

 //   printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj);

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

//   drm_gem_remove_prime_handles(obj, filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
  284. EXPORT_SYMBOL(drm_gem_handle_delete);
  285.  
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * The caller must hold dev->object_name_lock; this function releases it
 * before returning, on both the success and error paths.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                       struct drm_gem_object *obj,
                       u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        /* Handles start at 1; 0 stays reserved as "no handle". */
        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        /* Take the handle reference even if idr_alloc failed, so the
         * error path below can reuse the common unreference helper. */
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

//   ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//   if (ret) {
//       drm_gem_handle_delete(file_priv, *handlep);
//       return ret;
//   }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}
  336.  
  337. /**
  338.  * Create a handle for this object. This adds a handle reference
  339.  * to the object, which includes a regular reference count. Callers
  340.  * will likely want to dereference the object afterwards.
  341.  */
  342. int
  343. drm_gem_handle_create(struct drm_file *file_priv,
  344.                        struct drm_gem_object *obj,
  345.                        u32 *handlep)
  346. {
  347.         mutex_lock(&obj->dev->object_name_lock);
  348.  
  349.         return drm_gem_handle_create_tail(file_priv, obj, handlep);
  350. }
  351. EXPORT_SYMBOL(drm_gem_handle_create);
  352.  
  353.  
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
/* NOTE(review): everything down to the matching #endif is compiled out in
 * this port.  drm_gem_create_mmap_offset_size() below references
 * undeclared locals (list, map, ret) and the legacy drm_mm/hashtab API
 * instead of the vma_manager set up in drm_gem_init(), so it would not
 * build if re-enabled.  Also note the trailing EXPORT_SYMBOL names
 * drm_gem_create_mmap_offset, not ..._offset_size. */
#if 0
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;

        drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                        obj->size / PAGE_SIZE, 0, false);

        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOSPC;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                        obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        list->hash.key = list->file_offset_node->start;
        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
        if (ret) {
                DRM_ERROR("failed to add to map hash\n");
                goto out_free_mm;
        }

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);
        list->map = NULL;

        return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
#endif
  438.  
  439. /** Returns a reference to the object named by the handle. */
  440. struct drm_gem_object *
  441. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  442.                       u32 handle)
  443. {
  444.         struct drm_gem_object *obj;
  445.  
  446.      if(handle == -2)
  447.         printf("%s handle %d\n", __FUNCTION__, handle);
  448.  
  449.         spin_lock(&filp->table_lock);
  450.  
  451.         /* Check if we currently have a reference on the object */
  452.         obj = idr_find(&filp->object_idr, handle);
  453.         if (obj == NULL) {
  454.                 spin_unlock(&filp->table_lock);
  455.                 return NULL;
  456.         }
  457.  
  458.         drm_gem_object_reference(obj);
  459.  
  460.         spin_unlock(&filp->table_lock);
  461.  
  462.         return obj;
  463. }
  464. EXPORT_SYMBOL(drm_gem_object_lookup);
  465.  
  466. /**
  467.  * Releases the handle to an mm object.
  468.  */
  469. int
  470. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  471.                     struct drm_file *file_priv)
  472. {
  473.         struct drm_gem_close *args = data;
  474.         int ret;
  475.  
  476.         ret = drm_gem_handle_delete(file_priv, args->handle);
  477.  
  478.         return ret;
  479. }
  480.  
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */

/* NOTE(review): the flink/open ioctls and the per-file open/release hooks
 * below are compiled out in this port. */
#if 0
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;

                /* Allocate a reference for the name table.  */
                drm_gem_object_reference(obj);
        }

                args->name = (uint64_t) obj->name;
                ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

    /* NOTE(review): this debug trap reads `handle` before it is assigned
     * by drm_gem_handle_create_tail() below — an uninitialized read.  It
     * presumably meant to test args->name; fix before re-enabling. */
    if(handle == -2)
        printf("%s handle %d\n", __FUNCTION__, handle);

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_gem_remove_prime_handles(obj, file_priv);
        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
#endif
  621.  
/**
 * drm_gem_object_release - free resources bound to the object
 * @obj: GEM object being torn down
 *
 * Releases the backing store attached by drm_gem_object_init().  Objects
 * set up with drm_gem_private_object_init() have filp == NULL and nothing
 * to release here.  This port frees the shmem file with free() rather
 * than fput().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
            free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
  629.  
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding dev->struct_mutex.
 *
 * Frees the object via the driver's gem_free_object hook.
 */
void
drm_gem_object_free(struct kref *kref)
{
        /* NOTE(review): this cast assumes refcount is the first member of
         * struct drm_gem_object; upstream uses container_of() here.
         * Confirm the layout if the struct is ever reordered. */
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
  648.  
  649.  
/* NOTE(review): the VM open/close helpers below are compiled out in this
 * port; they depend on drm_vm_open_locked()/drm_vm_close_locked() and a
 * struct vm_area_struct, none of which are used elsewhere in this file. */
#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif
  676.  
  677.  
  678.  
  679.