/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

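/*
 * A minimal sketch of the handle-based flow described above, as a driver
 * object-creation ioctl might use the helpers in this file.  Illustrative
 * only: real drivers normally embed struct drm_gem_object inside their own
 * buffer object, and "size", "file_priv" and the error label here are
 * placeholder names.
 *
 *      struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *      u32 handle;
 *      int ret;
 *
 *      if (!obj)
 *              return -ENOMEM;
 *
 *      ret = drm_gem_object_init(dev, obj, PAGE_ALIGN(size));
 *      if (ret)
 *              goto err_free;
 *
 *      ret = drm_gem_handle_create(file_priv, obj, &handle);
 *      drm_gem_object_unreference_unlocked(obj);
 *      if (ret)
 *              return ret;
 *
 *      The handle now keeps the object alive; drm_gem_handle_delete()
 *      (e.g. via the GEM_CLOSE ioctl below) drops that reference again.
 */
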
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

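/*
 * Worked example (assuming, for illustration, PAGE_SHIFT == 12, i.e. 4 KiB
 * pages):
 *
 *   64-bit: START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages (fake offsets
 *           begin above the first 4 GiB), SIZE = 0xFFFFF * 16 pages
 *           (roughly 64 GiB of fake offset space).
 *   32-bit: START = (0xFFFFFFF >> 12) + 1 = 0x10000 pages (fake offsets
 *           begin above the first 256 MiB), SIZE = 0xFFFF * 16 pages
 *           (roughly 4 GiB), which still fits an unsigned long pgoff.
 */
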
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

//      drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

//      ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//      if (ret) {
//              drm_gem_handle_delete(file_priv, *handlep);
//              return ret;
//      }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);

#if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
        struct inode *inode;
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        inode = file_inode(obj->filp);
        mapping = inode->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* There is a hypothetical issue w/ drivers that require
                 * buffer memory in the low 4GB.. if the pages are un-
                 * pinned, and swapped out, they can end up swapped back
                 * in above 4GB.  If pages are already in memory, then
                 * shmem_read_mapping_page_gfp will ignore the gfpmask,
                 * even if the already in-memory page disobeys the mask.
                 *
                 * It is only a theoretical issue today, because none of
                 * the devices with this limitation can be populated with
                 * enough memory to trigger the issue.  But this BUG_ON()
                 * is here as a reminder in case the problem with
                 * shmem_read_mapping_page_gfp() isn't solved by the time
                 * it does become a real issue.
                 *
                 * See this thread: http://lkml.org/lkml/2011/7/11/238
                 */
                BUG_ON((gfpmask & __GFP_DMA32) &&
                       (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

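/*
 * A minimal sketch of how a driver ioctl typically uses the lookup helper
 * above (illustrative only; "args" and the per-driver work are placeholders):
 *
 *      struct drm_gem_object *obj;
 *
 *      obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *      if (obj == NULL)
 *              return -ENOENT;
 *
 *      ... operate on the object while holding this reference ...
 *
 *      drm_gem_object_unreference_unlocked(obj);
 *      return 0;
 */
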
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

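/*
 * A minimal userspace-side sketch of the flink/open flow implemented by the
 * two ioctls above, assuming the standard DRM_IOCTL_GEM_FLINK and
 * DRM_IOCTL_GEM_OPEN request numbers from the DRM uapi header ("fd" and
 * "handle" are placeholders for an open DRM device and an existing GEM
 * handle):
 *
 *      struct drm_gem_flink flink = { .handle = handle };
 *
 *      if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) == 0) {
 *              ... flink.name can now be passed to another process ...
 *      }
 *
 *      In the receiving process:
 *
 *      struct drm_gem_open open_args = { .name = flink.name };
 *
 *      if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args) == 0) {
 *              ... open_args.handle and open_args.size now refer to the
 *              same underlying object ...
 *      }
 */
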
#if 0
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
#endif

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        if (obj->filp)
                free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif