
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
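
/*
 * Illustrative sketch (not part of this file): how a userspace client
 * exercises the handle-based interface described above.  Handles come from
 * driver-specific allocation ioctls; the generic GEM_FLINK, GEM_OPEN and
 * GEM_CLOSE ioctls implemented below then let processes name, share and
 * release them, mimicking the fd syscalls.  Header paths and error handling
 * are simplified here.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static void share_and_close(int fd, uint32_t handle)
{
        struct drm_gem_flink flink = { .handle = handle };
        struct drm_gem_open open_arg = { 0 };
        struct drm_gem_close close_arg = { 0 };

        /* Publish a global name for the object (GEM_FLINK). */
        ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);

        /* Another process (or this one) can open it by that name. */
        open_arg.name = flink.name;
        ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);

        /* Each handle is released individually, like close(2) on an fd. */
        close_arg.handle = open_arg.handle;
        ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}
#endif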

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
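
/*
 * Worked example, assuming the usual PAGE_SHIFT of 12 (4 KiB pages): on
 * 64-bit, START = (0xFFFFFFFF >> 12) + 1 = 0x100000 pages, so the fake
 * offsets begin at the 4 GiB mark, and SIZE = 0xFFFFF * 16 pages gives a
 * roughly 64 GiB window to allocate them from.  On 32-bit the same
 * arithmetic yields a start of 0x10000 pages (256 MiB) and a window of
 * about 4 GiB worth of pages, keeping every faked page offset well within
 * an unsigned long.
 */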

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
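
/*
 * Illustrative sketch (not part of this file): the usual pattern for a
 * driver allocating a shmem-backed object.  The my_bo wrapper struct and
 * helper name are hypothetical.
 */
#if 0
struct my_bo {
        struct drm_gem_object base;
        /* driver-private state follows */
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
        struct my_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
                return ERR_PTR(-ENOMEM);

        /* Size must be page-aligned; see the BUG_ON in
         * drm_gem_private_object_init() below. */
        if (drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size))) {
                kfree(bo);
                return ERR_PTR(-ENOMEM);
        }

        return bo;
}
#endif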

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
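
/*
 * Illustrative sketch (not part of this file): private initialization is
 * what a driver uses when the pages come from somewhere else, typically an
 * imported dma-buf, so no shmem file should be attached.  The import helper
 * shown here is hypothetical and heavily simplified.
 */
#if 0
static struct drm_gem_object *
my_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
        struct drm_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        /* No shmem file is attached; the exporter owns the real pages,
         * so obj->filp stays NULL and the driver supplies the backing. */
        drm_gem_private_object_init(dev, obj, PAGE_ALIGN(dma_buf->size));

        return obj;
}
#endif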

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

//      drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
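
/*
 * Illustrative sketch (not part of this file): a GEM-based driver simply
 * points its dumb-buffer callbacks at these helpers.  The driver struct and
 * the create/map callbacks named here are hypothetical.
 */
#if 0
static struct drm_driver my_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET,
        .dumb_create     = my_dumb_create,
        .dumb_map_offset = my_dumb_map_offset,
        .dumb_destroy    = drm_gem_dumb_destroy,
};
#endif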

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

//      ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//      if (ret) {
//              drm_gem_handle_delete(file_priv, *handlep);
//              return ret;
//      }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
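
/*
 * Illustrative sketch (not part of this file): the typical tail of a
 * driver's allocation ioctl.  Once the handle exists it keeps the object
 * alive, so the allocation reference is dropped before returning, as the
 * kernel-doc above suggests.  my_bo is the hypothetical wrapper from the
 * drm_gem_object_init() sketch.
 */
#if 0
static int my_create_handle(struct drm_file *file_priv,
                            struct my_bo *bo, u32 *handlep)
{
        int ret;

        ret = drm_gem_handle_create(file_priv, &bo->base, handlep);

        /* Drop the reference from allocation; the handle now owns one. */
        drm_gem_object_unreference_unlocked(&bo->base);

        return ret;
}
#endif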

#if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
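
/*
 * Illustrative sketch (not part of this file, and inside the same disabled
 * region as the helpers it calls): a driver's dumb_map_offset callback
 * usually just pins down a fake offset and hands back its address for the
 * subsequent mmap(2) call.  The callback name is hypothetical.
 */
static int my_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                              u32 handle, u64 *offset)
{
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, handle);
        if (obj == NULL)
                return -ENOENT;

        ret = drm_gem_create_mmap_offset(obj);
        if (ret == 0)
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        drm_gem_object_unreference_unlocked(obj);
        return ret;
}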

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = file_inode(obj->filp)->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                                (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
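
/*
 * Illustrative sketch (not part of this file, and inside the same disabled
 * region): drm_gem_get_pages() and drm_gem_put_pages() bracket whatever the
 * driver does with the backing store, e.g. building a scatterlist.  The
 * my_bind_pages() step is hypothetical.
 */
static int my_pin_and_use(struct drm_gem_object *obj)
{
        struct page **pages;
        int ret;

        pages = drm_gem_get_pages(obj);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        ret = my_bind_pages(obj, pages, obj->size >> PAGE_SHIFT);

        /* Mark pages dirty/accessed so swap-out writes them back. */
        drm_gem_put_pages(obj, pages, true, true);
        return ret;
}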

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
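
/*
 * Illustrative sketch (not part of this file): the standard
 * lookup/use/unreference pattern in a driver ioctl.  The args struct and
 * my_do_something() are hypothetical.
 */
#if 0
static int my_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct my_args *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        ret = my_do_something(obj);

        /* Drop the lookup reference; the handle keeps the object alive. */
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}
#endif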

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}

/**
 * drm_gem_open - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

#if 0
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
#endif

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                free(obj->filp);        /* KolibriOS port: the stub file is freed directly (upstream uses fput()) */
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
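
/*
 * Illustrative sketch (not part of this file): a driver's gem_free_object
 * callback, reached via drm_gem_object_free() above, releases the common
 * GEM state and then frees its own wrapper.  my_bo is the hypothetical
 * wrapper from the drm_gem_object_init() sketch.
 */
#if 0
static void my_gem_free_object(struct drm_gem_object *obj)
{
        struct my_bo *bo = container_of(obj, struct my_bo, base);

        /* Release the common GEM state (backing store, dma-buf check). */
        drm_gem_object_release(obj);
        kfree(bo);
}
#endif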

#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif