/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
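
/*
 * For example, assuming 4 KiB pages (PAGE_SHIFT == 12) on a 64-bit build:
 * DRM_FILE_PAGE_OFFSET_START is (0xFFFFFFFF >> 12) + 1 = 0x100000 pages,
 * i.e. fake offsets begin at the 4 GiB mark and can never collide with
 * real pgoffs of the first 4 GiB of a mapping.  DRM_FILE_PAGE_OFFSET_SIZE
 * then spans 0xFFFFF * 16 pages, roughly 64 GiB of offset space for the
 * vma offset manager to hand out.
 */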

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
        struct drm_vma_offset_manager *vma_offset_manager;

        mutex_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
        if (!vma_offset_manager) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->vma_offset_manager = vma_offset_manager;
        drm_vma_offset_manager_init(vma_offset_manager,
                                    DRM_FILE_PAGE_OFFSET_START,
                                    DRM_FILE_PAGE_OFFSET_SIZE);

        return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
        drm_vma_offset_manager_destroy(dev->vma_offset_manager);
        kfree(dev->vma_offset_manager);
        dev->vma_offset_manager = NULL;
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        struct file *filp;

        drm_gem_private_object_init(dev, obj, size);

        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;

        return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
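
/*
 * Illustrative driver-side usage (a sketch only; struct foo_bo and
 * foo_bo_create() are hypothetical names, not part of this file).  Drivers
 * normally embed struct drm_gem_object in their own buffer type and call
 * drm_gem_object_init() on the embedded member; size must be page-aligned,
 * see the BUG_ON in drm_gem_private_object_init():
 *
 *      struct foo_bo {
 *              struct drm_gem_object base;
 *      };
 *
 *      static struct foo_bo *foo_bo_create(struct drm_device *dev,
 *                                          size_t size)
 *      {
 *              struct foo_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *              int ret;
 *
 *              if (!bo)
 *                      return ERR_PTR(-ENOMEM);
 *              ret = drm_gem_object_init(dev, &bo->base, size);
 *              if (ret) {
 *                      kfree(bo);
 *                      return ERR_PTR(ret);
 *              }
 *              return bo;
 *      }
 */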

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                                 struct drm_gem_object *obj, size_t size)
{
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
         * Must bump handle count first as this may be the last
         * ref, in which case the object would disappear before we
         * checked for a name
         */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        drm_gem_object_unreference_unlocked(obj);
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

//      drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);

        drm_gem_object_handle_unreference_unlocked(obj);
        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                           struct drm_gem_object *obj,
                           u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0)
                goto err_unref;

        *handlep = ret;

//      ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//      if (ret) {
//              drm_gem_handle_delete(file_priv, *handlep);
//              return ret;
//      }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret)
                        goto err_revoke;
        }

        return 0;

err_revoke:
//      drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
err_remove:
        spin_lock(&file_priv->table_lock);
        idr_remove(&file_priv->object_idr, *handlep);
        spin_unlock(&file_priv->table_lock);
err_unref:
        drm_gem_object_handle_unreference_unlocked(obj);
        return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to drop their reference to the object afterwards.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
                          struct drm_gem_object *obj,
                          u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
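
/*
 * Typical call pattern (a sketch, not lifted from any one driver): the new
 * handle holds its own reference, so a caller that only needed the handle
 * can drop its creation reference immediately afterwards:
 *
 *      u32 handle;
 *      int ret;
 *
 *      ret = drm_gem_handle_create(file_priv, obj, &handle);
 *      drm_gem_object_unreference_unlocked(obj);
 *      if (ret)
 *              return ret;
 */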

#if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = file_inode(obj->filp)->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
                                (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
        struct drm_gem_object *obj;

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return NULL;
        }

        drm_gem_object_reference(obj);

        spin_unlock(&filp->table_lock);

        return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
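
/*
 * Typical lookup pattern (a sketch): the returned pointer carries a
 * reference that the caller must drop once it is done with the object:
 *
 *      obj = drm_gem_object_lookup(dev, file_priv, handle);
 *      if (obj == NULL)
 *              return -ENOENT;
 *      ... use obj ...
 *      drm_gem_object_unreference_unlocked(obj);
 */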

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_close *args = data;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        ret = drm_gem_handle_delete(file_priv, args->handle);

        return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        mutex_lock(&dev->object_name_lock);
        idr_preload(GFP_KERNEL);
        /* prevent races with concurrent gem_close. */
        if (obj->handle_count == 0) {
                ret = -ENOENT;
                goto err;
        }

        if (!obj->name) {
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                if (ret < 0)
                        goto err;

                obj->name = ret;
        }

        args->name = (uint64_t) obj->name;
        ret = 0;

err:
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}
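
/*
 * How GEM_FLINK and GEM_OPEN pair up from userspace (a sketch; fd_a and
 * fd_b stand for two hypothetical file descriptors opened on the same
 * device node by different processes):
 *
 *      struct drm_gem_flink flink = { .handle = handle };
 *      ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *      (flink.name is now a global name)
 *
 *      struct drm_gem_open open_arg = { .name = flink.name };
 *      ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open_arg);
 *      (open_arg.handle and open_arg.size are filled in)
 */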

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
                   struct drm_file *file_priv)
{
        struct drm_gem_open *args = data;
        struct drm_gem_object *obj;
        int ret;
        u32 handle;

        if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;

        mutex_lock(&dev->object_name_lock);
        obj = idr_find(&dev->object_name_idr, (int) args->name);
        if (obj) {
                drm_gem_object_reference(obj);
        } else {
                mutex_unlock(&dev->object_name_lock);
                return -ENOENT;
        }

        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        args->size = obj->size;

        return 0;
}

#if 0
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
#endif

void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
                free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
        struct drm_gem_object *obj =
                container_of(kref, struct drm_gem_object, refcount);
        struct drm_device *dev = obj->dev;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#if 0
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

#endif