Subversion Repositories Kolibri OS

Rev

Rev 4560 | Rev 5271 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *
  26.  */
  27.  
  28. #include <linux/types.h>
  29. #include <linux/slab.h>
  30. #include <linux/mm.h>
  31. #include <linux/fs.h>
  32. #include <linux/file.h>
  33. #include <linux/module.h>
  34. #include <linux/shmem_fs.h>
  35. #include <linux/err.h>
  36. #include <drm/drmP.h>
  37. #include <drm/drm_vma_manager.h>
  38.  
  39. /** @file drm_gem.c
  40.  *
  41.  * This file provides some of the base ioctls and library routines for
  42.  * the graphics memory manager implemented by each device driver.
  43.  *
  44.  * Because various devices have different requirements in terms of
  45.  * synchronization and migration strategies, implementing that is left up to
  46.  * the driver, and all that the general API provides should be generic --
  47.  * allocating objects, reading/writing data with the cpu, freeing objects.
  48.  * Even there, platform-dependent optimizations for reading/writing data with
  49.  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
  50.  * the DRI2 implementation wants to have at least allocate/mmap be generic.
  51.  *
  52.  * The goal was to have swap-backed object allocation managed through
  53.  * struct file.  However, file descriptors as handles to a struct file have
  54.  * two major failings:
  55.  * - Process limits prevent more than 1024 or so being used at a time by
  56.  *   default.
  57.  * - Inability to allocate high fds will aggravate the X Server's select()
  58.  *   handling, and likely that of many GL client applications as well.
  59.  *
  60.  * This led to a plan of using our own integer IDs (called handles, following
  61.  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  62.  * ioctls.  The objects themselves will still include the struct file so
  63.  * that we can transition to fds if the required kernel infrastructure shows
  64.  * up at a later date, and as our interface with shmfs for memory allocation.
  65.  */
  66.  
  67. /*
  68.  * We make up offsets for buffer objects so we can recognize them at
  69.  * mmap time.
  70.  */
  71.  
  72. /* pgoff in mmap is an unsigned long, so we need to make sure that
  73.  * the faked up offset will fit
  74.  */
  75.  
  76. #if BITS_PER_LONG == 64
  77. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  78. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  79. #else
  80. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
  81. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  82. #endif
  83.  
  84. /**
  85.  * drm_gem_init - Initialize the GEM device fields
  86.  * @dev: drm_devic structure to initialize
  87.  */
  88. int
  89. drm_gem_init(struct drm_device *dev)
  90. {
  91.         struct drm_vma_offset_manager *vma_offset_manager;
  92.  
  93.         mutex_init(&dev->object_name_lock);
  94.         idr_init(&dev->object_name_idr);
  95.  
  96.         vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
  97.         if (!vma_offset_manager) {
  98.                 DRM_ERROR("out of memory\n");
  99.                 return -ENOMEM;
  100.         }
  101.  
  102.         dev->vma_offset_manager = vma_offset_manager;
  103.         drm_vma_offset_manager_init(vma_offset_manager,
  104.                                     DRM_FILE_PAGE_OFFSET_START,
  105.                     DRM_FILE_PAGE_OFFSET_SIZE);
  106.  
  107.         return 0;
  108. }
  109.  
  110. void
  111. drm_gem_destroy(struct drm_device *dev)
  112. {
  113.  
  114.         drm_vma_offset_manager_destroy(dev->vma_offset_manager);
  115.         kfree(dev->vma_offset_manager);
  116.         dev->vma_offset_manager = NULL;
  117. }
  118.  
  119. /**
  120.  * drm_gem_object_init - initialize an allocated shmem-backed GEM object
  121.  * @dev: drm_device the object should be initialized for
  122.  * @obj: drm_gem_object to initialize
  123.  * @size: object size
  124.  *
  125.  * Initialize an already allocated GEM object of the specified size with
  126.  * shmfs backing store.
  127.  */
  128. int drm_gem_object_init(struct drm_device *dev,
  129.                         struct drm_gem_object *obj, size_t size)
  130. {
  131.         struct file *filp;
  132.  
  133.         drm_gem_private_object_init(dev, obj, size);
  134.  
  135.         filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
  136.         if (IS_ERR(filp))
  137.                 return PTR_ERR(filp);
  138.  
  139.         obj->filp = filp;
  140.  
  141.         return 0;
  142. }
  143. EXPORT_SYMBOL(drm_gem_object_init);
  144.  
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size)
{
        /* GEM objects must be whole pages; catch driver bugs early. */
        BUG_ON((size & (PAGE_SIZE - 1)) != 0);

        obj->dev = dev;
        obj->filp = NULL;       /* no shmem backing for private objects */

        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
        drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
  169.  
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any flink name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory. Caller holds dev->object_name_lock (see the only
 * caller, drm_gem_object_handle_unreference_unlocked()).
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
        }
}
  190.  
  191.  
/*
 * Drop one handle reference on @obj. When the handle count reaches zero the
 * flink name is removed under object_name_lock, and the regular reference
 * held on behalf of the handle is released afterwards.
 */
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
        if (WARN_ON(obj->handle_count == 0))
                return;

        /*
        * Must bump handle count first as this may be the last
        * ref, in which case the object would disappear before we
        * checked for a name
        */

        mutex_lock(&obj->dev->object_name_lock);
        if (--obj->handle_count == 0) {
                drm_gem_object_handle_free(obj);
        }
        mutex_unlock(&obj->dev->object_name_lock);

        /* May free the object if this was the last regular reference. */
        drm_gem_object_unreference_unlocked(obj);
}
  212.  
/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 *
 * Returns 0 on success, -EINVAL if @handle is not in @filp's table.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

        /* NOTE(review): upstream revokes the mmap-offset access here; the
         * call is disabled in this KolibriOS port. */
//      drm_vma_node_revoke(&obj->vma_node, filp->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
  259.  
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage. Thin wrapper: the @dev argument is
 * unused here, the handle lives entirely in @file's table.
 *
 * Returns 0 on success, -EINVAL if @handle does not exist.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
                         struct drm_device *dev,
                         uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
  276.  
/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
                       struct drm_gem_object *obj,
                       u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        WARN_ON(!mutex_is_locked(&dev->object_name_lock));

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
        /* Take the handle reference unconditionally so the error path below
         * can reuse the common handle-unreference helper. */
        drm_gem_object_reference(obj);
        obj->handle_count++;
        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        mutex_unlock(&dev->object_name_lock);
        if (ret < 0) {
                drm_gem_object_handle_unreference_unlocked(obj);
                return ret;
        }
        *handlep = ret;

        /* NOTE(review): mmap-offset access grants are disabled in this
         * KolibriOS port; upstream calls drm_vma_node_allow() here. */
//      ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
//      if (ret) {
//              drm_gem_handle_delete(file_priv, *handlep);
//              return ret;
//      }

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}
  332.  
/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * Note: object_name_lock is taken here and released inside
 * drm_gem_handle_create_tail() before it returns.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                       struct drm_gem_object *obj,
                       u32 *handlep)
{
        mutex_lock(&obj->dev->object_name_lock);

        return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
  353.  
  354. #if 0
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 * Safe to call even if no offset was ever allocated (the vma manager
 * tolerates removing an unlinked node).
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
  369.  
/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 *
 * Returns 0 on success, or a negative errno from drm_vma_offset_add().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
        struct drm_device *dev = obj->dev;

        /* The vma manager works in page units, not bytes. */
        return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
                                  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
  393.  
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, using the
 * object's own size as the virtual size.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  410.  
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * Returns the page array, or an ERR_PTR on allocation/swap-in failure.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        mapping = file_inode(obj->filp)->i_mapping;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                /* Each successful read pins a page-cache reference. */
                p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;

                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
                 * correct region during swapin. Note that this requires
                 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
                 * so shmem can relocate pages during swapin if required.
                 */
                BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
                                (page_to_pfn(p) >= 0x00100000UL));
        }

        return pages;

fail:
        /* Drop the references taken for pages[0..i-1] before bailing out. */
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
  478.  
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free (array returned by drm_gem_get_pages())
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 *
 * Releases the page-cache reference taken per page by drm_gem_get_pages()
 * and frees the page array itself.
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed)
{
        int i, npages;

        /* We already BUG_ON() for non-page-aligned sizes in
         * drm_gem_object_init(), so we should never hit this unless
         * driver author is doing something really wrong:
         */
        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
  513. #endif
  514.  
  515. /** Returns a reference to the object named by the handle. */
  516. struct drm_gem_object *
  517. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  518.                       u32 handle)
  519. {
  520.         struct drm_gem_object *obj;
  521.  
  522.         spin_lock(&filp->table_lock);
  523.  
  524.         /* Check if we currently have a reference on the object */
  525.         obj = idr_find(&filp->object_idr, handle);
  526.         if (obj == NULL) {
  527.                 spin_unlock(&filp->table_lock);
  528.                 return NULL;
  529.         }
  530.  
  531.         drm_gem_object_reference(obj);
  532.  
  533.         spin_unlock(&filp->table_lock);
  534.  
  535.         return obj;
  536. }
  537. EXPORT_SYMBOL(drm_gem_object_lookup);
  538.  
  539. /**
  540.  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
  541.  * @dev: drm_device
  542.  * @data: ioctl data
  543.  * @file_priv: drm file-private structure
  544.  *
  545.  * Releases the handle to an mm object.
  546.  */
  547. int
  548. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  549.                     struct drm_file *file_priv)
  550. {
  551.         struct drm_gem_close *args = data;
  552.         int ret;
  553.  
  554.         if (!(dev->driver->driver_features & DRIVER_GEM))
  555.                 return -ENODEV;
  556.  
  557.         ret = drm_gem_handle_delete(file_priv, args->handle);
  558.  
  559.         return ret;
  560. }
  561.  
  562. /**
  563.  * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
  564.  * @dev: drm_device
  565.  * @data: ioctl data
  566.  * @file_priv: drm file-private structure
  567.  *
  568.  * Create a global name for an object, returning the name.
  569.  *
  570.  * Note that the name does not hold a reference; when the object
  571.  * is freed, the name goes away.
  572.  */
  573. int
  574. drm_gem_flink_ioctl(struct drm_device *dev, void *data,
  575.                     struct drm_file *file_priv)
  576. {
  577.         struct drm_gem_flink *args = data;
  578.         struct drm_gem_object *obj;
  579.         int ret;
  580.  
  581.         if (!(dev->driver->driver_features & DRIVER_GEM))
  582.                 return -ENODEV;
  583.  
  584.         obj = drm_gem_object_lookup(dev, file_priv, args->handle);
  585.         if (obj == NULL)
  586.                 return -ENOENT;
  587.  
  588.         mutex_lock(&dev->object_name_lock);
  589.         idr_preload(GFP_KERNEL);
  590.         /* prevent races with concurrent gem_close. */
  591.         if (obj->handle_count == 0) {
  592.                 ret = -ENOENT;
  593.                 goto err;
  594.         }
  595.  
  596.         if (!obj->name) {
  597.                 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
  598.                 if (ret < 0)
  599.                         goto err;
  600.  
  601.                 obj->name = ret;
  602.         }
  603.  
  604.                 args->name = (uint64_t) obj->name;
  605.                 ret = 0;
  606.  
  607. err:
  608.         idr_preload_end();
  609.         mutex_unlock(&dev->object_name_lock);
  610.         drm_gem_object_unreference_unlocked(obj);
  611.         return ret;
  612. }
  613.  
  614. /**
  615.  * drm_gem_open - implementation of the GEM_OPEN ioctl
  616.  * @dev: drm_device
  617.  * @data: ioctl data
  618.  * @file_priv: drm file-private structure
  619.  *
  620.  * Open an object using the global name, returning a handle and the size.
  621.  *
  622.  * This handle (of course) holds a reference to the object, so the object
  623.  * will not go away until the handle is deleted.
  624.  */
  625. int
  626. drm_gem_open_ioctl(struct drm_device *dev, void *data,
  627.                    struct drm_file *file_priv)
  628. {
  629.         struct drm_gem_open *args = data;
  630.         struct drm_gem_object *obj;
  631.         int ret;
  632.         u32 handle;
  633.  
  634.         if (!(dev->driver->driver_features & DRIVER_GEM))
  635.                 return -ENODEV;
  636.  
  637.         mutex_lock(&dev->object_name_lock);
  638.         obj = idr_find(&dev->object_name_idr, (int) args->name);
  639.         if (obj) {
  640.                 drm_gem_object_reference(obj);
  641.         } else {
  642.                 mutex_unlock(&dev->object_name_lock);
  643.                 return -ENOENT;
  644.         }
  645.  
  646.         /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
  647.         ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
  648.         drm_gem_object_unreference_unlocked(obj);
  649.         if (ret)
  650.                 return ret;
  651.  
  652.         args->handle = handle;
  653.         args->size = obj->size;
  654.  
  655.         return 0;
  656. }
  657.  
  658. #if 0
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}
  673.  
/*
 * Called at device close to release the file's
 * handle references on objects.
 *
 * idr_for_each() callback: @id is the handle, @ptr the GEM object,
 * @data the owning drm_file. Always returns 0 so iteration continues.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
  694.  
/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp, then
 * destroys the handle idr itself.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
  711. #endif
  712.  
/**
 * drm_gem_object_release - release resources of a GEM buffer object
 * @obj: GEM object whose backing storage should be dropped
 *
 * Frees the shmem backing, if any. NOTE(review): this KolibriOS port calls
 * free() on obj->filp where upstream uses fput(); assumes the port's
 * shmem_file_setup() hands back a plainly-allocated struct — confirm
 * against that implementation.
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
        WARN_ON(obj->dma_buf);

        if (obj->filp)
            free(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
  722.  
/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object via the driver's gem_free_object hook, if set.
 */
void
drm_gem_object_free(struct kref *kref)
{
        /* NOTE(review): the cast assumes 'refcount' is the first member of
         * struct drm_gem_object; upstream uses container_of() — confirm. */
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
  744.  
  745.  
  746. #if 0
/* vm_ops open hook: a new vma maps the object, so take an extra object
 * reference and register the mapping with the drm core. */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
  758.  
/* vm_ops close hook: the vma goes away, so unregister the mapping and drop
 * the reference taken in drm_gem_vm_open() (under struct_mutex, hence the
 * locked unreference variant). */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
  770.  
  771. #endif
  772.  
  773.  
  774.  
  775.