Subversion Repositories Kolibri OS

Rev

Rev 3298 | Rev 4075 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *
  26.  */
  27.  
  28. #include <linux/types.h>
  29. #include <linux/slab.h>
  30. #include <linux/mm.h>
  31. #include <linux/module.h>
  32. #include <linux/shmem_fs.h>
  33. #include <linux/err.h>
  34. #include <drm/drmP.h>
  35.  
  36. /** @file drm_gem.c
  37.  *
  38.  * This file provides some of the base ioctls and library routines for
  39.  * the graphics memory manager implemented by each device driver.
  40.  *
  41.  * Because various devices have different requirements in terms of
  42.  * synchronization and migration strategies, implementing that is left up to
  43.  * the driver, and all that the general API provides should be generic --
  44.  * allocating objects, reading/writing data with the cpu, freeing objects.
  45.  * Even there, platform-dependent optimizations for reading/writing data with
  46.  * the CPU mean we'll likely hook those out to driver-specific calls.  However,
  47.  * the DRI2 implementation wants to have at least allocate/mmap be generic.
  48.  *
  49.  * The goal was to have swap-backed object allocation managed through
  50.  * struct file.  However, file descriptors as handles to a struct file have
  51.  * two major failings:
  52.  * - Process limits prevent more than 1024 or so being used at a time by
  53.  *   default.
  54.  * - Inability to allocate high fds will aggravate the X Server's select()
  55.  *   handling, and likely that of many GL client applications as well.
  56.  *
  57.  * This led to a plan of using our own integer IDs (called handles, following
  58.  * DRM terminology) to mimic fds, and implement the fd syscalls we need as
  59.  * ioctls.  The objects themselves will still include the struct file so
  60.  * that we can transition to fds if the required kernel infrastructure shows
  61.  * up at a later date, and as our interface with shmfs for memory allocation.
  62.  */
  63.  
  64. /*
  65.  * We make up offsets for buffer objects so we can recognize them at
  66.  * mmap time.
  67.  */
  68.  
  69. /* pgoff in mmap is an unsigned long, so we need to make sure that
  70.  * the faked up offset will fit
  71.  */
  72.  
  73. #if BITS_PER_LONG == 64
  74. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
  75. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
  76. #else
  77. #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
  78. #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
  79. #endif
  80.  
  81. #if 0
/**
 * drm_gem_init - initialize per-device GEM state
 * @dev: DRM device to initialize
 *
 * Sets up the global-name idr/lock and the mmap-offset manager
 * (a drm_mm range allocator plus a hash table mapping fake file
 * offsets back to objects).
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */

int
drm_gem_init(struct drm_device *dev)
{
        struct drm_gem_mm *mm;

        spin_lock_init(&dev->object_name_lock);
        idr_init(&dev->object_name_idr);

        mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
        if (!mm) {
                DRM_ERROR("out of memory\n");
                return -ENOMEM;
        }

        dev->mm_private = mm;

        /* 2^12 buckets for the fake-offset -> object hash table */
        if (drm_ht_create(&mm->offset_hash, 12)) {
                kfree(mm);
                return -ENOMEM;
        }

        /* Carve out the range of fake mmap offsets (see macros above) */
        if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
                        DRM_FILE_PAGE_OFFSET_SIZE)) {
                drm_ht_remove(&mm->offset_hash);
                kfree(mm);
                return -ENOMEM;
        }

        return 0;
}
  116.  
/**
 * drm_gem_destroy - tear down the per-device GEM state set up by drm_gem_init()
 * @dev: DRM device being shut down
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */
void
drm_gem_destroy(struct drm_device *dev)
{
        struct drm_gem_mm *mm = dev->mm_private;

        drm_mm_takedown(&mm->offset_manager);
        drm_ht_remove(&mm->offset_hash);
        kfree(mm);
        dev->mm_private = NULL;
}
  127. #endif
  128.  
  129. /**
  130.  * Initialize an already allocated GEM object of the specified size with
  131.  * shmfs backing store.
  132.  */
  133. int drm_gem_object_init(struct drm_device *dev,
  134.                         struct drm_gem_object *obj, size_t size)
  135. {
  136.         BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  137.  
  138.         obj->dev = dev;
  139.         obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
  140.         if (IS_ERR(obj->filp))
  141.                 return PTR_ERR(obj->filp);
  142.  
  143.         kref_init(&obj->refcount);
  144.         atomic_set(&obj->handle_count, 0);
  145.         obj->size = size;
  146.  
  147.         return 0;
  148. }
  149. EXPORT_SYMBOL(drm_gem_object_init);
  150.  
  151. /**
  152.  * Initialize an already allocated GEM object of the specified size with
  153.  * no GEM provided backing store. Instead the caller is responsible for
  154.  * backing the object and handling it.
  155.  */
  156. int drm_gem_private_object_init(struct drm_device *dev,
  157.                         struct drm_gem_object *obj, size_t size)
  158. {
  159.         BUG_ON((size & (PAGE_SIZE - 1)) != 0);
  160.  
  161.         obj->dev = dev;
  162.         obj->filp = NULL;
  163.  
  164.         kref_init(&obj->refcount);
  165.         atomic_set(&obj->handle_count, 0);
  166.         obj->size = size;
  167.  
  168.         return 0;
  169. }
  170. EXPORT_SYMBOL(drm_gem_private_object_init);
  171.  
/**
 * drm_gem_object_alloc - allocate and initialize a GEM object with shmfs
 * backing store.
 * @dev:  owning DRM device
 * @size: object size in bytes; must be page-aligned
 *
 * Allocates the object, initializes it via drm_gem_object_init(), then
 * gives the driver a chance to set up its private state through the
 * optional gem_init_object() hook.
 *
 * Returns the new object, or NULL on any failure.
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
        struct drm_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                goto free;

        /* On failure obj->filp is an ERR_PTR, so only obj itself is freed. */
        if (drm_gem_object_init(dev, obj, size) != 0)
                goto free;

        if (dev->driver->gem_init_object != NULL &&
            dev->driver->gem_init_object(obj) != 0) {
                goto fput;
        }
        return obj;
fput:
        /* Object_init mangles the global counters - readjust them. */
        /* NOTE(review): this port releases the backing store with a plain
         * free() instead of Linux's fput(); matches drm_gem_object_release()
         * below — confirm against the port's shmem_file_setup(). */
        free(obj->filp);
free:
        kfree(obj);
        return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
  200.  
  201.  
/**
 * drm_gem_handle_delete - remove the @handle -> object mapping for @filp
 * @filp:   DRM file private owning the handle
 * @handle: userspace handle to drop
 *
 * Drops the handle reference (and, through it, a regular reference) on the
 * object and invokes the driver's optional gem_close_object() hook.
 *
 * Returns 0 on success, -EINVAL if the handle does not exist.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
        struct drm_device *dev;
        struct drm_gem_object *obj;

        /* This is gross. The idr system doesn't let us try a delete and
         * return an error code.  It just spews if you fail at deleting.
         * So, we have to grab a lock around finding the object and then
         * doing the delete on it and dropping the refcount, or the user
         * could race us to double-decrement the refcount and cause a
         * use-after-free later.  Given the frequency of our handle lookups,
         * we may want to use ida for number allocation and a hash table
         * for the pointers, anyway.
         */
    /* Port debug trace: handle is u32, so this fires for 0xFFFFFFFE. */
    if(handle == -2)
        printf("%s handle %d\n", __FUNCTION__, handle);

        spin_lock(&filp->table_lock);

        /* Check if we currently have a reference on the object */
        obj = idr_find(&filp->object_idr, handle);
        if (obj == NULL) {
                spin_unlock(&filp->table_lock);
                return -EINVAL;
        }
        dev = obj->dev;

 //   printf("%s handle %d obj %p\n", __FUNCTION__, handle, obj);

        /* Release reference and decrement refcount. */
        idr_remove(&filp->object_idr, handle);
        spin_unlock(&filp->table_lock);

/* PRIME export bookkeeping is not wired up in this port. */
//   drm_gem_remove_prime_handles(obj, filp);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, filp);
        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
  248.  
/**
 * drm_gem_handle_create - create a userspace handle for @obj
 * @file_priv: DRM file private to own the handle
 * @obj:       object to create a handle for
 * @handlep:   out: the new handle (>= 1)
 *
 * Adds a handle reference to the object, which includes a regular
 * reference count.  Callers will likely want to dereference the object
 * afterwards.  If the driver's gem_open_object() hook fails, the freshly
 * created handle is deleted again before returning the error.
 *
 * Returns 0 on success or a negative idr/driver error code.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                       struct drm_gem_object *obj,
                       u32 *handlep)
{
        struct drm_device *dev = obj->dev;
        int ret;

        /*
         * Get the user-visible handle using idr.  Preload and perform
         * allocation under our spinlock.
         */
        idr_preload(GFP_KERNEL);
        spin_lock(&file_priv->table_lock);

        /* start=1: handle 0 is reserved as "no handle"; end=0 means no limit */
        ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

        spin_unlock(&file_priv->table_lock);
        idr_preload_end();
        if (ret < 0)
                return ret;
        *handlep = ret;

        drm_gem_object_handle_reference(obj);

        if (dev->driver->gem_open_object) {
                ret = dev->driver->gem_open_object(obj, file_priv);
                if (ret) {
                        /* Roll back: drops the handle ref taken above. */
                        drm_gem_handle_delete(file_priv, *handlep);
                        return ret;
                }
        }

        return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
  290.  
  291.  
  292. /**
  293.  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
  294.  * @obj: obj in question
  295.  *
  296.  * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
  297.  */
  298. #if 0
/* NOTE(review): compiled out (#if 0) in this KolibriOS port — no mmap. */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list = &obj->map_list;

        /* Undo drm_gem_create_mmap_offset() in reverse order. */
        drm_ht_remove_item(&mm->offset_hash, &list->hash);
        drm_mm_put_block(list->file_offset_node);
        kfree(list->map);
        list->map = NULL;
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
  312.  
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 * Returns 0 on success, -ENOSPC if the offset range is exhausted,
 * -ENOMEM on allocation failure.
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret;

        /* Set the object up for mmap'ing */
        list = &obj->map_list;
        /* NOTE(review): allocates sizeof(struct drm_map_list) for a
         * drm_local_map — same as the upstream kernel of this era; the
         * over-allocation is harmless but worth confirming if structs
         * diverge in this port. */
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;

        map = list->map;
        map->type = _DRM_GEM;
        map->size = obj->size;
        map->handle = obj;

        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                        obj->size / PAGE_SIZE, 0, false);

        if (!list->file_offset_node) {
                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
                ret = -ENOSPC;
                goto out_free_list;
        }

        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                        obj->size / PAGE_SIZE, 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
        }

        /* Hash the offset so mmap() can find the object again. */
        list->hash.key = list->file_offset_node->start;
        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
        if (ret) {
                DRM_ERROR("failed to add to map hash\n");
                goto out_free_mm;
        }

        return 0;

out_free_mm:
        drm_mm_put_block(list->file_offset_node);
out_free_list:
        kfree(list->map);
        list->map = NULL;

        return ret;
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  379. #endif
  380.  
  381. /** Returns a reference to the object named by the handle. */
  382. struct drm_gem_object *
  383. drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
  384.                       u32 handle)
  385. {
  386.         struct drm_gem_object *obj;
  387.  
  388.      if(handle == -2)
  389.         printf("%s handle %d\n", __FUNCTION__, handle);
  390.  
  391.         spin_lock(&filp->table_lock);
  392.  
  393.         /* Check if we currently have a reference on the object */
  394.         obj = idr_find(&filp->object_idr, handle);
  395.         if (obj == NULL) {
  396.                 spin_unlock(&filp->table_lock);
  397.                 return NULL;
  398.         }
  399.  
  400.         drm_gem_object_reference(obj);
  401.  
  402.         spin_unlock(&filp->table_lock);
  403.  
  404.         return obj;
  405. }
  406. EXPORT_SYMBOL(drm_gem_object_lookup);
  407.  
  408. /**
  409.  * Releases the handle to an mm object.
  410.  */
  411. int
  412. drm_gem_close_ioctl(struct drm_device *dev, void *data,
  413.                     struct drm_file *file_priv)
  414. {
  415.         struct drm_gem_close *args = data;
  416.         int ret;
  417.  
  418.         ret = drm_gem_handle_delete(file_priv, args->handle);
  419.  
  420.         return ret;
  421. }
  422.  
  423. /**
  424.  * Create a global name for an object, returning the name.
  425.  *
  426.  * Note that the name does not hold a reference; when the object
  427.  * is freed, the name goes away.
  428.  */
  429.  
  430. #if 0
/* NOTE(review): compiled out (#if 0) in this KolibriOS port. */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
        struct drm_gem_flink *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (obj == NULL)
                return -ENOENT;

        idr_preload(GFP_KERNEL);
        spin_lock(&dev->object_name_lock);
        if (!obj->name) {
                /* NOTE(review): if idr_alloc() fails, the negative error
                 * code is stored into obj->name before the ret < 0 check —
                 * the object is left with a bogus name.  Upstream later
                 * fixed this; worth fixing if this path is ever enabled. */
                ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                obj->name = ret;
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);
                idr_preload_end();

                if (ret < 0)
                        goto err;
                ret = 0;

                /* Allocate a reference for the name table.  */
                drm_gem_object_reference(obj);
        } else {
                /* Already flinked: hand back the existing name. */
                args->name = (uint64_t) obj->name;
                spin_unlock(&dev->object_name_lock);
                idr_preload_end();
                ret = 0;
        }

err:
        /* Drop the lookup reference taken above. */
        drm_gem_object_unreference_unlocked(obj);
        return ret;
}
  472.  
  473. /**
  474.  * Open an object using the global name, returning a handle and the size.
  475.  *
  476.  * This handle (of course) holds a reference to the object, so the object
  477.  * will not go away until the handle is deleted.
  478.  */
  479. int
  480. drm_gem_open_ioctl(struct drm_device *dev, void *data,
  481.                    struct drm_file *file_priv)
  482. {
  483.         struct drm_gem_open *args = data;
  484.         struct drm_gem_object *obj;
  485.         int ret;
  486.         u32 handle;
  487.  
  488.         if (!(dev->driver->driver_features & DRIVER_GEM))
  489.                 return -ENODEV;
  490.  
  491.     if(handle == -2)
  492.         printf("%s handle %d\n", __FUNCTION__, handle);
  493.  
  494.         spin_lock(&dev->object_name_lock);
  495.         obj = idr_find(&dev->object_name_idr, (int) args->name);
  496.         if (obj)
  497.                 drm_gem_object_reference(obj);
  498.         spin_unlock(&dev->object_name_lock);
  499.         if (!obj)
  500.                 return -ENOENT;
  501.  
  502.         ret = drm_gem_handle_create(file_priv, obj, &handle);
  503.         drm_gem_object_unreference_unlocked(obj);
  504.         if (ret)
  505.                 return ret;
  506.  
  507.         args->handle = handle;
  508.         args->size = obj->size;
  509.  
  510.         return 0;
  511. }
  512.  
/**
 * drm_gem_open - per-file GEM setup
 * @dev:          DRM device (unused here)
 * @file_private: the file private being opened
 *
 * Called at device open time, sets up the structure for handling
 * refcounting of mm objects (the handle idr and its lock).
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
        idr_init(&file_private->object_idr);
        spin_lock_init(&file_private->table_lock);
}
  523.  
/**
 * drm_gem_object_release_handle - idr_for_each() callback releasing one handle
 * @id:   the handle number (unused)
 * @ptr:  the struct drm_gem_object behind the handle
 * @data: the struct drm_file whose handles are being torn down
 *
 * Called at device close to release the file's handle references on
 * objects.  Always returns 0 so iteration continues.
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
        struct drm_file *file_priv = data;
        struct drm_gem_object *obj = ptr;
        struct drm_device *dev = obj->dev;

        drm_gem_remove_prime_handles(obj, file_priv);

        if (dev->driver->gem_close_object)
                dev->driver->gem_close_object(obj, file_priv);

        drm_gem_object_handle_unreference_unlocked(obj);

        return 0;
}
  544.  
/**
 * drm_gem_release - drop all of a file's handle references
 * @dev:          DRM device (unused here)
 * @file_private: the file private being closed
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp, then
 * destroys the handle idr itself.
 *
 * NOTE(review): compiled out (#if 0) in this KolibriOS port.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
        idr_for_each(&file_private->object_idr,
                     &drm_gem_object_release_handle, file_private);
        idr_destroy(&file_private->object_idr);
}
  557. #endif
  558.  
  559. void
  560. drm_gem_object_release(struct drm_gem_object *obj)
  561. {
  562.         if (obj->filp)
  563.             free(obj->filp);
  564. }
  565. EXPORT_SYMBOL(drm_gem_object_release);
  566.  
/**
 * drm_gem_object_free - kref release callback for GEM objects
 * @kref: the refcount embedded in the object
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object by delegating to the driver's gem_free_object() hook.
 */
void
drm_gem_object_free(struct kref *kref)
{
        /* NOTE(review): direct cast assumes refcount is the first member
         * of struct drm_gem_object; container_of() would be layout-safe. */
        struct drm_gem_object *obj = (struct drm_gem_object *) kref;
        struct drm_device *dev = obj->dev;

        BUG_ON(!mutex_is_locked(&dev->struct_mutex));

        if (dev->driver->gem_free_object != NULL)
                dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
  585.  
/* kref release stub for references that must never be the last one
 * (see drm_gem_object_handle_free): reaching it is a refcounting bug. */
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
        BUG();
}
  590.  
/**
 * drm_gem_object_handle_free - tear-down after the last handle is closed
 * @obj: the GEM object whose final handle just went away
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;

        /* Remove any name for this object */
        spin_lock(&dev->object_name_lock);
        if (obj->name) {
                idr_remove(&dev->object_name_idr, obj->name);
                obj->name = 0;
                /* Unlock before dropping the name's reference; the put
                 * must not run under object_name_lock. */
                spin_unlock(&dev->object_name_lock);
                /*
                 * The object name held a reference to this object, drop
                 * that now.
                *
                * This cannot be the last reference, since the handle holds one too.
                 */
                kref_put(&obj->refcount, drm_gem_object_ref_bug);
        } else
                spin_unlock(&dev->object_name_lock);

}
EXPORT_SYMBOL(drm_gem_object_handle_free);
  620.  
  621. #if 0
/* vm_operations open hook: a new VMA maps this object, so take a
 * reference and register the mapping under struct_mutex.
 * NOTE(review): compiled out (#if 0) in this KolibriOS port. */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;

        drm_gem_object_reference(obj);

        mutex_lock(&obj->dev->struct_mutex);
        drm_vm_open_locked(obj->dev, vma);
        mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);
  633.  
/* vm_operations close hook: a VMA mapping this object goes away, so
 * unregister the mapping and drop the reference taken in vm_open.
 * NOTE(review): compiled out (#if 0) in this KolibriOS port. */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(obj->dev, vma);
        /* Safe to unreference under struct_mutex (locked variant). */
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
  645.  
  646. #endif
  647.  
  648.  
  649.  
  650.