Subversion Repositories Kolibri OS

Rev

Rev 4075 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27. /*
  28.  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29.  *
  30.  * While no substantial code is shared, the prime code is inspired by
  31.  * drm_prime.c, with
  32.  * Authors:
  33.  *      Dave Airlie <airlied@redhat.com>
  34.  *      Rob Clark <rob.clark@linaro.org>
  35.  */
  36. /** @file ttm_ref_object.c
  37.  *
  38.  * Base- and reference object implementation for the various
  39.  * ttm objects. Implements reference counting, minimal security checks
  40.  * and release on file close.
  41.  */
  42.  
  43.  
  44. /**
  45.  * struct ttm_object_file
  46.  *
  47.  * @tdev: Pointer to the ttm_object_device.
  48.  *
  49.  * @lock: Lock that protects the ref_list list and the
  50.  * ref_hash hash tables.
  51.  *
  52.  * @ref_list: List of ttm_ref_objects to be destroyed at
  53.  * file release.
  54.  *
  55.  * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
  56.  * for fast lookup of ref objects given a base object.
  57.  */
  58.  
  59. #define pr_fmt(fmt) "[TTM] " fmt
  60.  
  61. #include <linux/mutex.h>
  62.  
  63. #include <drm/ttm/ttm_object.h>
  64. #include <drm/ttm/ttm_module.h>
  65. #include <linux/list.h>
  66. #include <linux/spinlock.h>
  67. #include <linux/slab.h>
  68. #include <linux/module.h>
  69. //#include <linux/atomic.h>
  70.  
  71. static inline int __must_check kref_get_unless_zero(struct kref *kref)
  72. {
  73.     return atomic_add_unless(&kref->refcount, 1, 0);
  74. }
  75.  
  76. #define pr_err(fmt, ...) \
  77.         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
  78.  
struct ttm_object_file {
        struct ttm_object_device *tdev;             /* Owning device. */
        spinlock_t lock;                            /* Protects ref_list and ref_hash. */
        struct list_head ref_list;                  /* Ref objects dropped on file release. */
        struct drm_open_hash ref_hash[TTM_REF_NUM]; /* Per-ref-type lookup tables. */
        struct kref refcount;                       /* Lifetime of this struct itself. */
};
  86.  
  87. /**
  88.  * struct ttm_object_device
  89.  *
  90.  * @object_lock: lock that protects the object_hash hash table.
  91.  *
  92.  * @object_hash: hash table for fast lookup of object global names.
  93.  *
  94.  * @object_count: Per device object count.
  95.  *
  96.  * This is the per-device data structure needed for ttm object management.
  97.  */
  98.  
struct ttm_object_device {
        spinlock_t object_lock;           /* Protects object_hash. */
        struct drm_open_hash object_hash; /* Global name -> base object lookup. */
        atomic_t object_count;            /* Per-device object count. */
        struct ttm_mem_global *mem_glob;  /* Memory accounting for ref-object allocations. */
};
  105.  
  106. /**
  107.  * struct ttm_ref_object
  108.  *
  109.  * @hash: Hash entry for the per-file object reference hash.
  110.  *
  111.  * @head: List entry for the per-file list of ref-objects.
  112.  *
  113.  * @kref: Ref count.
  114.  *
  115.  * @obj: Base object this ref object is referencing.
  116.  *
  117.  * @ref_type: Type of ref object.
  118.  *
  119.  * This is similar to an idr object, but it also has a hash table entry
  120.  * that allows lookup with a pointer to the referenced object as a key. In
  121.  * that way, one can easily detect whether a base object is referenced by
  122.  * a particular ttm_object_file. It also carries a ref count to avoid creating
  123.  * multiple ref objects if a ttm_object_file references the same base
  124.  * object more than once.
  125.  */
  126.  
struct ttm_ref_object {
        struct drm_hash_item hash;     /* Entry in the per-file, per-type ref hash. */
        struct list_head head;         /* Entry on tfile->ref_list. */
        struct kref kref;              /* How many times this file references @obj. */
        enum ttm_ref_type ref_type;    /* Which kind of reference this represents. */
        struct ttm_base_object *obj;   /* Referenced base object (a refcount is held). */
        struct ttm_object_file *tfile; /* File owning this ref object. */
};
  135.  
  136. static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
  137.  
  138. static inline struct ttm_object_file *
  139. ttm_object_file_ref(struct ttm_object_file *tfile)
  140. {
  141.         kref_get(&tfile->refcount);
  142.         return tfile;
  143. }
  144.  
  145. static void ttm_object_file_destroy(struct kref *kref)
  146. {
  147.         struct ttm_object_file *tfile =
  148.                 container_of(kref, struct ttm_object_file, refcount);
  149.  
  150.         kfree(tfile);
  151. }
  152.  
  153.  
  154. static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
  155. {
  156.         struct ttm_object_file *tfile = *p_tfile;
  157.  
  158.         *p_tfile = NULL;
  159.         kref_put(&tfile->refcount, ttm_object_file_destroy);
  160. }
  161.  
  162.  
/**
 * ttm_base_object_init - Initialize a base object and register it.
 *
 * @tfile: File this object is created on behalf of (a reference is taken).
 * @base: Caller-allocated base object to initialize.
 * @shareable: Whether the object may be referenced by other files.
 * @object_type: Type tag stored on the object.
 * @refcount_release: Called when the last base-object reference is dropped.
 * @ref_obj_release: Called when a non-USAGE ref object is released.
 *
 * Inserts the object into the per-device hash (the chosen key becomes the
 * object's global name) and adds an initial TTM_REF_USAGE reference on
 * behalf of @tfile. Returns 0 on success, a negative error code otherwise.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        spin_lock(&tdev->object_lock);
        /* Pick a free 31-bit key and insert; the key is the global name. */
        ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
                                            &base->hash,
                                            (unsigned long)base, 31, 0, 0);
        spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;

        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0))
                goto out_err1;

        /*
         * The USAGE ref object now holds its own reference on @base;
         * drop the initial kref_init() reference so that the ref object
         * controls the object's visible lifetime.
         */
        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
  203.  
/*
 * kref release callback for a base object's refcount.
 *
 * Unhashes the object from the device's global-name table, drops the
 * reference on the owning file and hands the object back to its creator
 * via refcount_release (which is responsible for the actual free).
 */
static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        ttm_object_file_unref(&base->tfile);
        if (base->refcount_release)
                base->refcount_release(&base);
}
  224.  
  225. void ttm_base_object_unref(struct ttm_base_object **p_base)
  226. {
  227.         struct ttm_base_object *base = *p_base;
  228.  
  229.         *p_base = NULL;
  230.  
  231.         kref_put(&base->refcount, ttm_release_base);
  232. }
  233. EXPORT_SYMBOL(ttm_base_object_unref);
  234.  
/**
 * ttm_base_object_lookup - Look up a base object via a file's USAGE handle.
 *
 * @tfile: File the handle must be valid for.
 * @key: Handle (USAGE ref-hash key) to look up.
 *
 * Returns the base object with an extra reference taken, or NULL if the
 * handle is unknown or the object's refcount already reached zero.
 *
 * NOTE(review): upstream brackets this lookup with rcu_read_lock(); the
 * calls are commented out in this port, so concurrent removal is presumably
 * excluded by other means (or the caller is single-threaded) — verify.
 */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_base_object *base = NULL;
        struct drm_hash_item *hash;
        struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
        int ret;

//      rcu_read_lock();
        ret = drm_ht_find_item_rcu(ht, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
                /* Never hand out an object whose refcount already hit zero. */
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
//   rcu_read_unlock();

        return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
  256.  
  257. struct ttm_base_object *
  258. ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
  259. {
  260.         struct ttm_base_object *base = NULL;
  261.         struct drm_hash_item *hash;
  262.         struct drm_open_hash *ht = &tdev->object_hash;
  263.         int ret;
  264.  
  265.         ret = drm_ht_find_item_rcu(ht, key, &hash);
  266.  
  267.         if (likely(ret == 0)) {
  268.                 base = drm_hash_entry(hash, struct ttm_base_object, hash);
  269.                 if (!kref_get_unless_zero(&base->refcount))
  270.                         base = NULL;
  271.         }
  272.  
  273.         return base;
  274. }
  275. EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
  276.  
  277. int ttm_ref_object_add(struct ttm_object_file *tfile,
  278.                        struct ttm_base_object *base,
  279.                        enum ttm_ref_type ref_type, bool *existed)
  280. {
  281.         struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
  282.         struct ttm_ref_object *ref;
  283.         struct drm_hash_item *hash;
  284.         struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
  285.         int ret = -EINVAL;
  286.  
  287.         if (existed != NULL)
  288.                 *existed = true;
  289.  
  290.         while (ret == -EINVAL) {
  291.                 ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
  292.  
  293.                 if (ret == 0) {
  294.                         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
  295.                         if (!kref_get_unless_zero(&ref->kref)) {
  296.                         break;
  297.                 }
  298.                 }
  299.  
  300.                 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
  301.                                            false, false);
  302.                 if (unlikely(ret != 0))
  303.                         return ret;
  304.                 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
  305.                 if (unlikely(ref == NULL)) {
  306.                         ttm_mem_global_free(mem_glob, sizeof(*ref));
  307.                         return -ENOMEM;
  308.                 }
  309.  
  310.                 ref->hash.key = base->hash.key;
  311.                 ref->obj = base;
  312.                 ref->tfile = tfile;
  313.                 ref->ref_type = ref_type;
  314.                 kref_init(&ref->kref);
  315.  
  316.                 spin_lock(&tfile->lock);
  317.                 ret = drm_ht_insert_item_rcu(ht, &ref->hash);
  318.  
  319.                 if (likely(ret == 0)) {
  320.                         list_add_tail(&ref->head, &tfile->ref_list);
  321.                         kref_get(&base->refcount);
  322.                         spin_unlock(&tfile->lock);
  323.                         if (existed != NULL)
  324.                                 *existed = false;
  325.                         break;
  326.                 }
  327.  
  328.                 spin_unlock(&tfile->lock);
  329.                 BUG_ON(ret != -EINVAL);
  330.  
  331.                 ttm_mem_global_free(mem_glob, sizeof(*ref));
  332.                 kfree(ref);
  333.         }
  334.  
  335.         return ret;
  336. }
  337. EXPORT_SYMBOL(ttm_ref_object_add);
  338.  
/*
 * kref release callback for a ref object.
 *
 * Entered with @tfile->lock held (every kref_put() caller holds it).
 * The lock is dropped while the release callback runs and the ref is
 * freed, then reacquired before returning so the caller can still
 * unlock it.
 */
static void ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        /* Unhash and unlist under the lock, then drop it for the callbacks. */
        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item_rcu(ht, &ref->hash);
        list_del(&ref->head);
        spin_unlock(&tfile->lock);

        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree(ref);
        spin_lock(&tfile->lock);
}
  361.  
  362. int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
  363.                               unsigned long key, enum ttm_ref_type ref_type)
  364. {
  365.         struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
  366.         struct ttm_ref_object *ref;
  367.         struct drm_hash_item *hash;
  368.         int ret;
  369.  
  370.         spin_lock(&tfile->lock);
  371.         ret = drm_ht_find_item(ht, key, &hash);
  372.         if (unlikely(ret != 0)) {
  373.                 spin_unlock(&tfile->lock);
  374.                 return -EINVAL;
  375.         }
  376.         ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
  377.         kref_put(&ref->kref, ttm_ref_object_release);
  378.         spin_unlock(&tfile->lock);
  379.         return 0;
  380. }
  381. EXPORT_SYMBOL(ttm_ref_object_base_unref);
  382.  
/**
 * ttm_object_file_release - Release all refs held by a file, then the file.
 *
 * @p_tfile: Pointer to the caller's tfile reference; cleared up front.
 *
 * Drops every ref object still on @ref_list (releasing the underlying
 * base objects), tears down the per-type hash tables and finally drops
 * the caller's reference on the ttm_object_file itself.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        spin_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                /* Drops and reacquires tfile->lock internally. */
                ttm_ref_object_release(&ref->kref);
        }

        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        spin_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
  411.  
  412. struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
  413.                                              unsigned int hash_order)
  414. {
  415.         struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
  416.         unsigned int i;
  417.         unsigned int j = 0;
  418.         int ret;
  419.  
  420.         if (unlikely(tfile == NULL))
  421.                 return NULL;
  422.  
  423.         spin_lock_init(&tfile->lock);
  424.         tfile->tdev = tdev;
  425.         kref_init(&tfile->refcount);
  426.         INIT_LIST_HEAD(&tfile->ref_list);
  427.  
  428.         for (i = 0; i < TTM_REF_NUM; ++i) {
  429.                 ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
  430.                 if (ret) {
  431.                         j = i;
  432.                         goto out_err;
  433.                 }
  434.         }
  435.  
  436.         return tfile;
  437. out_err:
  438.         for (i = 0; i < j; ++i)
  439.                 drm_ht_remove(&tfile->ref_hash[i]);
  440.  
  441.         kfree(tfile);
  442.  
  443.         return NULL;
  444. }
  445. EXPORT_SYMBOL(ttm_object_file_init);
  446.  
  447. struct ttm_object_device *
  448. ttm_object_device_init(struct ttm_mem_global *mem_glob,
  449.                        unsigned int hash_order,
  450.                        const struct dma_buf_ops *ops)
  451. {
  452.         struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
  453.         int ret;
  454.  
  455.         if (unlikely(tdev == NULL))
  456.                 return NULL;
  457.  
  458.         tdev->mem_glob = mem_glob;
  459.         spin_lock_init(&tdev->object_lock);
  460.         atomic_set(&tdev->object_count, 0);
  461.         ret = drm_ht_create(&tdev->object_hash, hash_order);
  462.         if (ret != 0)
  463.                 goto out_no_object_hash;
  464.  
  465. //   tdev->ops = *ops;
  466. //   tdev->dmabuf_release = tdev->ops.release;
  467. //   tdev->ops.release = ttm_prime_dmabuf_release;
  468. //   tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
  469. //       ttm_round_pot(sizeof(struct file));
  470.                 return tdev;
  471.  
  472. out_no_object_hash:
  473.         kfree(tdev);
  474.         return NULL;
  475. }
  476. EXPORT_SYMBOL(ttm_object_device_init);
  477.  
  478. void ttm_object_device_release(struct ttm_object_device **p_tdev)
  479. {
  480.         struct ttm_object_device *tdev = *p_tdev;
  481.  
  482.         *p_tdev = NULL;
  483.  
  484.         spin_lock(&tdev->object_lock);
  485.         drm_ht_remove(&tdev->object_hash);
  486.         spin_unlock(&tdev->object_lock);
  487.  
  488.         kfree(tdev);
  489. }
  490. EXPORT_SYMBOL(ttm_object_device_release);
  491.