Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /**************************************************************************
  2.  *
  3.  * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27. /*
  28.  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29.  */
  30. /** @file ttm_ref_object.c
  31.  *
  32.  * Base- and reference object implementation for the various
  33.  * ttm objects. Implements reference counting, minimal security checks
  34.  * and release on file close.
  35.  */
  36.  
  37. /**
  38.  * struct ttm_object_file
  39.  *
  40.  * @tdev: Pointer to the ttm_object_device.
  41.  *
  42.  * @lock: Lock that protects the ref_list list and the
  43.  * ref_hash hash tables.
  44.  *
  45.  * @ref_list: List of ttm_ref_objects to be destroyed at
  46.  * file release.
  47.  *
  48.  * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
  49.  * for fast lookup of ref objects given a base object.
  50.  */
  51.  
  52. #define pr_fmt(fmt) "[TTM] " fmt
  53.  
  54. #include <drm/ttm/ttm_object.h>
  55. #include <drm/ttm/ttm_module.h>
  56. #include <linux/list.h>
  57. #include <linux/spinlock.h>
  58. #include <linux/slab.h>
  59. #include <linux/module.h>
  60. //#include <linux/atomic.h>
  61.  
  62. static inline int __must_check kref_get_unless_zero(struct kref *kref)
  63. {
  64.     return atomic_add_unless(&kref->refcount, 1, 0);
  65. }
  66.  
  67. #define pr_err(fmt, ...) \
  68.         printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
  69.  
struct ttm_object_file {
        struct ttm_object_device *tdev;         /* Owning object device. */
        rwlock_t lock;                          /* Protects ref_list and ref_hash. */
        struct list_head ref_list;              /* Ref objects destroyed at file release. */
        struct drm_open_hash ref_hash[TTM_REF_NUM]; /* One lookup table per ref type. */
        struct kref refcount;                   /* Lifetime of this struct itself. */
};
  77.  
  78. /**
  79.  * struct ttm_object_device
  80.  *
  81.  * @object_lock: lock that protects the object_hash hash table.
  82.  *
  83.  * @object_hash: hash table for fast lookup of object global names.
  84.  *
  85.  * @object_count: Per device object count.
  86.  *
  87.  * This is the per-device data structure needed for ttm object management.
  88.  */
  89.  
struct ttm_object_device {
        spinlock_t object_lock;           /* Protects object_hash. */
        struct drm_open_hash object_hash; /* Global name -> base object lookup. */
        atomic_t object_count;            /* Per-device object count. */
        struct ttm_mem_global *mem_glob;  /* Memory accounting for ref-object allocs. */
};
  96.  
  97. /**
  98.  * struct ttm_ref_object
  99.  *
  100.  * @hash: Hash entry for the per-file object reference hash.
  101.  *
  102.  * @head: List entry for the per-file list of ref-objects.
  103.  *
  104.  * @kref: Ref count.
  105.  *
  106.  * @obj: Base object this ref object is referencing.
  107.  *
  108.  * @ref_type: Type of ref object.
  109.  *
  110.  * This is similar to an idr object, but it also has a hash table entry
  111.  * that allows lookup with a pointer to the referenced object as a key. In
  112.  * that way, one can easily detect whether a base object is referenced by
  113.  * a particular ttm_object_file. It also carries a ref count to avoid creating
  114.  * multiple ref objects if a ttm_object_file references the same base
  115.  * object more than once.
  116.  */
  117.  
struct ttm_ref_object {
        struct drm_hash_item hash;      /* Entry in tfile->ref_hash[ref_type]. */
        struct list_head head;          /* Entry in tfile->ref_list. */
        struct kref kref;               /* Times this file references @obj this way. */
        enum ttm_ref_type ref_type;     /* Which per-type hash table we live in. */
        struct ttm_base_object *obj;    /* Referenced base object (we hold a ref). */
        struct ttm_object_file *tfile;  /* Owning file; no extra ref taken on it here. */
};
  126.  
  127. static inline struct ttm_object_file *
  128. ttm_object_file_ref(struct ttm_object_file *tfile)
  129. {
  130.         kref_get(&tfile->refcount);
  131.         return tfile;
  132. }
  133.  
  134. static void ttm_object_file_destroy(struct kref *kref)
  135. {
  136.         struct ttm_object_file *tfile =
  137.                 container_of(kref, struct ttm_object_file, refcount);
  138.  
  139.         kfree(tfile);
  140. }
  141.  
  142.  
  143. static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
  144. {
  145.         struct ttm_object_file *tfile = *p_tfile;
  146.  
  147.         *p_tfile = NULL;
  148.         kref_put(&tfile->refcount, ttm_object_file_destroy);
  149. }
  150.  
  151.  
/**
 * ttm_base_object_init - Initialize a caller-allocated base object and
 * register it with the device's global-name hash.
 *
 * @tfile: File that becomes the owner; it receives a TTM_REF_USAGE ref.
 * @base: The base object to initialize.
 * @shareable: Whether other files may look this object up.
 * @object_type: Stored on the object for type checks by callers.
 * @refcount_release: Called when the last base-object reference is dropped.
 * @ref_obj_release: Called when a non-USAGE ref object on this base dies.
 *
 * Returns 0 on success or a negative error code from hash insertion or
 * ref-object creation. On success, the initial reference created by
 * kref_init() is handed over to the TTM_REF_USAGE ref object of @tfile;
 * the final ttm_base_object_unref() here drops the local reference, not
 * the object. On error the object is unhashed again before returning.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
                         struct ttm_base_object *base,
                         bool shareable,
                         enum ttm_object_type object_type,
                         void (*refcount_release) (struct ttm_base_object **),
                         void (*ref_obj_release) (struct ttm_base_object *,
                                                  enum ttm_ref_type ref_type))
{
        struct ttm_object_device *tdev = tfile->tdev;
        int ret;

        base->shareable = shareable;
        base->tfile = ttm_object_file_ref(tfile);
        base->refcount_release = refcount_release;
        base->ref_obj_release = ref_obj_release;
        base->object_type = object_type;
        kref_init(&base->refcount);
        spin_lock(&tdev->object_lock);
        /* Allocate a global name (hash key) for the object. */
        ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
                                            &base->hash,
                                            (unsigned long)base, 31, 0, 0);
        spin_unlock(&tdev->object_lock);
        if (unlikely(ret != 0))
                goto out_err0;

        /* Owner's usage ref takes over the kref_init() reference. */
        ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0))
                goto out_err1;

        ttm_base_object_unref(&base);

        return 0;
out_err1:
        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
  192.  
/*
 * kref release callback for a base object: unhash it from the device's
 * global-name table and invoke the owner's release hook.
 *
 * NOTE(review): the tfile reference is only dropped when refcount_release
 * is non-NULL — this mirrors the upstream code, which assumes every base
 * object installs a release callback; confirm that holds for all callers.
 */
static void ttm_release_base(struct kref *kref)
{
        struct ttm_base_object *base =
            container_of(kref, struct ttm_base_object, refcount);
        struct ttm_object_device *tdev = base->tfile->tdev;

        spin_lock(&tdev->object_lock);
        (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
        spin_unlock(&tdev->object_lock);

        /*
         * Note: We don't use synchronize_rcu() here because it's far
         * too slow. It's up to the user to free the object using
         * call_rcu() or ttm_base_object_kfree().
         */

        if (base->refcount_release) {
                ttm_object_file_unref(&base->tfile);
                base->refcount_release(&base);
        }
}
  214.  
  215. void ttm_base_object_unref(struct ttm_base_object **p_base)
  216. {
  217.         struct ttm_base_object *base = *p_base;
  218.  
  219.         *p_base = NULL;
  220.  
  221.         kref_put(&base->refcount, ttm_release_base);
  222. }
  223. EXPORT_SYMBOL(ttm_base_object_unref);
  224.  
/**
 * ttm_base_object_lookup - Look up a base object by its global name and
 * take a reference on it.
 *
 * @tfile: The file doing the lookup, used for the shareability check.
 * @key: Global name (hash key) of the object.
 *
 * Returns the referenced base object, or NULL if the key is unknown, the
 * object is already on its way down (refcount hit zero), or the object
 * belongs to another file and is not shareable.
 *
 * NOTE(review): rcu_read_lock()/rcu_read_unlock() are commented out in
 * this port, so the find + kref_get_unless_zero() window is not protected
 * against concurrent teardown — confirm lookups are externally serialized
 * in this environment.
 */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint32_t key)
{
        struct ttm_object_device *tdev = tfile->tdev;
        struct ttm_base_object *base;
        struct drm_hash_item *hash;
        int ret;

//   rcu_read_lock();
        ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);

        if (likely(ret == 0)) {
                base = drm_hash_entry(hash, struct ttm_base_object, hash);
                /* Object may be mid-teardown; only take a ref if still live. */
                ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
        }
//   rcu_read_unlock();

        if (unlikely(ret != 0))
                return NULL;

        /* Minimal security check: other files may only see shareable objects. */
        if (tfile != base->tfile && !base->shareable) {
                pr_err("Attempted access of non-shareable object\n");
                ttm_base_object_unref(&base);
                return NULL;
        }

        return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
  254.  
/**
 * ttm_ref_object_add - Add (or bump) a ref object of @ref_type from
 * @tfile to @base.
 *
 * @tfile: File taking the reference.
 * @base: Base object being referenced.
 * @ref_type: Kind of reference (selects the per-type hash table).
 * @existed: Optional out-parameter; set to false only when a brand-new
 * ref object was created, true when an existing one was re-used.
 *
 * Returns 0 on success, or a negative error code from memory accounting
 * or allocation.
 *
 * Uses an optimistic scheme: first a cheap read-locked lookup; if no ref
 * object exists yet, allocate one outside any lock and then insert it
 * under the write lock. If another thread raced us and inserted first,
 * drm_ht_insert_item() fails with -EINVAL, we free our copy, and the
 * loop retries (finding the winner's entry on the next pass).
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
                       struct ttm_base_object *base,
                       enum ttm_ref_type ref_type, bool *existed)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
        int ret = -EINVAL;

        if (existed != NULL)
                *existed = true;

        while (ret == -EINVAL) {
                read_lock(&tfile->lock);
                ret = drm_ht_find_item(ht, base->hash.key, &hash);

                if (ret == 0) {
                        /* Already referenced: just bump the per-file count. */
                        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
                        kref_get(&ref->kref);
                        read_unlock(&tfile->lock);
                        break;
                }

                read_unlock(&tfile->lock);
                /* Account the allocation before committing to it. */
                ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
                                           false, false);
                if (unlikely(ret != 0))
                        return ret;
                ref = kmalloc(sizeof(*ref), GFP_KERNEL);
                if (unlikely(ref == NULL)) {
                        ttm_mem_global_free(mem_glob, sizeof(*ref));
                        return -ENOMEM;
                }

                ref->hash.key = base->hash.key;
                ref->obj = base;
                ref->tfile = tfile;
                ref->ref_type = ref_type;
                kref_init(&ref->kref);

                write_lock(&tfile->lock);
                ret = drm_ht_insert_item(ht, &ref->hash);

                if (likely(ret == 0)) {
                        list_add_tail(&ref->head, &tfile->ref_list);
                        /* The new ref object pins the base object. */
                        kref_get(&base->refcount);
                        write_unlock(&tfile->lock);
                        if (existed != NULL)
                                *existed = false;
                        break;
                }

                /* Lost the race to another inserter: discard and retry. */
                write_unlock(&tfile->lock);
                BUG_ON(ret != -EINVAL);

                ttm_mem_global_free(mem_glob, sizeof(*ref));
                kfree(ref);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
  318.  
/*
 * kref release callback for a ref object. Must be called with
 * tfile->lock write-held: the entry is unhashed and unlisted under the
 * lock, then the lock is dropped while the (potentially sleeping)
 * release hook, base-object unref and free run, and reacquired before
 * returning so the caller's locking state is unchanged.
 */
static void ttm_ref_object_release(struct kref *kref)
{
        struct ttm_ref_object *ref =
            container_of(kref, struct ttm_ref_object, kref);
        struct ttm_base_object *base = ref->obj;
        struct ttm_object_file *tfile = ref->tfile;
        struct drm_open_hash *ht;
        struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

        ht = &tfile->ref_hash[ref->ref_type];
        (void)drm_ht_remove_item(ht, &ref->hash);
        list_del(&ref->head);
        write_unlock(&tfile->lock);

        /* USAGE refs have no per-type release hook; others may. */
        if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
                base->ref_obj_release(base, ref->ref_type);

        ttm_base_object_unref(&ref->obj);
        ttm_mem_global_free(mem_glob, sizeof(*ref));
        kfree(ref);
        write_lock(&tfile->lock);
}
  341.  
/**
 * ttm_ref_object_base_unref - Drop one @ref_type reference held by
 * @tfile on the base object named @key.
 *
 * Returns 0 on success, -EINVAL if @tfile holds no such reference.
 *
 * kref_put() runs under the write lock because ttm_ref_object_release()
 * expects it held (it temporarily drops it itself).
 */
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
                              unsigned long key, enum ttm_ref_type ref_type)
{
        struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
        struct ttm_ref_object *ref;
        struct drm_hash_item *hash;
        int ret;

        write_lock(&tfile->lock);
        ret = drm_ht_find_item(ht, key, &hash);
        if (unlikely(ret != 0)) {
                write_unlock(&tfile->lock);
                return -EINVAL;
        }
        ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
        kref_put(&ref->kref, ttm_ref_object_release);
        write_unlock(&tfile->lock);
        return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
  362.  
/**
 * ttm_object_file_release - Release all ref objects held by a file and
 * drop the caller's reference to it (typically at file close).
 *
 * @p_tfile: Pointer to the file pointer; cleared before teardown.
 *
 * Each ref object is force-released regardless of its kref count, so
 * every base-object reference the file held is returned.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
        struct ttm_ref_object *ref;
        struct list_head *list;
        unsigned int i;
        struct ttm_object_file *tfile = *p_tfile;

        *p_tfile = NULL;
        write_lock(&tfile->lock);

        /*
         * Since we release the lock within the loop, we have to
         * restart it from the beginning each time.
         */

        while (!list_empty(&tfile->ref_list)) {
                list = tfile->ref_list.next;
                ref = list_entry(list, struct ttm_ref_object, head);
                /* Bypasses the kref count: called directly, not via kref_put. */
                ttm_ref_object_release(&ref->kref);
        }

        for (i = 0; i < TTM_REF_NUM; ++i)
                drm_ht_remove(&tfile->ref_hash[i]);

        write_unlock(&tfile->lock);
        ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
  391.  
  392. struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
  393.                                              unsigned int hash_order)
  394. {
  395.         struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
  396.         unsigned int i;
  397.         unsigned int j = 0;
  398.         int ret;
  399.  
  400.         if (unlikely(tfile == NULL))
  401.                 return NULL;
  402.  
  403.         rwlock_init(&tfile->lock);
  404.         tfile->tdev = tdev;
  405.         kref_init(&tfile->refcount);
  406.         INIT_LIST_HEAD(&tfile->ref_list);
  407.  
  408.         for (i = 0; i < TTM_REF_NUM; ++i) {
  409.                 ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
  410.                 if (ret) {
  411.                         j = i;
  412.                         goto out_err;
  413.                 }
  414.         }
  415.  
  416.         return tfile;
  417. out_err:
  418.         for (i = 0; i < j; ++i)
  419.                 drm_ht_remove(&tfile->ref_hash[i]);
  420.  
  421.         kfree(tfile);
  422.  
  423.         return NULL;
  424. }
  425. EXPORT_SYMBOL(ttm_object_file_init);
  426.  
  427. struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
  428.                                                  *mem_glob,
  429.                                                  unsigned int hash_order)
  430. {
  431.         struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
  432.         int ret;
  433.  
  434.         if (unlikely(tdev == NULL))
  435.                 return NULL;
  436.  
  437.         tdev->mem_glob = mem_glob;
  438.         spin_lock_init(&tdev->object_lock);
  439.         atomic_set(&tdev->object_count, 0);
  440.         ret = drm_ht_create(&tdev->object_hash, hash_order);
  441.  
  442.         if (likely(ret == 0))
  443.                 return tdev;
  444.  
  445.         kfree(tdev);
  446.         return NULL;
  447. }
  448. EXPORT_SYMBOL(ttm_object_device_init);
  449.  
  450. void ttm_object_device_release(struct ttm_object_device **p_tdev)
  451. {
  452.         struct ttm_object_device *tdev = *p_tdev;
  453.  
  454.         *p_tdev = NULL;
  455.  
  456.         spin_lock(&tdev->object_lock);
  457.         drm_ht_remove(&tdev->object_hash);
  458.         spin_unlock(&tdev->object_lock);
  459.  
  460.         kfree(tdev);
  461. }
  462. EXPORT_SYMBOL(ttm_object_device_release);
  463.