Subversion Repositories Kolibri OS

Rev

Rev 5060 | Rev 6084 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
  3.  *
  4.  * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
  5.  * All Rights Reserved.
  6.  *
  7.  * Author Rickard E. (Rik) Faith <faith@valinux.com>
  8.  *
  9.  * Permission is hereby granted, free of charge, to any person obtaining a
  10.  * copy of this software and associated documentation files (the "Software"),
  11.  * to deal in the Software without restriction, including without limitation
  12.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  13.  * and/or sell copies of the Software, and to permit persons to whom the
  14.  * Software is furnished to do so, subject to the following conditions:
  15.  *
  16.  * The above copyright notice and this permission notice (including the next
  17.  * paragraph) shall be included in all copies or substantial portions of the
  18.  * Software.
  19.  *
  20.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  21.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  22.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  23.  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  24.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  25.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  26.  * DEALINGS IN THE SOFTWARE.
  27.  */
  28.  
  29. #include <linux/fs.h>
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #include <linux/slab.h>
  33. #include <drm/drmP.h>
  34. #include <drm/drm_core.h>
  35.  
/*
 * Global DRM tunables.  Upstream Linux exposes these as module parameters;
 * in this port they are plain globals with fixed defaults (no module_param
 * hookup is visible here).
 */
unsigned int drm_debug = 0;     /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;    /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);

unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

/* Maps minor IDs to their drm_minor objects (see drm_minor_acquire()). */
struct idr drm_minors_idr;
  59.  
/*
 * drm_err - log an error message tagged "[drm:<caller>] *ERROR*".
 * @format: printf-style format string
 *
 * The caller's address (__builtin_return_address(0)) is printed through the
 * %pf specifier so the log identifies the function that raised the error.
 * The variadic arguments are forwarded via a struct va_format consumed by
 * the %pV specifier, so the whole message is emitted in a single printk
 * call.  Note the va_list must stay live across the printk, hence the
 * va_start/va_end bracketing around it.
 */
void drm_err(const char *format, ...)
{
    struct va_format vaf;
    va_list args;

    va_start(args, format);

    vaf.fmt = format;
    vaf.va = &args;

    printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
           __builtin_return_address(0), &vaf);

    va_end(args);
}
EXPORT_SYMBOL(drm_err);
  76.  
  77. void drm_ut_debug_printk(const char *function_name, const char *format, ...)
  78. {
  79.         struct va_format vaf;
  80.         va_list args;
  81.  
  82. //   if (drm_debug & request_level) {
  83. //       if (function_name)
  84. //           printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
  85. //       va_start(args, format);
  86. //       vprintk(format, args);
  87. //       va_end(args);
  88. //   }
  89. }
  90. EXPORT_SYMBOL(drm_ut_debug_printk);
  91.  
  92. #if 0
/*
 * drm_master_create - allocate and initialise a new master object for @minor.
 *
 * Sets up the refcount (one reference held by the caller), the embedded
 * lock state, the hash table for authentication magics and the magic
 * free-list.  Returns NULL on allocation failure; when the hash-table
 * creation fails, the partially built master is freed before returning.
 */
struct drm_master *drm_master_create(struct drm_minor *minor)
{
        struct drm_master *master;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        kref_init(&master->refcount);
        spin_lock_init(&master->lock.spinlock);
        init_waitqueue_head(&master->lock.lock_queue);
        /* Hash table holding the auth magics handed out to clients. */
        if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
                kfree(master);
                return NULL;
        }
        INIT_LIST_HEAD(&master->magicfree);
        master->minor = minor;

        return master;
}
  113.  
  114. struct drm_master *drm_master_get(struct drm_master *master)
  115. {
  116.         kref_get(&master->refcount);
  117.         return master;
  118. }
  119. EXPORT_SYMBOL(drm_master_get);
  120.  
/*
 * drm_master_destroy - kref release callback, runs when the last reference
 * to a master is dropped (see drm_master_put()).
 *
 * Under dev->struct_mutex: gives the driver a chance to clean up via its
 * master_destroy hook, removes every map owned by this master, frees the
 * unique-name buffer, drains the magic free-list and finally tears down
 * the magic hash table before freeing the master itself.
 */
static void drm_master_destroy(struct kref *kref)
{
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_magic_entry *pt, *next;
        struct drm_device *dev = master->minor->dev;
        struct drm_map_list *r_list, *list_temp;

        mutex_lock(&dev->struct_mutex);
        if (dev->driver->master_destroy)
                dev->driver->master_destroy(dev, master);

        /* Drop all maps that belong to this master. */
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }

        if (master->unique) {
                kfree(master->unique);
                master->unique = NULL;
                master->unique_len = 0;
        }

        /* Release every outstanding auth magic entry. */
        list_for_each_entry_safe(pt, next, &master->magicfree, head) {
                list_del(&pt->head);
                drm_ht_remove_item(&master->magiclist, &pt->hash_item);
                kfree(pt);
        }

        drm_ht_remove(&master->magiclist);

        mutex_unlock(&dev->struct_mutex);
        kfree(master);
}
  156.  
  157. void drm_master_put(struct drm_master **master)
  158. {
  159.         kref_put(&(*master)->refcount, drm_master_destroy);
  160.         *master = NULL;
  161. }
  162. EXPORT_SYMBOL(drm_master_put);
  163.  
/*
 * drm_setmaster_ioctl - make the calling file the master of its minor.
 *
 * All checks run under dev->master_mutex.  Succeeds trivially when the
 * caller is already master; fails with -EINVAL when the minor already has
 * a master attached or when the caller has no master object of its own to
 * install.  On success the minor takes a reference on the file's master;
 * a failing driver master_set hook rolls both state changes back.
 */
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        int ret = 0;

        mutex_lock(&dev->master_mutex);
        if (file_priv->is_master)
                goto out_unlock;

        if (file_priv->minor->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (!file_priv->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        file_priv->minor->master = drm_master_get(file_priv->master);
        file_priv->is_master = 1;
        if (dev->driver->master_set) {
                ret = dev->driver->master_set(dev, file_priv, false);
                if (unlikely(ret != 0)) {
                        /* Driver refused mastership: undo the promotion. */
                        file_priv->is_master = 0;
                        drm_master_put(&file_priv->minor->master);
                }
        }

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}
  197.  
/*
 * drm_dropmaster_ioctl - relinquish master status on the caller's minor.
 *
 * Returns -EINVAL unless the caller currently is master and the minor has
 * a master attached.  Otherwise notifies the driver through its
 * master_drop hook and releases the minor's master reference, all under
 * dev->master_mutex.
 */
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = -EINVAL;

        mutex_lock(&dev->master_mutex);
        if (!file_priv->is_master)
                goto out_unlock;

        if (!file_priv->minor->master)
                goto out_unlock;

        ret = 0;
        if (dev->driver->master_drop)
                dev->driver->master_drop(dev, file_priv, false);
        drm_master_put(&file_priv->minor->master);
        file_priv->is_master = 0;

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}
  220.  
  221. /*
  222.  * DRM Minors
  223.  * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
  224.  * of them is represented by a drm_minor object. Depending on the capabilities
  225.  * of the device-driver, different interfaces are registered.
  226.  *
  227.  * Minors can be accessed via dev->$minor_name. This pointer is either
  228.  * NULL or a valid drm_minor pointer and stays valid as long as the device is
  229.  * valid. This means, DRM minors have the same life-time as the underlying
  230.  * device. However, this doesn't mean that the minor is active. Minors are
  231.  * registered and unregistered dynamically according to device-state.
  232.  */
  233.  
  234. static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
  235.                                              unsigned int type)
  236. {
  237.         switch (type) {
  238.         case DRM_MINOR_LEGACY:
  239.                 return &dev->primary;
  240.         case DRM_MINOR_RENDER:
  241.                 return &dev->render;
  242.         case DRM_MINOR_CONTROL:
  243.                 return &dev->control;
  244.         default:
  245.                 return NULL;
  246.         }
  247. }
  248.  
  249. static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
  250. {
  251.         struct drm_minor *minor;
  252.  
  253.         minor = kzalloc(sizeof(*minor), GFP_KERNEL);
  254.         if (!minor)
  255.                 return -ENOMEM;
  256.  
  257.         minor->type = type;
  258.         minor->dev = dev;
  259.  
  260.         *drm_minor_get_slot(dev, type) = minor;
  261.         return 0;
  262. }
  263.  
  264. static void drm_minor_free(struct drm_device *dev, unsigned int type)
  265. {
  266.         struct drm_minor **slot;
  267.  
  268.         slot = drm_minor_get_slot(dev, type);
  269.         if (*slot) {
  270.                 drm_mode_group_destroy(&(*slot)->mode_group);
  271.                 kfree(*slot);
  272.                 *slot = NULL;
  273.         }
  274. }
  275.  
/*
 * drm_minor_register - allocate an ID for and publish one minor of @dev.
 *
 * The ID is taken from the [64*type, 64*(type+1)) window reserved for
 * this minor type.  A NULL placeholder is inserted into drm_minors_idr
 * first, so the ID is held while debugfs and sysfs are set up; only after
 * both succeed is the placeholder replaced with the real minor, which
 * makes it visible to drm_minor_acquire() lookups.  Errors unwind in
 * reverse order.  Registering a type the device does not provide is a
 * successful no-op.
 */
static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *new_minor;
        unsigned long flags;
        int ret;
        int minor_id;

        DRM_DEBUG("\n");

        new_minor = *drm_minor_get_slot(dev, type);
        if (!new_minor)
                return 0;

        /* Reserve an ID; idr_preload lets the allocation itself run
         * GFP_NOWAIT inside the spinlock. */
        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        minor_id = idr_alloc(&drm_minors_idr,
                             NULL,
                             64 * type,
                             64 * (type + 1),
                             GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (minor_id < 0)
                return minor_id;

        new_minor->index = minor_id;

        ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_id;
        }

        ret = drm_sysfs_device_add(new_minor);
        if (ret) {
                DRM_ERROR("DRM: Error sysfs_device_add.\n");
                goto err_debugfs;
        }

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, new_minor, new_minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor assigned %d\n", minor_id);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(new_minor);
err_id:
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor_id);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        new_minor->index = 0;
        return ret;
}
  333.  
/*
 * drm_minor_unregister - remove one minor of @dev from the public tables.
 *
 * No-op when the minor does not exist or has no kdev (i.e. was never
 * registered).  The idr mapping is removed first so new lookups fail
 * before the debugfs and sysfs entries are torn down.
 */
static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !minor->kdev)
                return;

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        minor->index = 0;

        drm_debugfs_cleanup(minor);
        drm_sysfs_device_remove(minor);
}
  351.  
/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        /* The device ref must be taken under the same lock as the lookup,
         * so the minor cannot be freed between find and ref. */
        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_ref(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_device_is_unplugged(minor->dev)) {
                /* Device is gone: give the reference back. */
                drm_dev_unref(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}
  388.  
/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
        /* Return the device reference taken in drm_minor_acquire(). */
        drm_dev_unref(minor->dev);
}
  399.  
/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up all DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        /* Two-stage teardown: make it unreachable, then drop the ref. */
        drm_dev_unregister(dev);
        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);
  424.  
/*
 * drm_unplug_dev - mark a hot-unplugged device as gone and release it.
 *
 * Unregisters all three minor types so no new opens can reach the device,
 * then flags it unplugged under drm_global_mutex.  When no file handles
 * are open the device is dropped immediately; otherwise teardown is
 * presumably completed by the last-close path — confirm against the
 * drm_release() implementation.
 */
void drm_unplug_dev(struct drm_device *dev)
{
        /* for a USB device */
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);

        mutex_lock(&drm_global_mutex);

        drm_device_set_unplugged(dev);

        if (dev->open_count == 0) {
                drm_put_dev(dev);
        }
        mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);
  442.  
  443. /*
  444.  * DRM internal mount
  445.  * We want to be able to allocate our own "struct address_space" to control
  446.  * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
  447.  * stand-alone address_space objects, so we need an underlying inode. As there
  448.  * is no way to allocate an independent inode easily, we need a fake internal
  449.  * VFS mount-point.
  450.  *
  451.  * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
  452.  * frees it again. You are allowed to use iget() and iput() to get references to
  453.  * the inode. But each drm_fs_inode_new() call must be paired with exactly one
  454.  * drm_fs_inode_free() call (which does not have to be the last iput()).
  455.  * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
  456.  * between multiple inode-users. You could, technically, call
  457.  * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
  458.  * iput(), but this way you'd end up with a new vfsmount for each inode.
  459.  */
  460.  
/* State for the internal DRM pseudo-mount described above; drm_fs_cnt
 * presumably counts active inode users pinning drm_fs_mnt — the code that
 * manipulates it is not in this chunk, verify in drm_fs_inode_new/free. */
static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

/* Dentry names rendered via simple_dname (gives "drm:[...]"-style paths). */
static const struct dentry_operations drm_fs_dops = {
        .d_dname        = simple_dname,
};

static const struct super_operations drm_fs_sops = {
        .statfs         = simple_statfs,
};
  471.  
/*
 * Mount callback for the internal "drm" pseudo filesystem: a kern_mount-
 * only fs that exists solely to provide inodes (see the comment above).
 */
static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        return mount_pseudo(fs_type,
                            "drm:",
                            &drm_fs_sops,
                            &drm_fs_dops,
                            /* magic number identifying this pseudo fs */
                            0x010203ff);
}
  481.  
/* Type descriptor for the internal pseudo-mount; anonymous superblock,
 * so kill_anon_super suffices for teardown. */
static struct file_system_type drm_fs_type = {
        .name           = "drm",
        .owner          = THIS_MODULE,
        .mount          = drm_fs_mount,
        .kill_sb        = kill_anon_super,
};
  488.  
  489. #endif
  490.  
  491.  
  492.  
  493.  
  494.  
/*
 * drm_fill_in_dev - minimal drm_device initialisation for this port.
 * @dev:    device structure to initialise
 * @ent:    matched PCI ID entry (not used in this body)
 * @driver: driver backing the device
 *
 * Initialises the list heads, spinlocks and mutexes embedded in @dev and,
 * for GEM-capable drivers, brings up the GEM core.  The map-hash setup of
 * the upstream Linux code is compiled out in this port.  Returns 0 on
 * success or a negative errno from drm_gem_init().
 */
int drm_fill_in_dev(struct drm_device *dev,
                           const struct pci_device_id *ent,
                           struct drm_driver *driver)
{
        int ret;
        dev->driver = driver;

        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);

//      if (drm_ht_create(&dev->map_hash, 12)) {
//              return -ENOMEM;
//      }



        if (driver->driver_features & DRIVER_GEM) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err_ctxbitmap;
                }
        }

        return 0;

err_ctxbitmap:
//   drm_lastclose(dev);
        return ret;
}
EXPORT_SYMBOL(drm_fill_in_dev);
  534. /**
  535.  * Compute size order.  Returns the exponent of the smaller power of two which
  536.  * is greater or equal to given number.
  537.  *
  538.  * \param size size.
  539.  * \return order.
  540.  *
  541.  * \todo Can be made faster.
  542.  */
  543. int drm_order(unsigned long size)
  544. {
  545.     int order;
  546.     unsigned long tmp;
  547.  
  548.     for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
  549.  
  550.     if (size & (size - 1))
  551.         ++order;
  552.  
  553.     return order;
  554. }
  555.  
  556. extern int x86_clflush_size;
  557.  
  558.  
  559. void drm_clflush_virt_range(void *addr, unsigned long length)
  560. {
  561.     char *tmp = addr;
  562.     char *end = tmp + length;
  563.     mb();
  564.     for (; tmp < end; tmp += x86_clflush_size)
  565.         clflush(tmp);
  566.     clflush(end - 1);
  567.     mb();
  568.     return;
  569. }
  570.  
  571.  
  572.