/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_core.h>

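/*
 * Local mirror of the kernel's struct va_format (normally provided by
 * <linux/kernel.h>): printk's %pV specifier uses it to expand a saved
 * format/va_list pair. Presumably duplicated here because this port does
 * not pull in the full set of kernel headers.
 */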
struct va_format {
    const char *fmt;
    va_list *va;
};

unsigned int drm_debug = 0;     /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);

unsigned int drm_rnodes = 0;    /* 1 to enable experimental render nodes API */
EXPORT_SYMBOL(drm_rnodes);

/* 1 to allow user space to request universal planes (experimental) */
unsigned int drm_universal_planes = 0;
EXPORT_SYMBOL(drm_universal_planes);

unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);

unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);

/*
 * Default to use monotonic timestamps for wait-for-vblank and page-flip
 * complete events.
 */
unsigned int drm_timestamp_monotonic = 1;

struct idr drm_minors_idr;

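/*
 * drm_err - log an error message, tagged with the DRM prefix and the name
 * of the calling function. Normally reached through the DRM_ERROR() macro
 * rather than called directly.
 */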
int drm_err(const char *func, const char *format, ...)
{
        struct va_format vaf;
        va_list args;
        int r;

        va_start(args, format);

        vaf.fmt = format;
        vaf.va = &args;

        r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);

        va_end(args);

        return r;
}
EXPORT_SYMBOL(drm_err);

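/*
 * drm_ut_debug_printk - debug logging hook. The original body (left
 * disabled below) filtered on drm_debug and forwarded to vprintk(); in
 * this port the function is a stub, so debug messages are dropped.
 */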
void drm_ut_debug_printk(const char *function_name, const char *format, ...)
{
//   if (drm_debug & request_level) {
//       if (function_name)
//           printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
//       va_start(args, format);
//       vprintk(format, args);
//       va_end(args);
//   }
}
EXPORT_SYMBOL(drm_ut_debug_printk);

#if 0
struct drm_master *drm_master_create(struct drm_minor *minor)
{
        struct drm_master *master;

        master = kzalloc(sizeof(*master), GFP_KERNEL);
        if (!master)
                return NULL;

        kref_init(&master->refcount);
        spin_lock_init(&master->lock.spinlock);
        init_waitqueue_head(&master->lock.lock_queue);
        if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
                kfree(master);
                return NULL;
        }
        INIT_LIST_HEAD(&master->magicfree);
        master->minor = minor;

        return master;
}

struct drm_master *drm_master_get(struct drm_master *master)
{
        kref_get(&master->refcount);
        return master;
}
EXPORT_SYMBOL(drm_master_get);

static void drm_master_destroy(struct kref *kref)
{
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
        struct drm_magic_entry *pt, *next;
        struct drm_device *dev = master->minor->dev;
        struct drm_map_list *r_list, *list_temp;

        mutex_lock(&dev->struct_mutex);
        if (dev->driver->master_destroy)
                dev->driver->master_destroy(dev, master);

        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
                        drm_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }

        if (master->unique) {
                kfree(master->unique);
                master->unique = NULL;
                master->unique_len = 0;
        }

        list_for_each_entry_safe(pt, next, &master->magicfree, head) {
                list_del(&pt->head);
                drm_ht_remove_item(&master->magiclist, &pt->hash_item);
                kfree(pt);
        }

        drm_ht_remove(&master->magiclist);

        mutex_unlock(&dev->struct_mutex);
        kfree(master);
}

void drm_master_put(struct drm_master **master)
{
        kref_put(&(*master)->refcount, drm_master_destroy);
        *master = NULL;
}
EXPORT_SYMBOL(drm_master_put);

int drm_setmaster_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        int ret = 0;

        mutex_lock(&dev->master_mutex);
        if (file_priv->is_master)
                goto out_unlock;

        if (file_priv->minor->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        if (!file_priv->master) {
                ret = -EINVAL;
                goto out_unlock;
        }

        file_priv->minor->master = drm_master_get(file_priv->master);
        file_priv->is_master = 1;
        if (dev->driver->master_set) {
                ret = dev->driver->master_set(dev, file_priv, false);
                if (unlikely(ret != 0)) {
                        file_priv->is_master = 0;
                        drm_master_put(&file_priv->minor->master);
                }
        }

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}

int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        int ret = -EINVAL;

        mutex_lock(&dev->master_mutex);
        if (!file_priv->is_master)
                goto out_unlock;

        if (!file_priv->minor->master)
                goto out_unlock;

        ret = 0;
        if (dev->driver->master_drop)
                dev->driver->master_drop(dev, file_priv, false);
        drm_master_put(&file_priv->minor->master);
        file_priv->is_master = 0;

out_unlock:
        mutex_unlock(&dev->master_mutex);
        return ret;
}

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means DRM minors have the same lifetime as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
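/*
 * A rough sketch of the resulting lifecycle, using the helpers defined
 * below:
 *
 *   drm_minor_alloc(dev, DRM_MINOR_LEGACY);    // slot created with device
 *   drm_minor_register(dev, DRM_MINOR_LEGACY); // ID, debugfs, sysfs
 *   ...device active...
 *   drm_minor_unregister(dev, DRM_MINOR_LEGACY);
 *   drm_minor_free(dev, DRM_MINOR_LEGACY);     // freed with device
 */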

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
                                             unsigned int type)
{
        switch (type) {
        case DRM_MINOR_LEGACY:
                return &dev->primary;
        case DRM_MINOR_RENDER:
                return &dev->render;
        case DRM_MINOR_CONTROL:
                return &dev->control;
        default:
                return NULL;
        }
}

static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;

        minor = kzalloc(sizeof(*minor), GFP_KERNEL);
        if (!minor)
                return -ENOMEM;

        minor->type = type;
        minor->dev = dev;

        *drm_minor_get_slot(dev, type) = minor;
        return 0;
}

static void drm_minor_free(struct drm_device *dev, unsigned int type)
{
        struct drm_minor **slot;

        slot = drm_minor_get_slot(dev, type);
        if (*slot) {
                drm_mode_group_destroy(&(*slot)->mode_group);
                kfree(*slot);
                *slot = NULL;
        }
}

static int drm_minor_register(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *new_minor;
        unsigned long flags;
        int ret;
        int minor_id;

        DRM_DEBUG("\n");

        new_minor = *drm_minor_get_slot(dev, type);
        if (!new_minor)
                return 0;

        idr_preload(GFP_KERNEL);
        spin_lock_irqsave(&drm_minor_lock, flags);
        minor_id = idr_alloc(&drm_minors_idr,
                             NULL,
                             64 * type,
                             64 * (type + 1),
                             GFP_NOWAIT);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        idr_preload_end();

        if (minor_id < 0)
                return minor_id;

        new_minor->index = minor_id;

        ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
        if (ret) {
                DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
                goto err_id;
        }

        ret = drm_sysfs_device_add(new_minor);
        if (ret) {
                DRM_ERROR("DRM: Error sysfs_device_add.\n");
                goto err_debugfs;
        }

        /* replace NULL with @minor so lookups will succeed from now on */
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_replace(&drm_minors_idr, new_minor, new_minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        DRM_DEBUG("new minor assigned %d\n", minor_id);
        return 0;

err_debugfs:
        drm_debugfs_cleanup(new_minor);
err_id:
        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor_id);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        new_minor->index = 0;
        return ret;
}

static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
{
        struct drm_minor *minor;
        unsigned long flags;

        minor = *drm_minor_get_slot(dev, type);
        if (!minor || !minor->kdev)
                return;

        spin_lock_irqsave(&drm_minor_lock, flags);
        idr_remove(&drm_minors_idr, minor->index);
        spin_unlock_irqrestore(&drm_minor_lock, flags);
        minor->index = 0;

        drm_debugfs_cleanup(minor);
        drm_sysfs_device_remove(minor);
}

/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased, so you must release
 * this object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or an ERR_PTR-encoded
 * negative error code on failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
        struct drm_minor *minor;
        unsigned long flags;

        spin_lock_irqsave(&drm_minor_lock, flags);
        minor = idr_find(&drm_minors_idr, minor_id);
        if (minor)
                drm_dev_ref(minor->dev);
        spin_unlock_irqrestore(&drm_minor_lock, flags);

        if (!minor) {
                return ERR_PTR(-ENODEV);
        } else if (drm_device_is_unplugged(minor->dev)) {
                drm_dev_unref(minor->dev);
                return ERR_PTR(-ENODEV);
        }

        return minor;
}

/**
 * drm_minor_release - Release DRM minor
 * @minor: Pointer to DRM minor object
 *
 * Release a minor that was previously acquired via drm_minor_acquire().
 */
void drm_minor_release(struct drm_minor *minor)
{
        drm_dev_unref(minor->dev);
}
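
/*
 * Typical (hypothetical) open-path usage, assuming the caller derives
 * minor_id from the character device being opened:
 *
 *   struct drm_minor *minor = drm_minor_acquire(minor_id);
 *   if (IS_ERR(minor))
 *           return PTR_ERR(minor);
 *   ...use minor->dev...
 *   drm_minor_release(minor);
 */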

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Use of this function is discouraged. It will eventually go away completely.
 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 */
void drm_put_dev(struct drm_device *dev)
{
        DRM_DEBUG("\n");

        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }

        drm_dev_unregister(dev);
        drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_put_dev);

void drm_unplug_dev(struct drm_device *dev)
{
        /* for a USB device */
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
        drm_minor_unregister(dev, DRM_MINOR_CONTROL);

        mutex_lock(&drm_global_mutex);

        drm_device_set_unplugged(dev);

        if (dev->open_count == 0) {
                drm_put_dev(dev);
        }
        mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_unplug_dev);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
 */
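/*
 * Sketch of the pairing rule described above (drm_fs_inode_new() and
 * drm_fs_inode_free() themselves are not part of this excerpt):
 *
 *   struct inode *inode = drm_fs_inode_new();  // pins the "drm" mount
 *   ...take/drop extra references with iget()/iput() as needed...
 *   drm_fs_inode_free(inode);                  // drops the mount pin
 */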

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static const struct dentry_operations drm_fs_dops = {
        .d_dname        = simple_dname,
};

static const struct super_operations drm_fs_sops = {
        .statfs         = simple_statfs,
};

static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
                                   const char *dev_name, void *data)
{
        return mount_pseudo(fs_type,
                            "drm:",
                            &drm_fs_sops,
                            &drm_fs_dops,
                            0x010203ff);
}

static struct file_system_type drm_fs_type = {
        .name           = "drm",
        .owner          = THIS_MODULE,
        .mount          = drm_fs_mount,
        .kill_sb        = kill_anon_super,
};

#endif
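/*
 * drm_fill_in_dev - minimal device initialization for this port: wires up
 * the driver pointer, the bookkeeping lists and locks, and GEM when the
 * driver advertises DRIVER_GEM. The map-hash setup from the original
 * kernel code is left disabled below.
 */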
int drm_fill_in_dev(struct drm_device *dev,
                    const struct pci_device_id *ent,
                    struct drm_driver *driver)
{
        int ret;

        dev->driver = driver;

        INIT_LIST_HEAD(&dev->filelist);
        INIT_LIST_HEAD(&dev->ctxlist);
        INIT_LIST_HEAD(&dev->vmalist);
        INIT_LIST_HEAD(&dev->maplist);
        INIT_LIST_HEAD(&dev->vblank_event_list);

        spin_lock_init(&dev->buf_lock);
        spin_lock_init(&dev->event_lock);
        mutex_init(&dev->struct_mutex);
        mutex_init(&dev->ctxlist_mutex);

//      if (drm_ht_create(&dev->map_hash, 12)) {
//              return -ENOMEM;
//      }

        if (driver->driver_features & DRIVER_GEM) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
                        goto err_ctxbitmap;
                }
        }

        return 0;

err_ctxbitmap:
//   drm_lastclose(dev);
        return ret;
}
EXPORT_SYMBOL(drm_fill_in_dev);

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
    int order;
    unsigned long tmp;

    for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
        ;

    if (size & (size - 1))
        ++order;

    return order;
}
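
/*
 * Examples: drm_order(4096) == 12, since 4096 is already a power of two;
 * drm_order(5000) == 13, since the next power of two is 8192.
 */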

extern int x86_clflush_size;

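/*
 * clflush - flush the data-cache line containing the byte at *__p, using
 * the x86 CLFLUSH instruction.
 */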
static inline void clflush(volatile void *__p)
{
    asm volatile("clflush %0" : "+m" (*(volatile char *)__p));
}

void drm_clflush_virt_range(void *addr, unsigned long length)
{
    char *tmp = addr;
    char *end = tmp + length;

    mb();
    for (; tmp < end; tmp += x86_clflush_size)
        clflush(tmp);
    clflush(end - 1);
    mb();
}
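
/*
 * Typical (hypothetical) use: after the CPU writes through a CPU mapping
 * of a buffer the GPU will read, flush the dirty range so the data is
 * visible in memory, e.g. drm_clflush_virt_range(vaddr, 4096);
 */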