/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include "drm_internal.h"
#include "drm_legacy.h"

#include <syscall.h>

/**
 * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
 * @dev: DRM device
 * @size: size of block to allocate
 * @align: alignment of block
 *
 * Return: A handle to the allocated memory block on success or NULL on
 * failure.
 */
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
        drm_dma_handle_t *dmah;

        /* pci_alloc_consistent only guarantees alignment to the smallest
         * PAGE_SIZE order which is greater than or equal to the requested size.
         * Return NULL here for now to make sure nobody tries for larger alignment
         */
        if (align > size)
                return NULL;

        dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
        if (!dmah)
                return NULL;

        dmah->size = size;
        dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
                                         GFP_KERNEL | __GFP_COMP);

        if (dmah->vaddr == NULL) {
                kfree(dmah);
                return NULL;
        }

        memset(dmah->vaddr, 0, size);

        return dmah;
}
EXPORT_SYMBOL(drm_pci_alloc);

/*
 * Free a PCI consistent memory block without freeing its descriptor.
 *
 * This function is for internal use in the Linux-specific DRM core code.
 */
void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
        if (dmah->vaddr)
                KernelFree(dmah->vaddr);
}

/**
 * drm_pci_free - Free a PCI consistent memory block
 * @dev: DRM device
 * @dmah: handle to memory block
 */
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
        __drm_legacy_pci_free(dev, dmah);
        kfree(dmah);
}
EXPORT_SYMBOL(drm_pci_free);
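
/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might pair drm_pci_alloc() with drm_pci_free() for a small coherent DMA
 * buffer.  The function name and buffer size are hypothetical; only the
 * drm_pci_alloc()/drm_pci_free() calls reflect the API defined above.
 */
#if 0
static int example_alloc_dma_buffer(struct drm_device *dev)
{
        drm_dma_handle_t *dmah;

        /* The alignment argument must not exceed the requested size. */
        dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
        if (!dmah)
                return -ENOMEM;

        /* dmah->vaddr is the CPU mapping, dmah->busaddr the bus address. */
        memset(dmah->vaddr, 0xAA, dmah->size);

        drm_pci_free(dev, dmah);
        return 0;
}
#endif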

#if 0

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
        /* For historical reasons, drm_get_pci_domain() is busticated
         * on most archs and has to remain so for userspace interface
         * < 1.4, except on alpha which was right from the beginning
         */
        if (dev->if_version < 0x10004)
                return 0;
#endif /* __alpha__ */

        return pci_domain_nr(dev->pdev->bus);
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
        master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
                                        drm_get_pci_domain(dev),
                                        dev->pdev->bus->number,
                                        PCI_SLOT(dev->pdev->devfn),
                                        PCI_FUNC(dev->pdev->devfn));
        if (!master->unique)
                return -ENOMEM;

        master->unique_len = strlen(master->unique);
        return 0;
}
EXPORT_SYMBOL(drm_pci_set_busid);

int drm_pci_set_unique(struct drm_device *dev,
                       struct drm_master *master,
                       struct drm_unique *u)
{
        int domain, bus, slot, func, ret;

        master->unique_len = u->unique_len;
        master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
        if (!master->unique) {
                ret = -ENOMEM;
                goto err;
        }

        if (copy_from_user(master->unique, u->unique, master->unique_len)) {
                ret = -EFAULT;
                goto err;
        }

        master->unique[master->unique_len] = '\0';

        /* Return error if the busid submitted doesn't match the device's actual
         * busid.
         */
        ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
        if (ret != 3) {
                ret = -EINVAL;
                goto err;
        }

        domain = bus >> 8;
        bus &= 0xff;

        if ((domain != drm_get_pci_domain(dev)) ||
            (bus != dev->pdev->bus->number) ||
            (slot != PCI_SLOT(dev->pdev->devfn)) ||
            (func != PCI_FUNC(dev->pdev->devfn))) {
                ret = -EINVAL;
                goto err;
        }
        return 0;
err:
        return ret;
}

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != dev->pdev->bus->number ||
            p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
                return -EINVAL;

        p->irq = dev->pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
        return 0;
}

static void drm_pci_agp_init(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (drm_pci_device_is_agp(dev))
                        dev->agp = drm_agp_init(dev);
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
                                dev->agp->agp_info.aper_size *
                                1024 * 1024);
                }
        }
}

void drm_pci_agp_destroy(struct drm_device *dev)
{
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
}
#endif

/**
 * drm_get_pci_dev - Register a PCI device with the DRM subsystem
 * @pdev: PCI device
 * @ent: entry from the PCI ID table that matches @pdev
 * @driver: DRM device driver
 *
 * Attempts to get inter-module "drm" information. If we are first,
 * then register the character device and inter-module information.
 * Try and register; if we fail to register, back out previous work.
 *
 * NOTE: This function is deprecated, please use drm_dev_alloc() and
 * drm_dev_register() instead and remove your ->load() callback.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
                    struct drm_driver *driver)
{
        static struct drm_device drm_dev;
        static struct drm_file   drm_file;

        struct drm_device *dev;
        struct drm_file   *priv;

        int ret;

        dev  = &drm_dev;
        priv = &drm_file;

        drm_file_handlers[0] = priv;

//      ret = pci_enable_device(pdev);
//      if (ret)
//              goto err_g1;

        pci_set_master(pdev);

        DRM_DEBUG("\n");

        dev->pdev = pdev;
#ifdef __alpha__
        dev->hose = pdev->sysdata;
#endif

        if ((ret = drm_fill_in_dev(dev, ent, driver))) {
                printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
                goto err_g2;
        }

#if 0
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                pci_set_drvdata(pdev, dev);
                ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
                if (ret)
                        goto err_g2;
        }

        if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
                ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
                if (ret)
                        goto err_g21;
        }

        if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
                goto err_g3;
#endif

        if (dev->driver->load) {
                ret = dev->driver->load(dev, ent->driver_data);
                if (ret)
                        goto err_g4;
        }

        if (dev->driver->open) {
                ret = dev->driver->open(dev, priv);
                if (ret < 0)
                        goto err_g4;
        }

//      mutex_unlock(&drm_global_mutex);
        return 0;

err_g4:
//      drm_put_minor(&dev->primary);
err_g3:
//      if (dev->render)
//              drm_put_minor(&dev->render);
err_g21:
//      if (drm_core_check_feature(dev, DRIVER_MODESET))
//              drm_put_minor(&dev->control);
err_g2:
//      pci_disable_device(pdev);
err_g1:
//      kfree(dev);
//      mutex_unlock(&drm_global_mutex);
        return ret;
}
EXPORT_SYMBOL(drm_get_pci_dev);
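
/*
 * Illustrative usage sketch (not part of the original file): a driver's PCI
 * probe callback typically just forwards to drm_get_pci_dev() with its
 * struct drm_driver, as older DRM drivers such as radeon do.  The names
 * "example_driver" and "example_pci_probe" are hypothetical.
 */
#if 0
static struct drm_driver example_driver;

static int example_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &example_driver);
}
#endif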

int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
        struct pci_dev *root;
        u32 lnkcap, lnkcap2;

        *mask = 0;
        if (!dev->pdev)
                return -EINVAL;

        /* PCIe link-capability probing is stubbed out in this port. */
        return -EINVAL;

#if 0
        root = dev->pdev->bus->self;

        /* we've been informed that VIA and ServerWorks don't make the cut */
        if (root->vendor == PCI_VENDOR_ID_VIA ||
            root->vendor == PCI_VENDOR_ID_SERVERWORKS)
                return -EINVAL;

        pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
        pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);

        if (lnkcap2) {  /* PCIe r3.0-compliant */
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
                        *mask |= DRM_PCIE_SPEED_50;
                if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
                        *mask |= DRM_PCIE_SPEED_80;
        } else {        /* pre-r3.0 */
                if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
                        *mask |= DRM_PCIE_SPEED_25;
                if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
                        *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
        }

        DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n",
                 root->vendor, root->device, lnkcap, lnkcap2);
        return 0;
#endif
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
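
/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might query the link speed capabilities.  In this port the function above
 * is stubbed and always returns -EINVAL, so callers must treat failure as
 * "no PCIe gen 2/3 support".  The function name below is hypothetical.
 */
#if 0
static bool example_supports_pcie_gen2(struct drm_device *dev)
{
        u32 mask;

        if (drm_pcie_get_speed_cap_mask(dev, &mask))
                return false;

        return (mask & DRM_PCIE_SPEED_50) != 0;
}
#endif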