/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/drm.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
#include <linux/pci.h>

#include "i915_drv.h"
#include <syscall.h>

#define PCI_VENDOR_ID_INTEL        0x8086

#define INTEL_VGA_DEVICE(id, info) {        \
    .class = PCI_CLASS_DISPLAY_VGA << 8,    \
    .class_mask = 0xff0000,                 \
    .vendor = 0x8086,                       \
    .device = id,                           \
    .subvendor = PCI_ANY_ID,                \
    .subdevice = PCI_ANY_ID,                \
    .driver_data = (unsigned long) info }
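
/*
 * Illustrative note (not from the original source): INTEL_VGA_DEVICE() uses
 * C99 designated initializers, so an entry such as
 * INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info) in the table below
 * expands roughly to
 *
 *   { .class = PCI_CLASS_DISPLAY_VGA << 8, .class_mask = 0xff0000,
 *     .vendor = 0x8086, .device = 0x0102,
 *     .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *     .driver_data = (unsigned long) &intel_sandybridge_d_info },
 *
 * i.e. it matches any Intel VGA-class function with that device id and
 * stashes a pointer to the per-chipset intel_device_info in driver_data.
 */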

static const struct intel_device_info intel_sandybridge_d_info = {
    .gen = 6,
    .need_gfx_hws = 1,
    .has_hotplug  = 1,
    .has_bsd_ring = 1,
    .has_blt_ring = 1,
};

static const struct intel_device_info intel_sandybridge_m_info = {
    .gen = 6,
    .is_mobile    = 1,
    .need_gfx_hws = 1,
    .has_hotplug  = 1,
    .has_fbc      = 1,
    .has_bsd_ring = 1,
    .has_blt_ring = 1,
};


static const struct pci_device_id pciidlist[] = {       /* aka */
    INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
    INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info),
    INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info),
    INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
    INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info),
    INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info),
    INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info),
    {0, 0, 0}
};

#define INTEL_PCH_DEVICE_ID_MASK        0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE    0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE    0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE    0x1e00

void intel_detect_pch(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct pci_dev *pch;

    /*
     * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
     * make graphics device passthrough easy for a VMM, which then only
     * needs to expose the ISA bridge to let the driver know the real
     * hardware underneath. This is a requirement from the virtualization
     * team.
     */
    pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
    if (pch) {
        if (pch->vendor == PCI_VENDOR_ID_INTEL) {
            int id;
            id = pch->device & INTEL_PCH_DEVICE_ID_MASK;

            if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_IBX;
                DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
            } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
                dev_priv->pch_type = PCH_CPT;
                DRM_DEBUG_KMS("Found CougarPoint PCH\n");
            } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                /* PantherPoint is CPT compatible */
                dev_priv->pch_type = PCH_CPT;
                DRM_DEBUG_KMS("Found PantherPoint PCH\n");
            }
        }
    }
}
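
/*
 * Illustrative note (assumption, not part of the original file): probing by
 * class code, as intel_detect_pch() does above, keeps working when a VMM
 * exposes the ISA bridge at a different address. A fixed-address probe of
 * Dev31:Fun0 would look roughly like
 *
 *   isa = pci_get_bus_and_slot(0, PCI_DEVFN(0x1f, 0));   // Linux-side API,
 *                                                        // shown for contrast
 *
 * and would break under passthrough, which is why the class-based
 * pci_get_class() lookup is used instead.
 */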

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
    int count;

    count = 0;
    while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
        udelay(10);

    I915_WRITE_NOTRACE(FORCEWAKE, 1);
    POSTING_READ(FORCEWAKE);

    count = 0;
    while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
        udelay(10);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
//    WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

    /* Forcewake is atomic in case we get in here without the lock */
    if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
        __gen6_gt_force_wake_get(dev_priv);
}
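
/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * needs the GT to stay awake across several accesses would bracket the
 * sequence with the get/put pair described above, e.g.
 *
 *   gen6_gt_force_wake_get(dev_priv);
 *   val0 = I915_READ(SOME_GT_REGISTER);      // SOME_GT_REGISTER and
 *   val1 = I915_READ(SOME_OTHER_REGISTER);   // SOME_OTHER_REGISTER are
 *   gen6_gt_force_wake_put(dev_priv);        // placeholder names only
 *
 * The forcewake_count reference count lets nested get/put pairs from
 * different code paths share a single hardware wake/unwake transition.
 */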

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
    I915_WRITE_NOTRACE(FORCEWAKE, 0);
    POSTING_READ(FORCEWAKE);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
//    WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

    if (atomic_dec_and_test(&dev_priv->forcewake_count))
        __gen6_gt_force_wake_put(dev_priv);
}

void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
    if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
        int loop = 500;
        u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
        while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
            udelay(10);
            fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
        }
//        WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
        dev_priv->gt_fifo_count = fifo;
    }
    dev_priv->gt_fifo_count--;
}


int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent);

int i915_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id  *ent;
    int  err;

    if( init_agp() != 0)
    {
        DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
        return 0;
    };

    ent = find_pci_device(&device, pciidlist);

    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return 0;
    };

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    return err;
}

int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device *dev;
    int ret;

    ENTER();

    dev = kzalloc(sizeof(*dev), 0);
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);

//int i915_driver_load(struct drm_device *dev, unsigned long flags)

    ret = i915_driver_load(dev, ent->driver_data );
//    if (ret)
//        goto err_g4;

//    if( radeon_modeset )
//        init_display_kms(dev->dev_private, &usermode);
//    else
//        init_display(dev->dev_private, &usermode);

    LEAVE();

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    LEAVE();

    return ret;
}