/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/irqreturn.h>
//#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX                       \
        (I915_ASLE_INTERRUPT |                          \
         I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |          \
         I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |          \
         I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |  \
         I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |  \
         I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
                                 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
                                 PIPE_VBLANK_INTERRUPT_ENABLE)
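
/*
 * Illustrative sketch (not part of this port): per the comment above, pipe
 * events are enabled through PIPESTAT rather than IMR. The upstream driver
 * does this with a helper along these lines; it assumes the pipestat[] cache
 * in drm_i915_private and the PIPESTAT(pipe) register macro from i915_reg.h.
 */
#if 0
static void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
    if ((dev_priv->pipestat[pipe] & mask) != mask) {
        u32 reg = PIPESTAT(pipe);

        dev_priv->pipestat[pipe] |= mask;
        /* Enable bits live in the high word; writing the matching status
         * bit (low word) clears any event that is already pending. */
        I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
        POSTING_READ(reg);
    }
}
#endif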

#define DRM_I915_VBLANK_PIPE_ALL        (DRM_I915_VBLANK_PIPE_A | \
                                         DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != 0) {
        dev_priv->irq_mask &= ~mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
    if ((dev_priv->irq_mask & mask) != mask) {
        dev_priv->irq_mask |= mask;
        I915_WRITE(DEIMR, dev_priv->irq_mask);
        POSTING_READ(DEIMR);
    }
}
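
/*
 * Illustrative sketch (not wired up in this port): upstream i915 builds its
 * vblank on/off hooks, referenced from intel_irq_init() below, on top of the
 * two helpers above. Locking around irq_mask is omitted here; the real driver
 * takes dev_priv->irq_lock with interrupts disabled.
 */
#if 0
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    /* Unmask the vblank bit for the requested pipe in DEIMR. */
    ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
                                DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
    return 0;
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    /* Mask the vblank bit again once nobody is waiting on it. */
    ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
                                 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
}
#endif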


static int ironlake_irq_handler(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    int ret = IRQ_NONE;
    u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
    u32 hotplug_mask;
    struct drm_i915_master_private *master_priv;
    u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

    atomic_inc(&dev_priv->irq_received);

    if (IS_GEN6(dev))
        bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

    /* disable the master interrupt before clearing iir */
    de_ier = I915_READ(DEIER);
    I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
    POSTING_READ(DEIER);

    de_iir = I915_READ(DEIIR);
    gt_iir = I915_READ(GTIIR);
    pch_iir = I915_READ(SDEIIR);
    pm_iir = I915_READ(GEN6_PMIIR);

    if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
        (!IS_GEN6(dev) || pm_iir == 0))
        goto done;

    if (HAS_PCH_CPT(dev))
        hotplug_mask = SDE_HOTPLUG_MASK_CPT;
    else
        hotplug_mask = SDE_HOTPLUG_MASK;

    ret = IRQ_HANDLED;

//    if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
//        notify_ring(dev, &dev_priv->ring[RCS]);
//    if (gt_iir & bsd_usr_interrupt)
//        notify_ring(dev, &dev_priv->ring[VCS]);
//    if (gt_iir & GT_BLT_USER_INTERRUPT)
//        notify_ring(dev, &dev_priv->ring[BCS]);

//    if (de_iir & DE_GSE)
//        intel_opregion_gse_intr(dev);

//    if (de_iir & DE_PLANEA_FLIP_DONE) {
//        intel_prepare_page_flip(dev, 0);
//        intel_finish_page_flip_plane(dev, 0);
//    }

//    if (de_iir & DE_PLANEB_FLIP_DONE) {
//        intel_prepare_page_flip(dev, 1);
//        intel_finish_page_flip_plane(dev, 1);
//    }

//    if (de_iir & DE_PIPEA_VBLANK)
//        drm_handle_vblank(dev, 0);

//    if (de_iir & DE_PIPEB_VBLANK)
//        drm_handle_vblank(dev, 1);

    /* check event from PCH */
//    if (de_iir & DE_PCH_EVENT) {
//        if (pch_iir & hotplug_mask)
//            queue_work(dev_priv->wq, &dev_priv->hotplug_work);
//        pch_irq_handler(dev);
//    }

//    if (de_iir & DE_PCU_EVENT) {
//        I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
//        i915_handle_rps_change(dev);
//    }

    if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
        /*
         * IIR bits should never already be set because IMR should
         * prevent an interrupt from being shown in IIR. The warning
         * catches a case where we have unsafely cleared
         * dev_priv->pm_iir. Missing an interrupt of the same type is
         * not a problem in itself, but it points to a bug in the
         * masking logic.
         *
         * The mask bit in IMR is cleared by rps_work.
         */
        unsigned long flags;
        spin_lock_irqsave(&dev_priv->rps_lock, flags);
        WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
        dev_priv->pm_iir |= pm_iir;
        I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
        POSTING_READ(GEN6_PMIMR);
        spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
//        queue_work(dev_priv->wq, &dev_priv->rps_work);
    }

    /* the PCH hotplug event must be cleared before clearing the CPU irq */
    I915_WRITE(SDEIIR, pch_iir);
    I915_WRITE(GTIIR, gt_iir);
    I915_WRITE(DEIIR, de_iir);
    I915_WRITE(GEN6_PMIIR, pm_iir);

done:
    I915_WRITE(DEIER, de_ier);
    POSTING_READ(DEIER);

    return ret;
}
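
/*
 * Counterpart sketch (illustrative only): upstream clears the deferred PM
 * state from a work item, gen6_pm_rps_work, which this port does not carry.
 * After the event has been processed, pm_iir is reset and GEN6_PMIMR is
 * written back to 0 so further PM interrupts can be raised again. The
 * function name below is hypothetical.
 */
#if 0
static void example_gen6_pm_unmask(drm_i915_private_t *dev_priv)
{
    unsigned long flags;

    spin_lock_irqsave(&dev_priv->rps_lock, flags);
    dev_priv->pm_iir = 0;
    I915_WRITE(GEN6_PMIMR, 0);
    POSTING_READ(GEN6_PMIMR);
    spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
}
#endif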


/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

    atomic_set(&dev_priv->irq_received, 0);

//    INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
//    INIT_WORK(&dev_priv->error_work, i915_error_work_func);
//    if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
//        INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);

    I915_WRITE(HWSTAM, 0xeffe);

    if (IS_GEN6(dev)) {
        /* Work around stalls observed on Sandy Bridge GPUs by
         * making the blitter command streamer generate a
         * write to the Hardware Status Page for
         * MI_USER_INTERRUPT.  This appears to serialize the
         * previous seqno write out before the interrupt
         * happens.
         */
        I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
        I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
    }

    /* XXX hotplug from PCH */

    I915_WRITE(DEIMR, 0xffffffff);
    I915_WRITE(DEIER, 0x0);
    POSTING_READ(DEIER);

    /* and GT */
    I915_WRITE(GTIMR, 0xffffffff);
    I915_WRITE(GTIER, 0x0);
    POSTING_READ(GTIER);

    /* south display irq */
    I915_WRITE(SDEIMR, 0xffffffff);
    I915_WRITE(SDEIER, 0x0);
    POSTING_READ(SDEIER);
}

/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the DisplayPort spec).
 *
 * This register is the same on all known PCH chips.
 */

static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32     hotplug;

        hotplug = I915_READ(PCH_PORT_HOTPLUG);
        hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
        hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
        hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
        hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
    drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
    /* interrupts that are always enabled */
    u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
               DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
    u32 render_irqs;
    u32 hotplug_mask;

//    DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
//    if (HAS_BSD(dev))
//        DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
//    if (HAS_BLT(dev))
//        DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);

    dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
    dev_priv->irq_mask = ~display_mask;

    /* these should always be able to generate an irq */
    I915_WRITE(DEIIR, I915_READ(DEIIR));
    I915_WRITE(DEIMR, dev_priv->irq_mask);
    I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
    POSTING_READ(DEIER);

    dev_priv->gt_irq_mask = ~0;

    I915_WRITE(GTIIR, I915_READ(GTIIR));
    I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

    if (IS_GEN6(dev))
        render_irqs =
            GT_USER_INTERRUPT |
            GT_GEN6_BSD_USER_INTERRUPT |
            GT_BLT_USER_INTERRUPT;
    else
        render_irqs =
            GT_USER_INTERRUPT |
            GT_PIPE_NOTIFY |
            GT_BSD_USER_INTERRUPT;
    I915_WRITE(GTIER, render_irqs);
    POSTING_READ(GTIER);

    if (HAS_PCH_CPT(dev)) {
        hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
                SDE_PORTB_HOTPLUG_CPT |
                SDE_PORTC_HOTPLUG_CPT |
                SDE_PORTD_HOTPLUG_CPT);
    } else {
        hotplug_mask = (SDE_CRT_HOTPLUG |
                SDE_PORTB_HOTPLUG |
                SDE_PORTC_HOTPLUG |
                SDE_PORTD_HOTPLUG |
                SDE_AUX_MASK);
    }

    dev_priv->pch_irq_mask = ~hotplug_mask;

    I915_WRITE(SDEIIR, I915_READ(SDEIIR));
    I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
    I915_WRITE(SDEIER, hotplug_mask);
    POSTING_READ(SDEIER);

    ironlake_enable_pch_hotplug(dev);

    if (IS_IRONLAKE_M(dev)) {
        /* Clear & enable PCU event interrupts */
        I915_WRITE(DEIIR, DE_PCU_EVENT);
        I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
        ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
    }

    return 0;
}


void intel_irq_init(struct drm_device *dev)
{
#if 0
        if (IS_IVYBRIDGE(dev)) {
                /* Share pre & uninstall handlers with ILK/SNB */
                dev->driver->irq_handler = ivybridge_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ivybridge_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ivybridge_enable_vblank;
                dev->driver->disable_vblank = ivybridge_disable_vblank;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev->driver->irq_handler = ironlake_irq_handler;
                dev->driver->irq_preinstall = ironlake_irq_preinstall;
                dev->driver->irq_postinstall = ironlake_irq_postinstall;
                dev->driver->irq_uninstall = ironlake_irq_uninstall;
                dev->driver->enable_vblank = ironlake_enable_vblank;
                dev->driver->disable_vblank = ironlake_disable_vblank;
        } else {
                dev->driver->irq_preinstall = i915_driver_irq_preinstall;
                dev->driver->irq_postinstall = i915_driver_irq_postinstall;
                dev->driver->irq_uninstall = i915_driver_irq_uninstall;
                dev->driver->irq_handler = i915_driver_irq_handler;
                dev->driver->enable_vblank = i915_enable_vblank;
                dev->driver->disable_vblank = i915_disable_vblank;
        }
#endif
}


/* Device whose interrupts are serviced by irq_handler_kms() below. */
static struct drm_device *irq_device;

/* Trampoline registered via AttachIntHandler(): forwards the hardware
 * interrupt to the Ironlake/Sandy Bridge handler above. */
void irq_handler_kms(void)
{
//    printf("%s\n",__FUNCTION__);
    ironlake_irq_handler(irq_device);
}

int drm_irq_install(struct drm_device *dev)
{
    int irq_line;
    int ret = 0;

    ENTER();

    mutex_lock(&dev->struct_mutex);

    /* Driver must have been initialized */
    if (!dev->dev_private) {
        mutex_unlock(&dev->struct_mutex);
        return -EINVAL;
    }

    if (dev->irq_enabled) {
        mutex_unlock(&dev->struct_mutex);
        return -EBUSY;
    }
    dev->irq_enabled = 1;
    mutex_unlock(&dev->struct_mutex);

    irq_device = dev;
    irq_line   = drm_dev_to_irq(dev);

    DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));

    ironlake_irq_preinstall(dev);

    /* a zero return from AttachIntHandler() means the handler was not
     * attached; roll back the irq_enabled flag in that case */
    ret = AttachIntHandler(irq_line, irq_handler_kms, 2);
    if (ret == 0) {
        mutex_lock(&dev->struct_mutex);
        dev->irq_enabled = 0;
        mutex_unlock(&dev->struct_mutex);
        return ret;
    }

    ret = ironlake_irq_postinstall(dev);

//    if (ret < 0) {
//        mutex_lock(&dev->struct_mutex);
//        dev->irq_enabled = 0;
//        mutex_unlock(&dev->struct_mutex);
//        free_irq(drm_dev_to_irq(dev), dev);
//    }

    /* clear the Interrupt Disable bit (bit 10) of the PCI command register
     * (offset 4) so the device can deliver legacy INTx interrupts */
    u16_t cmd = PciRead16(dev->pdev->busnr, dev->pdev->devfn, 4);

    cmd &= ~(1<<10);

    PciWrite16(dev->pdev->busnr, dev->pdev->devfn, 4, cmd);

    dbgprintf("PCI_CMD: %04x\n", cmd);

    DRM_INFO("i915: irq initialized.\n");
    LEAVE();
    return ret;
}
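
/*
 * Illustrative only (not part of this file): a driver load path would bring
 * up interrupts with the helpers above roughly as sketched here, once
 * dev->dev_private has been initialized. The function name is hypothetical.
 */
#if 0
static int example_irq_bringup(struct drm_device *dev)
{
    int ret;

    intel_irq_init(dev);          /* fill in the irq hooks (currently stubbed) */
    ret = drm_irq_install(dev);   /* preinstall, attach handler, postinstall   */
    if (ret != 0)
        DRM_ERROR("failed to install the i915 IRQ handler\n");
    return ret;
}
#endif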