
/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

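/*
 * Legacy DRI1/UMS ring helpers: LP_RING resolves to the render ring (RCS),
 * and BEGIN/OUT/ADVANCE wrap the intel_ring_* API so the old-style DRM
 * code below can emit commands directly into that ring.
 */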
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
        intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
        intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
        __intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                      \
        if (LP_RING(dev->dev_private)->buffer->obj == NULL)                     \
                LOCK_TEST_WITH_RETURN(dev, file);                       \
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
        if (I915_NEED_GFX_HWS(dev_priv->dev))
                return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
        else
                return intel_read_status_page(LP_RING(dev_priv), reg);
}

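/*
 * The "breadcrumb" is a sequence number the ring stores into dword
 * I915_BREADCRUMB_INDEX (0x21) of the hardware status page; DRI1 userspace
 * reads it back via the sarea to learn which request last completed.
 */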
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX           0x21

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;

        /*
         * The dri breadcrumb update races against the drm master disappearing.
         * Instead of trying to fix this (it is far from the only UMS issue),
         * just don't do the update in KMS mode.
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }
}

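/*
 * HWS_PGA holds the bus address of the hardware status page. The address
 * is page aligned; on gen4+ the high address bits (35:32) are folded into
 * bits 7:4 of the register, which is what the shift/mask below does.
 */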
static void i915_write_hws_pga(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (ring->status_page.gfx_addr) {
                ring->status_page.gfx_addr = 0;
                iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

void i915_kernel_lost_context(struct drm_device * dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_engine_cs *ring = LP_RING(dev_priv);
        struct intel_ringbuffer *ringbuf = ring->buffer;

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
        if (ringbuf->space < 0)
                ringbuf->space += ringbuf->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->buffer->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->dri1.cpp = init->cpp;
        dev_priv->dri1.back_offset = init->back_offset;
        dev_priv->dri1.front_offset = init->front_offset;
        dev_priv->dri1.current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

        if (ring->buffer->virtual_start == NULL) {
                DRM_ERROR("cannot ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Cannot find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
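/*
 * Bits 31:29 of each command dword select the client: 0x0 are MI (memory
 * interface) commands, 0x2 are 2D/blitter commands whose dword length
 * lives in the low byte, and 0x3 are 3D/media commands with per-opcode
 * length encodings, which is why the nested switches below differ.
 */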
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);

                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
#endif

int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

static int i915_dispatch_cmdbuffer(struct drm_device *dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

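/*
 * i830/845G lack MI_BATCH_BUFFER_START, so they get the two-address
 * MI_BATCH_BUFFER form instead; on gen4+ the non-secure flag moves from
 * the address dword into the command dword (MI_BATCH_NON_SECURE_I965).
 */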
static int i915_dispatch_batchbuffer(struct drm_device *dev,
                                     drm_i915_batchbuffer_t *batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_flip(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->dri1.current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->dri1.current_page == 0) {
                OUT_RING(dev_priv->dri1.back_offset);
                dev_priv->dri1.current_page = 1;
        } else {
                OUT_RING(dev_priv->dri1.front_offset);
                dev_priv->dri1.current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        i915_kernel_lost_context(dev);
        return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        master_priv = dev->primary->master->driver_priv;
        sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                        batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        drm_i915_sarea_t *sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                        cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        master_priv = dev->primary->master->driver_priv;
        sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(*cliprects), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_emit_irq(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG_DRIVER("\n");

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->dri1.counter;
}

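/*
 * Wait until the status-page breadcrumb reaches irq_nr: take an engine
 * irq reference and sleep on the ring's wait queue if we can, otherwise
 * fall back to polling; either way the timeout is 3 seconds.
 */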
static int i915_wait_irq(struct drm_device *dev, int irq_nr)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_engine_cs *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
#endif

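/*
 * GETPARAM ioctl: userspace (libdrm and friends) queries one driver or
 * hardware capability per call; the result is written back through
 * param->value.
 */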
int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        case I915_PARAM_CMD_PARSER_VERSION:
                value = i915_cmd_parser_get_version();
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        *param->value = value;

        return 0;
}

#if 0
static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }

        return 0;
}
#endif

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

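/*
 * MCHBAR is the memory controller hub's MMIO window. Its base lives in
 * host-bridge config space: offset 0x44 before gen4, 0x48 from gen4 on;
 * on i915G/GM the enable bit is bit 28 of the DEVEN register rather than
 * bit 0 of the BAR itself.
 */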
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Set up MCHBAR if possible; sets mchbar_need_disable when it had to be
 * enabled here so that teardown can disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        if (IS_VALLEYVIEW(dev))
                return;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        dbgprintf("MCHBAR is disabled and enabling it is not implemented\n");

#if 0
        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
#endif
}

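/*
 * VGA arbiter callback: report which VGA resources this GPU decodes so the
 * arbiter can route legacy VGA cycles; with decode off, only the normal
 * (non-legacy) IO/MEM ranges remain claimed.
 */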
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        intel_power_domains_init_hw(dev_priv);

        /*
         * We enable some interrupt sources in our postinstall hooks, so mark
         * interrupts as enabled _before_ actually enabling them to avoid
         * special cases in our ordering checks.
         */
        dev_priv->pm._irqs_disabled = false;

        ret = drm_irq_install(dev, dev->pdev->irq);
        if (ret)
                goto cleanup_gem_stolen;

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        intel_fbdev_initial_config(dev);

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        WARN_ON(dev_priv->mm.aliasing_ppgtt);
cleanup_irq:
//      drm_irq_uninstall(dev);
cleanup_gem_stolen:
//      i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//      vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//      vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}

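/*
 * Kick out any firmware framebuffer (e.g. vesafb/efifb) that claims our
 * mappable aperture before inteldrmfb takes it over; without CONFIG_FB
 * there is nothing to evict, hence the stub below.
 */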
  1256. #if IS_ENABLED(CONFIG_FB)
  1257. static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
  1258. {
  1259.         struct apertures_struct *ap;
  1260.         struct pci_dev *pdev = dev_priv->dev->pdev;
  1261.         bool primary;
  1262.         int ret;
  1263.  
  1264.         ap = alloc_apertures(1);
  1265.         if (!ap)
  1266.                 return -ENOMEM;
  1267.  
  1268.         ap->ranges[0].base = dev_priv->gtt.mappable_base;
  1269.         ap->ranges[0].size = dev_priv->gtt.mappable_end;
  1270.  
  1271.         primary =
  1272.                 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
  1273.  
  1274.         ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
  1275.  
  1276.         kfree(ap);
  1277.  
  1278.         return ret;
  1279. }
  1280. #else
  1281. static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
  1282. {
  1283.         return 0;
  1284. }
  1285. #endif
  1286.  
  1287. static void i915_dump_device_info(struct drm_i915_private *dev_priv)
  1288. {
  1289.         const struct intel_device_info *info = &dev_priv->info;
  1290.  
  1291. #define PRINT_S(name) "%s"
  1292. #define SEP_EMPTY
  1293. #define PRINT_FLAG(name) info->name ? #name "," : ""
  1294. #define SEP_COMMA ,
  1295.         DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
  1296.                          DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
  1297.                          info->gen,
  1298.                          dev_priv->dev->pdev->device,
  1299.                          dev_priv->dev->pdev->revision,
  1300.                          DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
  1301. #undef PRINT_S
  1302. #undef SEP_EMPTY
  1303. #undef PRINT_FLAG
  1304. #undef SEP_COMMA
  1305. }
  1306.  
  1307. /*
  1308.  * Determine various intel_device_info fields at runtime.
  1309.  *
  1310.  * Use it when either:
  1311.  *   - it's judged too laborious to fill n static structures with the limit
  1312.  *     when a simple if statement does the job,
  1313.  *   - run-time checks (eg read fuse/strap registers) are needed.
  1314.  *
  1315.  * This function needs to be called:
  1316.  *   - after the MMIO has been setup as we are reading registers,
  1317.  *   - after the PCH has been detected,
  1318.  *   - before the first usage of the fields it can tweak.
  1319.  */
  1320. static void intel_device_info_runtime_init(struct drm_device *dev)
  1321. {
  1322.         struct drm_i915_private *dev_priv = dev->dev_private;
  1323.         struct intel_device_info *info;
  1324.         enum pipe pipe;
  1325.  
  1326.         info = (struct intel_device_info *)&dev_priv->info;
  1327.  
  1328.         if (IS_VALLEYVIEW(dev))
  1329.                 for_each_pipe(pipe)
  1330.                         info->num_sprites[pipe] = 2;
  1331.         else
  1332.                 for_each_pipe(pipe)
  1333.                         info->num_sprites[pipe] = 1;
  1334.  
  1335.         if (i915.disable_display) {
  1336.                 DRM_INFO("Display disabled (module parameter)\n");
  1337.                 info->num_pipes = 0;
  1338.         } else if (info->num_pipes > 0 &&
  1339.                    (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
  1340.                    !IS_VALLEYVIEW(dev)) {
  1341.                 u32 fuse_strap = I915_READ(FUSE_STRAP);
  1342.                 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
  1343.  
  1344.                 /*
  1345.                  * SFUSE_STRAP is supposed to have a bit signalling the display
  1346.                  * is fused off. Unfortunately it seems that, at least in
  1347.                  * certain cases, fused off display means that PCH display
  1348.                  * reads don't land anywhere. In that case, we read 0s.
  1349.                  *
  1350.                  * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
  1351.                  * should be set when taking over after the firmware.
  1352.                  */
  1353.                 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
  1354.                     sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
  1355.                     (dev_priv->pch_type == PCH_CPT &&
  1356.                      !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
  1357.                         DRM_INFO("Display fused off, disabling\n");
  1358.                         info->num_pipes = 0;
  1359.                 }
  1360.         }
  1361. }
  1362.  
  1363. /**
  1364.  * i915_driver_load - setup chip and create an initial config
  1365.  * @dev: DRM device
  1366.  * @flags: startup flags
  1367.  *
  1368.  * The driver load routine has to do several things:
  1369.  *   - drive output discovery via intel_modeset_init()
  1370.  *   - initialize the memory manager
  1371.  *   - allocate initial config memory
  1372.  *   - setup the DRM framebuffer with the allocated memory
  1373.  */
  1374. int i915_driver_load(struct drm_device *dev, unsigned long flags)
  1375. {
  1376.     struct drm_i915_private *dev_priv;
  1377.         struct intel_device_info *info, *device_info;
  1378.         int ret = 0, mmio_bar, mmio_size;
  1379.         uint32_t aperture_size;
  1380.  
  1381.         info = (struct intel_device_info *) flags;
  1382.  
  1383.  
  1384.         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
  1385.     if (dev_priv == NULL)
  1386.         return -ENOMEM;
  1387.  
  1388.         dev->dev_private = dev_priv;
  1389.     dev_priv->dev = dev;
  1390.  
  1391.         /* copy initial configuration to dev_priv->info */
  1392.         device_info = (struct intel_device_info *)&dev_priv->info;
  1393.         *device_info = *info;
  1394.  
  1395.         spin_lock_init(&dev_priv->irq_lock);
  1396.         spin_lock_init(&dev_priv->gpu_error.lock);
  1397.         spin_lock_init(&dev_priv->backlight_lock);
  1398.         spin_lock_init(&dev_priv->uncore.lock);
  1399.         spin_lock_init(&dev_priv->mm.object_stat_lock);
  1400.         spin_lock_init(&dev_priv->mmio_flip_lock);
  1401.         mutex_init(&dev_priv->dpio_lock);
  1402.         mutex_init(&dev_priv->modeset_restore_lock);
  1403.  
  1404.         intel_pm_setup(dev);
  1405.  
  1406.         intel_display_crc_init(dev);
  1407.  
  1408.         i915_dump_device_info(dev_priv);
  1409.  
  1410.         /* Not all pre-production machines fall into this category, only the
  1411.          * very first ones. Almost everything should work, except for maybe
  1412.          * suspend/resume. And we don't implement workarounds that affect only
  1413.          * pre-production machines. */
  1414.         if (IS_HSW_EARLY_SDV(dev))
  1415.                 DRM_INFO("This is an early pre-production Haswell machine. "
  1416.                          "It may not be fully functional.\n");
  1417.  
  1418.         if (i915_get_bridge_dev(dev)) {
  1419.                 ret = -EIO;
  1420.                 goto free_priv;
  1421.         }
  1422.  
  1423.         mmio_bar = IS_GEN2(dev) ? 1 : 0;
  1424.         /* Before gen4, the registers and the GTT are behind different BARs.
  1425.          * However, from gen4 onwards, the registers and the GTT share the
  1426.          * same BAR, so we want to restrict this ioremap so that it does not
  1427.          * clobber the GTT, which we want to map with ioremap_wc instead.
  1428.          * Fortunately, the register BAR remains the same size for all the
  1429.          * earlier generations up to Ironlake.
  1430.          */
  1431.         if (info->gen < 5)
  1432.                 mmio_size = 512*1024;
  1433.         else
  1434.                 mmio_size = 2*1024*1024;
  1435.  
  1436.         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
  1437.         if (!dev_priv->regs) {
  1438.                 DRM_ERROR("failed to map registers\n");
  1439.                 ret = -EIO;
  1440.                 goto put_bridge;
  1441.         }
  1442.  
  1443.         /* This must be called before any calls to HAS_PCH_* */
  1444.         intel_detect_pch(dev);
  1445.  
  1446.         intel_uncore_init(dev);
  1447.  
  1448.         ret = i915_gem_gtt_init(dev);
  1449.         if (ret)
  1450.                 goto out_regs;
  1451.  
  1452.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  1453.                 i915_kick_out_firmware_fb(dev_priv);
  1454.  
  1455.         pci_set_master(dev->pdev);
  1456.  
  1457.         /* The overlay on gen2 is broken and can't address above 1G. */
  1458.  
  1459.         /* 965GM sometimes incorrectly writes to hardware status page (HWS)
  1460.          * using 32-bit addressing, overwriting memory if HWS is located
  1461.          * above 4GB.
  1462.          *
  1463.          * The documentation also mentions an issue with undefined
  1464.          * behaviour if any general state is accessed within a page above 4GB,
  1465.          * which also needs to be handled carefully.
  1466.          */
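        /*
         * Upstream acts on the two issues above right here by tightening the
         * device's DMA masks. That code is not wired up in this port; a
         * minimal sketch, assuming the Linux DMA API were available, is:
         */
#if 0
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
#endif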
  1467.  
  1468.         aperture_size = dev_priv->gtt.mappable_end;
  1469.  
  1470.         dev_priv->gtt.mappable = AllocKernelSpace(8192);
  1471.         if (dev_priv->gtt.mappable == NULL) {
  1472.                 ret = -EIO;
  1473.                 goto out_gtt;
  1474.         }
  1475.  
  1476.         /* The i915 workqueue is primarily used for batched retirement of
  1477.          * requests (and thus managing bo) once the task has been completed
  1478.          * by the GPU. i915_gem_retire_requests() is called directly when we
  1479.          * need high-priority retirement, such as waiting for an explicit
  1480.          * bo.
  1481.          *
  1482.          * It is also used for periodic low-priority events, such as
  1483.          * idle-timers and recording error state.
  1484.          *
  1485.          * All tasks on the workqueue are expected to acquire the dev mutex
  1486.          * so there is no point in running more than one instance of the
  1487.          * workqueue at any time.  Use an ordered one.
  1488.          */
  1489.         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
  1490.         if (dev_priv->wq == NULL) {
  1491.                 DRM_ERROR("Failed to create our workqueue.\n");
  1492.                 ret = -ENOMEM;
  1493.                 goto out_mtrrfree;
  1494.         }
  1495.         system_wq = dev_priv->wq;
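        /*
         * Work is handed to this queue through the regular workqueue API. As
         * an illustration only (this call site is hypothetical here), the
         * upstream driver queues its deferred retirement handler like so:
         */
#if 0
        queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                           round_jiffies_up_relative(HZ));
#endif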
  1496.  
  1497.  
  1498.         intel_irq_init(dev);
  1499.         intel_uncore_sanitize(dev);
  1500.  
  1501.         /* Try to make sure MCHBAR is enabled before poking at it */
  1502.         intel_setup_mchbar(dev);
  1503.         intel_setup_gmbus(dev);
  1504.         intel_opregion_setup(dev);
  1505.  
  1506.         intel_setup_bios(dev);
  1507.  
  1508.         i915_gem_load(dev);
  1509.  
  1510.         /* On the 945G/GM, the chipset reports the MSI capability on the
  1511.          * integrated graphics even though the support isn't actually there
  1512.          * according to the published specs.  It doesn't appear to function
  1513.          * correctly in testing on 945G.
  1514.          * This may be a side effect of MSI having been made available for PEG
  1515.          * and the registers being closely associated.
  1516.          *
  1517.          * According to chipset errata, on the 965GM, MSI interrupts may
  1518.          * be lost or delayed, but we use them anyway to avoid
  1519.          * stuck interrupts on some machines.
  1520.          */
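        /*
         * The MSI setup that the comment above documents is not wired up in
         * this port. Assuming a working pci_enable_msi(), the upstream logic
         * is simply:
         */
#if 0
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
#endif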
  1521.  
  1522.         intel_device_info_runtime_init(dev);
  1523.  
  1524. //   if (INTEL_INFO(dev)->num_pipes) {
  1525. //       ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
  1526. //       if (ret)
  1527. //           goto out_gem_unload;
  1528. //   }
  1529.  
  1530.         intel_power_domains_init(dev_priv);
  1531.  
  1532.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1533.                 ret = i915_load_modeset_init(dev);
  1534.                 if (ret < 0) {
  1535.                         DRM_ERROR("failed to init modeset\n");
  1536.                         goto out_power_well;
  1537.                 }
  1538.         } else {
  1539.                 /* Start out suspended in UMS mode. */
  1540.                 dev_priv->ums.mm_suspended = 1;
  1541.         }
  1542.  
  1543.  
  1544.         if (INTEL_INFO(dev)->num_pipes) {
  1545.                 /* Must be done after probing outputs */
  1546.                 intel_opregion_init(dev);
  1547.         }
  1548.  
  1549.         if (IS_GEN5(dev))
  1550.                 intel_gpu_ips_init(dev_priv);
  1551.  
  1552.         intel_init_runtime_pm(dev_priv);
  1553.  
  1554.         main_device = dev;
  1555.  
  1556.         return 0;
  1557.         /* Per-step unwind is collapsed in this port: every error label
  1558.          * below falls through to freeing dev_priv. */
  1559. out_power_well:
  1560. out_gem_unload:
  1561. out_mtrrfree:
  1562. out_gtt:
  1563. out_regs:
  1564. put_bridge:
  1565. free_priv:
  1566.         kfree(dev_priv);
  1567.         return ret;
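/*
 * A sketch only, not verified against this port's stubs: mirroring the
 * teardown order of i915_driver_unload() below, a fuller unwind would look
 * roughly like this.
 */
#if 0
out_power_well:
        intel_power_domains_remove(dev_priv);
out_gem_unload:
        destroy_workqueue(dev_priv->wq);
out_mtrrfree:
out_gtt:
        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_regs:
        intel_uncore_fini(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        kfree(dev_priv);
        return ret;
#endif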
  1568. }
  1569.  
  1570. #if 0
  1571.  
  1572. int i915_driver_unload(struct drm_device *dev)
  1573. {
  1574.         struct drm_i915_private *dev_priv = dev->dev_private;
  1575.         int ret;
  1576.  
  1577.         ret = i915_gem_suspend(dev);
  1578.         if (ret) {
  1579.                 DRM_ERROR("failed to idle hardware: %d\n", ret);
  1580.                 return ret;
  1581.         }
  1582.  
  1583.         intel_fini_runtime_pm(dev_priv);
  1584.  
  1585.         intel_gpu_ips_teardown();
  1586.  
  1587.         /* The i915.ko module is still not prepared to be loaded when
  1588.          * the power well is not enabled, so just enable it in case
  1589.          * we're going to unload/reload. */
  1590.         intel_display_set_init_power(dev_priv, true);
  1591.         intel_power_domains_remove(dev_priv);
  1592.  
  1593.         i915_teardown_sysfs(dev);
  1594.  
  1595.         if (dev_priv->mm.inactive_shrinker.scan_objects)
  1596.                 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  1597.  
  1598.         io_mapping_free(dev_priv->gtt.mappable);
  1599.         arch_phys_wc_del(dev_priv->gtt.mtrr);
  1600.  
  1601.         acpi_video_unregister();
  1602.  
  1603.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1604.                 intel_fbdev_fini(dev);
  1605.                 intel_modeset_cleanup(dev);
  1606.                 cancel_work_sync(&dev_priv->console_resume_work);
  1607.  
  1608.                 /*
  1609.                  * free the memory space allocated for the child device
  1610.                  * config parsed from VBT
  1611.                  */
  1612.                 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
  1613.                         kfree(dev_priv->vbt.child_dev);
  1614.                         dev_priv->vbt.child_dev = NULL;
  1615.                         dev_priv->vbt.child_dev_num = 0;
  1616.                 }
  1617.  
  1618.                 vga_switcheroo_unregister_client(dev->pdev);
  1619.                 vga_client_register(dev->pdev, NULL, NULL, NULL);
  1620.         }
  1621.  
  1622.         /* Free error state after interrupts are fully disabled. */
  1623.         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  1624.         cancel_work_sync(&dev_priv->gpu_error.work);
  1625.         i915_destroy_error_state(dev);
  1626.  
  1627.         if (dev->pdev->msi_enabled)
  1628.                 pci_disable_msi(dev->pdev);
  1629.  
  1630.         intel_opregion_fini(dev);
  1631.  
  1632.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1633.                 /* Flush any outstanding unpin_work. */
  1634.                 flush_workqueue(dev_priv->wq);
  1635.  
  1636.                 mutex_lock(&dev->struct_mutex);
  1637.                 i915_gem_cleanup_ringbuffer(dev);
  1638.                 i915_gem_context_fini(dev);
  1639.                 WARN_ON(dev_priv->mm.aliasing_ppgtt);
  1640.                 mutex_unlock(&dev->struct_mutex);
  1641.                 i915_gem_cleanup_stolen(dev);
  1642.  
  1643.                 if (!I915_NEED_GFX_HWS(dev))
  1644.                         i915_free_hws(dev);
  1645.         }
  1646.  
  1647.         WARN_ON(!list_empty(&dev_priv->vm_list));
  1648.  
  1649.         drm_vblank_cleanup(dev);
  1650.  
  1651.         intel_teardown_gmbus(dev);
  1652.         intel_teardown_mchbar(dev);
  1653.  
  1654.         destroy_workqueue(dev_priv->dp_wq);
  1655.         destroy_workqueue(dev_priv->wq);
  1656.         pm_qos_remove_request(&dev_priv->pm_qos);
  1657.  
  1658.         dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
  1659.  
  1660.         intel_uncore_fini(dev);
  1661.         if (dev_priv->regs != NULL)
  1662.                 pci_iounmap(dev->pdev, dev_priv->regs);
  1663.  
  1664.         if (dev_priv->slab)
  1665.                 kmem_cache_destroy(dev_priv->slab);
  1666.  
  1667.         pci_dev_put(dev_priv->bridge_dev);
  1668.         kfree(dev_priv);
  1669.  
  1670.         return 0;
  1671. }
  1672. #endif
  1673.  
  1674. int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  1675. {
  1676.         int ret;
  1677.  
  1678.         ret = i915_gem_open(dev, file);
  1679.         if (ret)
  1680.                 return ret;
  1681.  
  1682.         return 0;
  1683. }
  1684.  
  1685. #if 0
  1686. /**
  1687.  * i915_driver_lastclose - clean up after all DRM clients have exited
  1688.  * @dev: DRM device
  1689.  *
  1690.  * Take care of cleaning up after all DRM clients have exited.  In the
  1691.  * mode setting case, we want to restore the kernel's initial mode (just
  1692.  * in case the last client left us in a bad state).
  1693.  *
  1694.  * Additionally, in the non-mode setting case, we'll tear down the GTT
  1695.  * and DMA structures, since the kernel won't be using them, and clean
  1696.  * up any GEM state.
  1697.  */
  1698. void i915_driver_lastclose(struct drm_device *dev)
  1699. {
  1700.         struct drm_i915_private *dev_priv = dev->dev_private;
  1701.  
  1702.         /* On gen6+ we refuse to init without kms enabled, but then the drm core
  1703.          * goes right around and calls lastclose. Check for this and don't clean
  1704.          * up anything. */
  1705.         if (!dev_priv)
  1706.                 return;
  1707.  
  1708.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1709.                 intel_fbdev_restore_mode(dev);
  1710.                 vga_switcheroo_process_delayed_switch();
  1711.                 return;
  1712.         }
  1713.  
  1714.         i915_gem_lastclose(dev);
  1715.  
  1716.         i915_dma_cleanup(dev);
  1717. }
  1718.  
  1719. void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
  1720. {
  1721.         mutex_lock(&dev->struct_mutex);
  1722.         i915_gem_context_close(dev, file);
  1723.         i915_gem_release(dev, file);
  1724.         mutex_unlock(&dev->struct_mutex);
  1725. }
  1726.  
  1727. void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
  1728. {
  1729.         struct drm_i915_file_private *file_priv = file->driver_priv;
  1730.  
  1731.         if (file_priv && file_priv->bsd_ring)
  1732.                 file_priv->bsd_ring = NULL;
  1733.         kfree(file_priv);
  1734. }
  1735.  
  1736. const struct drm_ioctl_desc i915_ioctls[] = {
  1737.         DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1738.         DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
  1739.         DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
  1740.         DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
  1741.         DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
  1742.         DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
  1743.         DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
  1744.         DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1745.         DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
  1746.         DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
  1747.         DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1748.         DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
  1749.         DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1750.         DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1751.         DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
  1752.         DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
  1753.         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1754.         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1755.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
  1756.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1757.         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1758.         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1759.         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1760.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1761.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1762.         DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1763.         DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1764.         DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1765.         DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1766.         DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1767.         DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1768.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1769.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1770.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1771.         DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1772.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1773.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1774.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1775.         DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
  1776.         DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1777.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1778.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1779.         DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1780.         DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1781.         DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1782.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1783.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1784.         DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1785.         DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1786.         DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1787. };
  1788.  
  1789. int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
  1790.  
  1791. /*
  1792.  * This is really ugly: because old userspace abused the Linux AGP interface
  1793.  * to manage the GTT, we need to claim that all Intel devices are AGP.
  1794.  * Otherwise the drm core refuses to initialize the AGP support code.
  1795.  */
  1796. int i915_driver_device_is_agp(struct drm_device *dev)
  1797. {
  1798.         return 1;
  1799. }
  1800. #endif
  1801.  
  1802.