Subversion Repositories Kolibri OS


/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

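/*
 * Forward declaration (port note): pci_iomap() is declared here directly,
 * presumably because the stubbed-out headers above do not provide it in
 * this KolibriOS port.
 */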
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
        intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
        intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
        __intel_ring_advance(LP_RING(dev_priv))

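/*
 * Usage sketch (illustrative): these macros wrap the intel_ring_* API so
 * the legacy DRI1 paths below can emit commands on the render ring with a
 * "dev_priv" in scope, e.g.:
 *
 *      ret = BEGIN_LP_RING(2);         // reserve space for two dwords
 *      if (ret == 0) {
 *              OUT_RING(MI_NOOP);      // write each dword in turn
 *              OUT_RING(MI_NOOP);
 *              ADVANCE_LP_RING();      // commit by advancing the ring tail
 *      }
 */
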
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                      \
        if (LP_RING(dev->dev_private)->obj == NULL)                     \
                LOCK_TEST_WITH_RETURN(dev, file);                       \
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
        if (I915_NEED_GFX_HWS(dev_priv->dev))
                return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
        else
                return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX           0x21

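/*
 * Illustrative note: the "breadcrumb" is a sequence number that the GPU
 * writes into dword 0x21 of the hardware status page (via
 * MI_STORE_DWORD_INDEX, see i915_emit_irq() below); READ_BREADCRUMB() is
 * the CPU-side read of the same slot, so comparing it against
 * dev_priv->dri1.counter shows how far the ring has actually executed.
 */
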
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;

        /*
         * The dri breadcrumb update races against the drm master
         * disappearing.  Instead of trying to fix this (it is far from the
         * only UMS issue), just don't do the update in KMS mode.
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }
}

static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
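        /* Illustrative note: on gen4+, HWS_PGA also carries bits 35:32 of
         * the physical address in register bits 7:4; the shift-and-mask
         * below folds them into the low dword. */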
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (ring->status_page.gfx_addr) {
                ring->status_page.gfx_addr = 0;
                iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->dri1.cpp = init->cpp;
        dev_priv->dri1.back_offset = init->back_offset;
        dev_priv->dri1.front_offset = init->front_offset;
        dev_priv->dri1.current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}
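
/*
 * Worked example (illustrative): for a "2d command" (bits 31:29 == 0x2)
 * the low byte encodes the total length minus two, so a header dword of
 * 0x54000002 validates to (0x02 + 2) = 4 dwords, and the scanner in
 * i915_emit_cmds() below resumes checking at buffer[i + 4].
 */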

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

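        /* BEGIN_LP_RING() above rounds an odd dword count up, and the loop
         * below pads the extra slot with a zero dword, presumably to keep
         * the ring tail qword-aligned. */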
        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
#endif

int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}
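
/*
 * Note: GFX_OP_DRAWRECT_INFO is a five-dword packet on pre-gen4 hardware;
 * the trailing OUT_RING(0) above pads the emission to an even six dwords,
 * presumably to keep the ring tail qword-aligned.
 */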

#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->dri1.current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->dri1.current_page == 0) {
                OUT_RING(dev_priv->dri1.back_offset);
                dev_priv->dri1.current_page = 1;
        } else {
                OUT_RING(dev_priv->dri1.front_offset);
                dev_priv->dri1.current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        i915_kernel_lost_context(dev);
        return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                        batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                        cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(*cliprects), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG_DRIVER("\n");

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                         READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
#endif

int i915_getparam(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

        *param->value = value;

        return 0;
}
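
/*
 * Userspace-side sketch (illustrative, not part of this file): libdrm
 * queries these parameters through the DRM_IOCTL_I915_GETPARAM ioctl,
 * roughly:
 *
 *      int devid = 0;
 *      drm_i915_getparam_t gp = { 0 };
 *      gp.param = I915_PARAM_CHIPSET_ID;
 *      gp.value = &devid;
 *      drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */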

#if 0
static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}
#endif

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)
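
/*
 * Illustrative note: MCHBAR is the memory controller hub's MMIO window.
 * Its base lives in the host bridge's config space at offset 0x44
 * (pre-gen4, MCHBAR_I915) or 0x48 (gen4+, MCHBAR_I965).  On i915G/GM the
 * enable bit is DEVEN_MCHBAR_EN in the DEVEN register; elsewhere it is
 * bit 0 of the MCHBAR register itself, which is what intel_setup_mchbar()
 * below checks.
 */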
/* Set up MCHBAR if possible; flag it for disabling at teardown if we had to
 * enable it ourselves. */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        dbgprintf("Epic fail\n");

#if 0
        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
#endif
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        main_fb_obj = kos_gem_fb_object_create(dev, 0, 16*1024*1024);

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem_stolen;

        intel_power_domains_init_hw(dev);

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_power;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = true;
        if (INTEL_INFO(dev)->num_pipes == 0) {
                intel_display_power_put(dev, POWER_DOMAIN_VGA);
                return 0;
        }

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        intel_fbdev_initial_config(dev);

        /* Only enable hotplug handling once the fbdev is fully set up. */
        dev_priv->enable_hotplug_processing = true;

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
        drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
        intel_display_power_put(dev, POWER_DOMAIN_VGA);
//      drm_irq_uninstall(dev);
cleanup_gem_stolen:
//      i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//      vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//      vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}
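/*
 * Note (hedged): on Linux, remove_conflicting_framebuffers() below kicks
 * out any generic firmware framebuffer driver (e.g. vesafb/efifb) that
 * has claimed the GTT aperture range, so inteldrmfb can take over the
 * scanout memory.
 */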
#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
        struct apertures_struct *ap;
        struct pci_dev *pdev = dev_priv->dev->pdev;
        bool primary;

        ap = alloc_apertures(1);
        if (!ap)
                return;

        ap->ranges[0].base = dev_priv->gtt.mappable_base;
        ap->ranges[0].size = dev_priv->gtt.mappable_end;

        primary =
                pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

        remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

        kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
        const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
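
/*
 * Illustrative expansion: DEV_INFO_FOR_EACH_FLAG(f, sep) is an X-macro
 * that applies f() to every device-info flag, separated by sep.  With
 * flags (is_mobile, has_llc), the first invocation above contributes
 * "%s" "%s" to the format string, and the second contributes
 *
 *      info->is_mobile ? "is_mobile," : "", info->has_llc ? "has_llc," : ""
 *
 * to the argument list, keeping the debug output in sync with the flag
 * list by construction.
 */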

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info;
        int ret = 0, mmio_bar, mmio_size;
        uint32_t aperture_size;

        info = (struct intel_device_info *) flags;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = info;

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
        spin_lock_init(&dev_priv->backlight_lock);
        spin_lock_init(&dev_priv->uncore.lock);
        spin_lock_init(&dev_priv->mm.object_stat_lock);
        mutex_init(&dev_priv->dpio_lock);
        mutex_init(&dev_priv->modeset_restore_lock);

        intel_pm_setup(dev);

        intel_display_crc_init(dev);

        i915_dump_device_info(dev_priv);

        /* Not all pre-production machines fall into this category, only the
         * very first ones. Almost everything should work, except for maybe
         * suspend/resume. And we don't implement workarounds that affect only
         * pre-production machines. */
        if (IS_HSW_EARLY_SDV(dev))
                DRM_INFO("This is an early pre-production Haswell machine. "
                         "It may not be fully functional.\n");

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        /* Before gen4, the registers and the GTT are behind different BARs.
         * However, from gen4 onwards, the registers and the GTT are shared
         * in the same BAR, so we want to restrict this ioremap from
         * clobbering the GTT, which we want to map with ioremap_wc instead.
         * Fortunately, the register BAR remains the same size for all the
         * earlier generations up to Ironlake.
         */
        if (info->gen < 5)
                mmio_size = 512*1024;
        else
                mmio_size = 2*1024*1024;

        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }

        intel_uncore_early_sanitize(dev);

        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);

        intel_uncore_init(dev);

        ret = i915_gem_gtt_init(dev);
        if (ret)
                goto out_regs;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kick_out_firmware_fb(dev_priv);

        pci_set_master(dev->pdev);

        /* overlay on gen2 is broken and can't address above 1G */

        /* 965GM sometimes incorrectly writes to hardware status page (HWS)
         * using 32bit addressing, overwriting memory if HWS is located
         * above 4GB.
         *
         * The documentation also mentions an issue with undefined
         * behaviour if any general state is accessed within a page above 4GB,
         * which also needs to be handled carefully.
         */

        aperture_size = dev_priv->gtt.mappable_end;

        dev_priv->gtt.mappable = AllocKernelSpace(8192);
        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
                goto out_gtt;
        }

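        /* Port note (assumption): upstream i915 creates a write-combining
         * io_mapping covering the whole mappable aperture at this point;
         * this KolibriOS port instead reserves a small 8 KiB window of
         * kernel address space, presumably remapped page-by-page on
         * access. */
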
  1401.     /* The i915 workqueue is primarily used for batched retirement of
  1402.      * requests (and thus managing bo) once the task has been completed
  1403.      * by the GPU. i915_gem_retire_requests() is called directly when we
  1404.      * need high-priority retirement, such as waiting for an explicit
  1405.      * bo.
  1406.      *
  1407.      * It is also used for periodic low-priority events, such as
  1408.      * idle-timers and recording error state.
  1409.      *
  1410.      * All tasks on the workqueue are expected to acquire the dev mutex
  1411.      * so there is no point in running more than one instance of the
  1412.          * workqueue at any time.  Use an ordered one.
  1413.      */
  1414.         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
  1415.         if (dev_priv->wq == NULL) {
  1416.                 DRM_ERROR("Failed to create our workqueue.\n");
  1417.                 ret = -ENOMEM;
  1418.                 goto out_mtrrfree;
  1419.         }
  1420.     system_wq = dev_priv->wq;
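
	/* Port note (assumption): aliasing the global system_wq to the
	 * driver's ordered workqueue means generic schedule_work()-style
	 * helpers in this port are funnelled into the same single queue. */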

	intel_irq_init(dev);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyway to avoid
	 * stuck interrupts on some machines.
	 */

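	/* The corresponding upstream code (omitted in this port) is roughly:
	 *
	 *	if (!IS_I945G(dev) && !IS_I945GM(dev))
	 *		pci_enable_msi(dev->pdev);
	 */
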
	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

//	if (INTEL_INFO(dev)->num_pipes) {
//		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
//		if (ret)
//			goto out_gem_unload;
//	}

	intel_power_domains_init(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
	} else {
		/* Start out suspended in ums mode. */
		dev_priv->ums.mm_suspended = 1;
	}

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
//		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_init_runtime_pm(dev_priv);

	main_device = dev;

	return 0;

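	/* The unwind labels below are intentionally empty in this port;
	 * upstream, each label releases the matching resource (for example
	 * pci_iounmap() under out_regs and pci_dev_put(dev_priv->bridge_dev)
	 * under put_bridge) before falling through to free dev_priv. */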
out_power_well:
out_gem_unload:
out_mtrrfree:
out_gtt:
out_regs:
put_bridge:
free_priv:
	kfree(dev_priv);
	return ret;
}
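
/*
 * For reference: upstream, the load function above is i915_driver_load(),
 * installed as the drm_driver .load hook and called by the DRM core during
 * device registration, roughly as sketched below (how the KolibriOS port
 * wires it up may differ):
 *
 *	static struct drm_driver driver = {
 *		.load = i915_driver_load,
 *		...
 *	};
 *	drm_get_pci_dev(pdev, ent, &driver);	// core invokes driver.load()
 */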

#if 0
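
/* Compiled out: the port presumably never tears the driver down at runtime,
 * so the unload path below is kept only as upstream reference. */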

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_fini_runtime_pm(dev_priv);

	intel_gpu_ips_teardown();

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev, true);
	intel_power_domains_remove(dev);

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));

	drm_vblank_cleanup(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
#endif
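
/* Per-client setup: the DRM core invokes this through the driver's .open
 * hook on every open of the device node. file_priv carries per-client
 * state: the outstanding-request list guarded by mm.lock, and the IDR that
 * maps a client's context handles to GEM contexts. */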
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;

	DRM_DEBUG_DRIVER("\n");
	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	idr_init(&file_priv->context_idr);

	return 0;
}

#if 0
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

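/* Access-control flags used below (DRM core semantics of this era):
 * DRM_AUTH requires an authenticated client, DRM_MASTER restricts the ioctl
 * to the current DRM master, DRM_ROOT_ONLY requires CAP_SYS_ADMIN,
 * DRM_UNLOCKED skips the legacy global DRM lock, and DRM_RENDER_ALLOW /
 * DRM_CONTROL_ALLOW additionally permit the ioctl on render / control
 * nodes. */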
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
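
/*
 * A minimal userspace sketch of reaching one of these entries (Linux-style,
 * assuming libdrm and the uapi header i915_drm.h; illustrative only):
 *
 *	int id = 0;
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &id,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp); // dispatches to i915_getparam()
 */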

/*
 * This is really ugly: because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp, since
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	return 1;
}
#endif
