/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
        intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
        intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
        intel_ring_advance(LP_RING(dev_priv))

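/*
 * BEGIN_LP_RING()/OUT_RING()/ADVANCE_LP_RING() wrap the classic DRM
 * ring-emission idiom around the intel_ring_begin()/_emit()/_advance()
 * helpers: reserve space on the legacy render ring (RCS), write dwords
 * into it, then advance the tail pointer so the GPU sees them.
 */
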
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                      \
        if (LP_RING(dev->dev_private)->obj == NULL)                     \
                LOCK_TEST_WITH_RETURN(dev, file);                       \
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
        if (I915_NEED_GFX_HWS(dev_priv->dev))
                return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
        else
                return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX           0x21
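
/*
 * The breadcrumb is a driver-side sequence counter kept at dword 0x21 of
 * the hardware status page: the emit paths below store dri1.counter there
 * with MI_STORE_DWORD_INDEX, and READ_BREADCRUMB() reads it back to see
 * how far the GPU has progressed.
 */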

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;

        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }
}

static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}
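
/*
 * Note on the encoding above: on gen4+ the status page may sit above 4GiB,
 * so bits 32..35 of the bus address are folded into bits 4..7 of the value
 * written to HWS_PGA -- that is what the ">> 28) & 0xf0" accomplishes.
 */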

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (ring->status_page.gfx_addr) {
                ring->status_page.gfx_addr = 0;
                iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->dri1.cpp = init->cpp;
        dev_priv->dri1.back_offset = init->back_offset;
        dev_priv->dri1.front_offset = init->front_offset;
        dev_priv->dri1.current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

        if (ring->virtual_start == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                                ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}
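
/*
 * Worked example for the decoder above: the dword 0x54000006 has
 * (cmd >> 29) & 0x7 == 0x2, i.e. a 2D command, so validate_cmd() returns
 * (cmd & 0xff) + 2 == 8, and the scanner in i915_emit_cmds() skips ahead
 * eight dwords to the next instruction. A return of zero aborts the
 * whole buffer.
 */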

static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
#endif

int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}
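
/*
 * The two paths above differ because gen4 dropped DR1 and the trailing
 * padding dword from the DRAWRECT_INFO packet (4 dwords instead of 6);
 * x2/y2 are emitted minus one, i.e. as inclusive bottom-right coordinates.
 */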

#if 0
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->dri1.current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->dri1.current_page == 0) {
                OUT_RING(dev_priv->dri1.back_offset);
                dev_priv->dri1.current_page = 1;
        } else {
                OUT_RING(dev_priv->dri1.front_offset);
                dev_priv->dri1.current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        i915_kernel_lost_context(dev);
        return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                        batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                        cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG_DRIVER("\n");

        dev_priv->dri1.counter++;
        if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
                dev_priv->dri1.counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->dri1.counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                  READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
        }

        return ret;
}
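
/*
 * The wait above prefers a real interrupt: when the ring can take an irq
 * reference, DRM_WAIT_ON() sleeps up to 3 seconds (3 * DRM_HZ) for the
 * breadcrumb to reach irq_nr; otherwise it falls back to polling via
 * wait_for() with the same 3000 ms budget and reports -EBUSY on timeout.
 */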

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                     struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
#endif

int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_VEBOX:
                value = intel_ring_initialized(&dev_priv->ring[VECS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_WT:
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        case I915_PARAM_HAS_SECURE_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_PINNED_BATCHES:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_NO_RELOC:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_HANDLE_LUT:
                value = 0; //1;
                break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
        }

//   if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
//       DRM_ERROR("DRM_COPY_TO_USER failed\n");
//       return -EFAULT;
//   }

        *param->value = value;

        return 0;
}
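
/*
 * Unlike the upstream driver, which copies the result back with
 * DRM_COPY_TO_USER() (preserved above as a comment), this port writes
 * *param->value directly -- presumably safe here because caller and
 * driver share one address space under KolibriOS.
 */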

#if 0
static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                        param->param);
                return -EINVAL;
        }

        return 0;
}
#endif

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Set up MCHBAR if possible; mchbar_need_disable records whether it must
 * be disabled again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        dbgprintf("Epic fail\n");

#if 0
        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
#endif
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        fb_obj = kos_gem_fb_object_create(dev, 0, 12*1024*1024);

        /* Initialise stolen first so that we may reserve preallocated
         * objects for the BIOS to KMS transition.
         */
        ret = i915_gem_init_stolen(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_gem_stolen;

        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);

        ret = i915_gem_init(dev);
        if (ret)
                goto cleanup_irq;

        intel_modeset_gem_init(dev);

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;
        if (INTEL_INFO(dev)->num_pipes == 0)
                return 0;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_gem;

        /* Only enable hotplug handling once the fbdev is fully set up. */
        intel_hpd_init(dev);

        /*
         * Some ports require correctly set-up hpd registers for detection to
         * work properly (leading to ghost connected connector status), e.g. VGA
         * on gm45.  Hence we can only set up the initial fbdev config after hpd
         * irqs are fully enabled. Now we should scan for the initial config
         * only once hotplug handling is enabled, but due to screwed-up locking
         * around kms/fbdev init we can't protect the fbdev initial config
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
        intel_fbdev_initial_config(dev);

        /* Only enable hotplug handling once the fbdev is fully set up. */
        dev_priv->enable_hotplug_processing = true;

        drm_kms_helper_poll_init(dev);

        return 0;

cleanup_gem:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
//      drm_irq_uninstall(dev);
cleanup_gem_stolen:
//      i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
//      vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
//      vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
        return ret;
}

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
        const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
        DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
                         DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
                         info->gen,
                         dev_priv->dev->pdev->device,
                         DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
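
/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro: expanded with PRINT_S/SEP_EMPTY it
 * contributes one "%s" per feature flag to the format string, and expanded
 * again with PRINT_FLAG/SEP_COMMA it supplies the matching argument list
 * ("name," for set flags, "" otherwise).
 */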
  1254.  
  1255. /**
  1256.  * i915_driver_load - setup chip and create an initial config
  1257.  * @dev: DRM device
  1258.  * @flags: startup flags
  1259.  *
  1260.  * The driver load routine has to do several things:
  1261.  *   - drive output discovery via intel_modeset_init()
  1262.  *   - initialize the memory manager
  1263.  *   - allocate initial config memory
  1264.  *   - setup the DRM framebuffer with the allocated memory
  1265.  */
  1266. int i915_driver_load(struct drm_device *dev, unsigned long flags)
  1267. {
  1268.     struct drm_i915_private *dev_priv;
  1269.         struct intel_device_info *info;
  1270.         int ret = 0, mmio_bar, mmio_size;
  1271.         uint32_t aperture_size;
  1272.  
  1273.         info = (struct intel_device_info *) flags;
  1274.  
  1275.  
  1276.     dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
  1277.     if (dev_priv == NULL)
  1278.         return -ENOMEM;
  1279.  
  1280.     dev->dev_private = (void *)dev_priv;
  1281.     dev_priv->dev = dev;
  1282.         dev_priv->info = info;
  1283.  
  1284.         spin_lock_init(&dev_priv->irq_lock);
  1285.         spin_lock_init(&dev_priv->gpu_error.lock);
  1286.         spin_lock_init(&dev_priv->backlight.lock);
  1287.         spin_lock_init(&dev_priv->uncore.lock);
  1288.         spin_lock_init(&dev_priv->mm.object_stat_lock);
  1289.         mutex_init(&dev_priv->dpio_lock);
  1290.         mutex_init(&dev_priv->rps.hw_lock);
  1291.         mutex_init(&dev_priv->modeset_restore_lock);
  1292.  
  1293.         mutex_init(&dev_priv->pc8.lock);
  1294.         dev_priv->pc8.requirements_met = false;
  1295.         dev_priv->pc8.gpu_idle = false;
  1296.         dev_priv->pc8.irqs_disabled = false;
  1297.         dev_priv->pc8.enabled = false;
  1298.         dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
  1299.         INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
  1300.  
  1301.         i915_dump_device_info(dev_priv);
  1302.  
  1303.         /* Not all pre-production machines fall into this category, only the
  1304.          * very first ones. Almost everything should work, except for maybe
  1305.          * suspend/resume. And we don't implement workarounds that affect only
  1306.          * pre-production machines. */
  1307.         if (IS_HSW_EARLY_SDV(dev))
  1308.                 DRM_INFO("This is an early pre-production Haswell machine. "
  1309.                          "It may not be fully functional.\n");
  1310.  
  1311.     if (i915_get_bridge_dev(dev)) {
  1312.         ret = -EIO;
  1313.         goto free_priv;
  1314.     }
  1315.  
  1316.         mmio_bar = IS_GEN2(dev) ? 1 : 0;
  1317.         /* Before gen4, the registers and the GTT are behind different BARs.
  1318.          * However, from gen4 onwards, the registers and the GTT are shared
  1319.          * in the same BAR, so we want to restrict this ioremap from
  1320.          * clobbering the GTT which we want ioremap_wc instead. Fortunately,
  1321.          * the register BAR remains the same size for all the earlier
  1322.          * generations up to Ironlake.
  1323.          */
  1324.         if (info->gen < 5)
  1325.                 mmio_size = 512*1024;
  1326.         else
  1327.                 mmio_size = 2*1024*1024;
  1328.  
  1329.         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
  1330.         if (!dev_priv->regs) {
  1331.                 DRM_ERROR("failed to map registers\n");
  1332.                 ret = -EIO;
  1333.                 goto put_bridge;
  1334.         }
  1335.  
  1336.         intel_uncore_early_sanitize(dev);
  1337.  
  1338.         if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
  1339.                 /* The docs do not explain exactly how the calculation can be
  1340.                  * made. It is somewhat guessable, but for now, it's always
  1341.                  * 128MB.
  1342.                  * NB: We can't write IDICR yet because we do not have gt funcs
  1343.                  * set up */
  1344.                 dev_priv->ellc_size = 128;
  1345.                 DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
  1346.         }
  1347.  
  1348.         ret = i915_gem_gtt_init(dev);
  1349.         if (ret)
  1350.                 goto put_bridge;
  1351.  
  1352.  
  1353.         pci_set_master(dev->pdev);
  1354.  
  1355.     /* overlay on gen2 is broken and can't address above 1G */
  1356.  
  1357.     /* 965GM sometimes incorrectly writes to hardware status page (HWS)
  1358.      * using 32bit addressing, overwriting memory if HWS is located
  1359.      * above 4GB.
  1360.      *
  1361.      * The documentation also mentions an issue with undefined
  1362.      * behaviour if any general state is accessed within a page above 4GB,
  1363.      * which also needs to be handled carefully.
  1364.      */
  1365.  
  1366.         aperture_size = dev_priv->gtt.mappable_end;
  1367.  
  1368.  
  1369.     /* The i915 workqueue is primarily used for batched retirement of
  1370.      * requests (and thus managing bo) once the task has been completed
  1371.      * by the GPU. i915_gem_retire_requests() is called directly when we
  1372.      * need high-priority retirement, such as waiting for an explicit
  1373.      * bo.
  1374.      *
  1375.      * It is also used for periodic low-priority events, such as
  1376.      * idle-timers and recording error state.
  1377.      *
  1378.      * All tasks on the workqueue are expected to acquire the dev mutex
  1379.      * so there is no point in running more than one instance of the
  1380.          * workqueue at any time.  Use an ordered one.
  1381.      */
  1382.         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
  1383.         if (dev_priv->wq == NULL) {
  1384.                 DRM_ERROR("Failed to create our workqueue.\n");
  1385.                 ret = -ENOMEM;
  1386.                 goto out_mtrrfree;
  1387.         }
  1388.     system_wq = dev_priv->wq;
  1389.  
  1390.         /* This must be called before any calls to HAS_PCH_* */
  1391.         intel_detect_pch(dev);
  1392.  
  1393.         intel_irq_init(dev);
  1394.         intel_pm_init(dev);
  1395.         intel_uncore_sanitize(dev);
  1396.         intel_uncore_init(dev);
  1397.  
  1398.     /* Try to make sure MCHBAR is enabled before poking at it */
  1399.         intel_setup_mchbar(dev);
  1400.     intel_setup_gmbus(dev);
  1401.     intel_opregion_setup(dev);
  1402.  
  1403.     intel_setup_bios(dev);
  1404.  
  1405.     i915_gem_load(dev);
  1406.  
  1407.         /* On the 945G/GM, the chipset reports the MSI capability on the
  1408.          * integrated graphics even though the support isn't actually there
  1409.          * according to the published specs.  It doesn't appear to function
  1410.          * correctly in testing on 945G.
  1411.          * This may be a side effect of MSI having been made available for PEG
  1412.          * and the registers being closely associated.
  1413.          *
  1414.          * According to chipset errata, on the 965GM, MSI interrupts may
  1415.          * be lost or delayed, but we use them anyway to avoid
  1416.          * stuck interrupts on some machines.
  1417.          */
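        /* Upstream enables MSI at this point except on 945G/GM, roughly:
         *
         *      if (!IS_I945G(dev) && !IS_I945GM(dev))
         *              pci_enable_msi(dev->pdev);
         *
         * a sketch for reference only; MSI setup is omitted in this port. */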
  1418.  
  1419.         dev_priv->num_plane = 1;
  1420.         if (IS_VALLEYVIEW(dev))
  1421.                 dev_priv->num_plane = 2;
  1422.  
  1423.         if (HAS_POWER_WELL(dev))
  1424.                 i915_init_power_well(dev);
  1425.  
  1426.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1427.                 ret = i915_load_modeset_init(dev);
  1428.                 if (ret < 0) {
  1429.                         DRM_ERROR("failed to init modeset\n");
  1430.                         goto out_gem_unload;
  1431.                 }
  1432.         } else {
  1433.                 /* Start out suspended in ums mode. */
  1434.                 dev_priv->ums.mm_suspended = 1;
  1435.         }
  1436.  
  1437.  
  1438.         if (INTEL_INFO(dev)->num_pipes) {
  1439.                 /* Must be done after probing outputs */
  1440.                 intel_opregion_init(dev);
  1441. //              acpi_video_register();
  1442.         }
  1443.  
  1444.         if (IS_GEN5(dev))
  1445.                 intel_gpu_ips_init(dev_priv);
  1446.  
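        /* KolibriOS-specific: record this device as the system's primary
         * GPU (main_device is assumed to be a port-wide global). */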
  1447.         main_device = dev;
  1448.  
  1449.         return 0;
  1450.  
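/* Error unwind: the labels below release resources in reverse order of
 * setup. Most of the teardown calls are stubbed out in this port and kept
 * commented for reference against the upstream driver. */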
  1451. out_gem_unload:
  1452. //    if (dev_priv->mm.inactive_shrinker.shrink)
  1453. //        unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  1454.  
  1455. //    if (dev->pdev->msi_enabled)
  1456. //        pci_disable_msi(dev->pdev);
  1457.  
  1458. //    intel_teardown_gmbus(dev);
  1459. //    intel_teardown_mchbar(dev);
  1460. //    destroy_workqueue(dev_priv->wq);
  1461. out_mtrrfree:
  1462. //      arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
  1463. //      io_mapping_free(dev_priv->gtt.mappable);
  1464. //      dev_priv->gtt.gtt_remove(dev);
  1465. out_rmmap:
  1466.         pci_iounmap(dev->pdev, dev_priv->regs);
  1467. put_bridge:
  1468. //    pci_dev_put(dev_priv->bridge_dev);
  1469. free_priv:
  1470.         kfree(dev_priv);
  1471.         return ret;
  1472. }
  1473.  
  1474. #if 0
  1475.  
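/* Driver teardown mirrors i915_driver_load in reverse: idle the GPU and
 * retire outstanding requests, tear down modeset/fbdev state, disable
 * interrupts and error handling, then release mappings, the workqueue and
 * the bridge device reference. Compiled out in this port. */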
  1476. int i915_driver_unload(struct drm_device *dev)
  1477. {
  1478.         struct drm_i915_private *dev_priv = dev->dev_private;
  1479.         int ret;
  1480.  
  1481.         intel_gpu_ips_teardown();
  1482.  
  1483.         if (HAS_POWER_WELL(dev)) {
  1484.                 /* The i915.ko module is still not prepared to be loaded when
  1485.                  * the power well is not enabled, so just enable it in case
  1486.                  * we're going to unload/reload. */
  1487.                 intel_set_power_well(dev, true);
  1488.                 i915_remove_power_well(dev);
  1489.         }
  1490.  
  1491.         i915_teardown_sysfs(dev);
  1492.  
  1493.         if (dev_priv->mm.inactive_shrinker.scan_objects)
  1494.                 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  1495.  
  1496.         mutex_lock(&dev->struct_mutex);
  1497.         ret = i915_gpu_idle(dev);
  1498.         if (ret)
  1499.                 DRM_ERROR("failed to idle hardware: %d\n", ret);
  1500.         i915_gem_retire_requests(dev);
  1501.         mutex_unlock(&dev->struct_mutex);
  1502.  
  1503.         /* Cancel the retire work handler, which should be idle now. */
  1504.         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  1505.  
  1506.         io_mapping_free(dev_priv->gtt.mappable);
  1507.         arch_phys_wc_del(dev_priv->gtt.mtrr);
  1508.  
  1509.         acpi_video_unregister();
  1510.  
  1511.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1512.                 intel_fbdev_fini(dev);
  1513.                 intel_modeset_cleanup(dev);
  1514.                 cancel_work_sync(&dev_priv->console_resume_work);
  1515.  
  1516.                 /*
  1517.                  * free the memory space allocated for the child device
  1518.                  * config parsed from VBT
  1519.                  */
  1520.                 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
  1521.                         kfree(dev_priv->vbt.child_dev);
  1522.                         dev_priv->vbt.child_dev = NULL;
  1523.                         dev_priv->vbt.child_dev_num = 0;
  1524.                 }
  1525.  
  1526.                 vga_switcheroo_unregister_client(dev->pdev);
  1527.                 vga_client_register(dev->pdev, NULL, NULL, NULL);
  1528.         }
  1529.  
  1530.         /* Free error state after interrupts are fully disabled. */
  1531.         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  1532.         cancel_work_sync(&dev_priv->gpu_error.work);
  1533.         i915_destroy_error_state(dev);
  1534.  
  1535.         cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
  1536.  
  1537.         if (dev->pdev->msi_enabled)
  1538.                 pci_disable_msi(dev->pdev);
  1539.  
  1540.         intel_opregion_fini(dev);
  1541.  
  1542.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1543.                 /* Flush any outstanding unpin_work. */
  1544.                 flush_workqueue(dev_priv->wq);
  1545.  
  1546.                 mutex_lock(&dev->struct_mutex);
  1547.                 i915_gem_free_all_phys_object(dev);
  1548.                 i915_gem_cleanup_ringbuffer(dev);
  1549.                 i915_gem_context_fini(dev);
  1550.                 mutex_unlock(&dev->struct_mutex);
  1551.                 i915_gem_cleanup_aliasing_ppgtt(dev);
  1552.                 i915_gem_cleanup_stolen(dev);
  1553.  
  1554.                 if (!I915_NEED_GFX_HWS(dev))
  1555.                         i915_free_hws(dev);
  1556.         }
  1557.  
  1558.         list_del(&dev_priv->gtt.base.global_link);
  1559.         WARN_ON(!list_empty(&dev_priv->vm_list));
  1560.         drm_mm_takedown(&dev_priv->gtt.base.mm);
  1561.         if (dev_priv->regs != NULL)
  1562.                 pci_iounmap(dev->pdev, dev_priv->regs);
  1563.  
  1564.         intel_teardown_gmbus(dev);
  1565.         intel_teardown_mchbar(dev);
  1566.  
  1567.         destroy_workqueue(dev_priv->wq);
  1568.         pm_qos_remove_request(&dev_priv->pm_qos);
  1569.  
  1570.         dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
  1571.  
  1572.         if (dev_priv->slab)
  1573.                 kmem_cache_destroy(dev_priv->slab);
  1574.  
  1575.         pci_dev_put(dev_priv->bridge_dev);
  1576.         kfree(dev->dev_private);
  1577.  
  1578.         return 0;
  1579. }
  1580. #endif
  1581.  
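/**
 * i915_driver_open - set up per-client driver state
 * @dev: DRM device
 * @file: DRM client handle
 *
 * Allocates the file-private structure and initialises its request list,
 * spinlock and context idr for a newly opened client.
 */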
  1582. int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  1583. {
  1584.         struct drm_i915_file_private *file_priv;
  1585.  
  1586.         DRM_DEBUG_DRIVER("\n");
  1587.         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
  1588.         if (!file_priv)
  1589.                 return -ENOMEM;
  1590.  
  1591.         file->driver_priv = file_priv;
  1592.  
  1593.         spin_lock_init(&file_priv->mm.lock);
  1594.         INIT_LIST_HEAD(&file_priv->mm.request_list);
  1595.  
  1596.         idr_init(&file_priv->context_idr);
  1597.  
  1598.         return 0;
  1599. }
  1600.  
  1601. #if 0
  1602. /**
  1603.  * i915_driver_lastclose - clean up after all DRM clients have exited
  1604.  * @dev: DRM device
  1605.  *
  1606.  * Take care of cleaning up after all DRM clients have exited.  In the
  1607.  * mode setting case, we want to restore the kernel's initial mode (just
  1608.  * in case the last client left us in a bad state).
  1609.  *
  1610.  * Additionally, in the non-mode setting case, we'll tear down the GTT
  1611.  * and DMA structures, since the kernel won't be using them, and clean
  1612.  * up any GEM state.
  1613.  */
  1614. void i915_driver_lastclose(struct drm_device * dev)
  1615. {
  1616.         drm_i915_private_t *dev_priv = dev->dev_private;
  1617.  
  1618.         /* On gen6+ we refuse to init without kms enabled, but then the drm core
  1619.          * goes right around and calls lastclose. Check for this and don't clean
  1620.          * up anything. */
  1621.         if (!dev_priv)
  1622.                 return;
  1623.  
  1624.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1625.                 intel_fb_restore_mode(dev);
  1626.                 vga_switcheroo_process_delayed_switch();
  1627.                 return;
  1628.         }
  1629.  
  1630.         i915_gem_lastclose(dev);
  1631.  
  1632.         i915_dma_cleanup(dev);
  1633. }
  1634.  
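/* Per-client teardown: preclose releases the client's GEM contexts and
 * outstanding requests while the device is still live; postclose then
 * frees the file-private structure allocated in i915_driver_open(). */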
  1635. void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
  1636. {
  1637.         i915_gem_context_close(dev, file_priv);
  1638.         i915_gem_release(dev, file_priv);
  1639. }
  1640.  
  1641. void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
  1642. {
  1643.         struct drm_i915_file_private *file_priv = file->driver_priv;
  1644.  
  1645.         kfree(file_priv);
  1646. }
  1647.  
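/* Legacy ioctl dispatch table. DRM_AUTH requires an authenticated client,
 * DRM_MASTER/DRM_ROOT_ONLY restrict the call to the DRM master or root,
 * DRM_UNLOCKED skips the global DRM lock, and DRM_RENDER_ALLOW exposes the
 * ioctl on render nodes. */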
  1648. const struct drm_ioctl_desc i915_ioctls[] = {
  1649.         DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1650.         DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
  1651.         DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
  1652.         DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
  1653.         DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
  1654.         DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
  1655.         DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
  1656.         DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1657.         DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
  1658.         DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
  1659.         DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1660.         DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
  1661.         DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1662.         DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1663.         DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
  1664.         DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
  1665.         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1666.         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1667.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
  1668.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1669.         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1670.         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1671.         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1672.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1673.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1674.         DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1675.         DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1676.         DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1677.         DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1678.         DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1679.         DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1680.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1681.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1682.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1683.         DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1684.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1685.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1686.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1687.         DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
  1688.         DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1689.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1690.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1691.         DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1692.         DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1693.         DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1694.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1695.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1696.         DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
  1697. };
  1698.  
  1699. int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
  1700.  
  1701. /*
  1702.  * This is really ugly: because old userspace abused the linux agp interface to
  1703.  * manage the gtt, we need to claim that all intel devices are agp; otherwise
  1704.  * the drm core refuses to initialize the agp support code.
  1705.  */
  1706. int i915_driver_device_is_agp(struct drm_device * dev)
  1707. {
  1708.         return 1;
  1709. }
  1710. #endif
  1711.  
  1712.  
  1713.