/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
//#include <linux/pnp.h>
//#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
//#include <acpi/video.h>

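/* Hand-rolled prototype: presumably the KolibriOS port's headers do not
 * declare pci_iomap(), so it is forward-declared here instead.
 */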
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);

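/* Legacy DRI1 ring helpers: LP_RING resolves to the render ring (RCS), and
 * BEGIN/OUT/ADVANCE wrap the intel_ring_* API so that the old ioctl paths
 * below read like the historical ring macros they were written with.
 */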
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
        intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
        intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
        intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                      \
        if (LP_RING(dev->dev_private)->obj == NULL)                     \
                LOCK_TEST_WITH_RETURN(dev, file);                       \
} while (0)

static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
        if (I915_NEED_GFX_HWS(dev_priv->dev))
                return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
        else
                return intel_read_status_page(LP_RING(dev_priv), reg);
}

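/* The breadcrumb is a monotonically increasing counter that the ring stores
 * at dword I915_BREADCRUMB_INDEX (0x21) of the hardware status page;
 * READ_BREADCRUMB is how the DRI1 paths below poll for command completion.
 */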
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX           0x21

void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;

        if (dev->primary->master) {
                master_priv = dev->primary->master->driver_priv;
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch =
                                READ_BREADCRUMB(dev_priv);
        }
}

static void i915_write_hws_pga(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 addr;

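        /* HWS_PGA takes the page-aligned bus address in its high bits; on
         * gen4+ bits [35:32] of the 36-bit address are additionally folded
         * into register bits [7:4].
         */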
        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Program Hardware Status Page */
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Cannot allocate hardware status page\n");
                return -ENOMEM;
        }

        memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
               0, PAGE_SIZE);

        i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
        return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (ring->status_page.gfx_addr) {
                ring->status_page.gfx_addr = 0;
                iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}

#if 0

void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->dri1.cpp = init->cpp;
        dev_priv->dri1.back_offset = init->back_offset;
        dev_priv->dri1.front_offset = init->front_offset;
        dev_priv->dri1.current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->dri1.allow_batchbuffer = 1;

        return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

        if (ring->virtual_start == NULL) {
                DRM_ERROR("cannot ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Cannot find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                         ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                i915_write_hws_pga(dev);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
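/* For example, a hypothetical 2D command dword 0x54f00006 decodes as
 * (cmd >> 29) & 0x7 == 0x2, so validate_cmd() reports a length of
 * (cmd & 0xff) + 2 = 8 dwords.
 */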
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x4:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}

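/* Copy a pre-validated batch of dwords into the ring. The size check keeps
 * 8 bytes of slack (mirroring the ring->space accounting elsewhere), and the
 * +1 leaves room for the MI_NOOP used to pad odd-length batches to a qword.
 */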
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}

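/* Program the hardware drawing rectangle (cliprect) that subsequent
 * rendering is scissored to; gen4+ uses the shorter I965 variant of
 * GFX_OP_DRAWRECT_INFO.
 */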
int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}

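/* Replay a user command buffer once per cliprect (or once if none were
 * passed), emitting the matching drawing rectangle before each pass.
 */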
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t *cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}

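/* Legacy DRI1 page flip on plane A: toggle between the front and back
 * buffer offsets with an asynchronous CMD_OP_DISPLAYBUFFER_INFO, wait for
 * the flip event, then store a breadcrumb for the sarea.
 */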
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->dri1.current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->dri1.current_page == 0) {
                OUT_RING(dev_priv->dri1.back_offset);
                dev_priv->dri1.current_page = 1;
        } else {
                OUT_RING(dev_priv->dri1.front_offset);
                dev_priv->dri1.current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
        return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
        struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

        i915_kernel_lost_context(dev);
        return intel_wait_ring_idle(ring);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv->dri1.allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                         batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        i915_kernel_lost_context(dev);

        DRM_DEBUG_DRIVER("\n");

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 1;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }

        return dev_priv->counter;
}

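/* Block until the breadcrumb reaches irq_nr, for at most three seconds:
 * via the ring's user interrupt when one can be acquired, otherwise by
 * polling the status page.
 */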
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret = 0;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
                         READ_BREADCRUMB(dev_priv));

        if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
                if (master_priv->sarea_priv)
                        master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
                return 0;
        }

        if (master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

        if (ring->irq_get(ring)) {
                DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
                            READ_BREADCRUMB(dev_priv) >= irq_nr);
                ring->irq_put(ring);
        } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
                ret = -EBUSY;

        if (ret == -EBUSY) {
                DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
                          READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
        }

        return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_emit_t *emit = data;
        int result;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        result = i915_emit_irq(dev);
        mutex_unlock(&dev->struct_mutex);

        if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
                DRM_ERROR("copy_to_user\n");
                return -EFAULT;
        }

        return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_irq_wait_t *irqwait = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_vblank_pipe_t *pipe = data;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

        return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        /* The delayed swap mechanism was fundamentally racy, and has been
         * removed.  The model was that the client requested a delayed flip/swap
         * from the kernel, then waited for vblank before continuing to perform
         * rendering.  The problem was that the kernel might wake the client
         * up before it dispatched the vblank swap (since the lock has to be
         * held while touching the ringbuffer), in which case the client would
         * clear and start the next frame before the swap occurred, and
         * flicker would occur in addition to likely missing the vblank.
         *
         * In the absence of this ioctl, userland falls back to a correct path
         * of waiting for a vblank, then dispatching the swap on its own.
         * Context switching to userland and back is plenty fast enough for
         * meeting the requirements of vblank swapping.
         */
        return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = 1;
                break;
        case I915_PARAM_HAS_BSD:
                value = intel_ring_initialized(&dev_priv->ring[VCS]);
                break;
        case I915_PARAM_HAS_BLT:
                value = intel_ring_initialized(&dev_priv->ring[BCS]);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        case I915_PARAM_HAS_RELAXED_DELTA:
                value = 1;
                break;
        case I915_PARAM_HAS_GEN7_SOL_RESET:
                value = 1;
                break;
        case I915_PARAM_HAS_LLC:
                value = HAS_LLC(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
                value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
                break;
        case I915_PARAM_HAS_SEMAPHORES:
                value = i915_semaphore_is_enabled(dev);
                break;
        case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}
#endif

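/* DRI1 path for chipsets that keep the hardware status page in graphics
 * memory: point HWS_PGA at the GTT offset supplied by userspace and map a
 * CPU-side alias of that page.
 */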
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return -ENODEV;

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                WARN(1, "tried to set status page when mode setting active\n");
                return 0;
        }

        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->dri1.gfx_hws_cpu_addr =
                ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
        if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
                DRM_ERROR("cannot ioremap virtual address for"
                          " G33 hw status page\n");
                return -ENOMEM;
        }

        memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                         ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
                         ring->status_page.page_addr);
        return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

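/* MCHBAR is the memory controller hub's register window. Its base lives at
 * config offset 0x44 before gen4 and 0x48 from gen4 onwards; on i915G/GM
 * the enable bit sits in the DEVEN register rather than bit 0 of the BAR.
 */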
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define   DEVEN_MCHBAR_EN (1 << 28)

/* Set up MCHBAR if possible; if we have to enable it ourselves, flag
 * mchbar_need_disable so it can be disabled again at teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        dbgprintf("MCHBAR disabled; enabling it is not implemented in this port\n");

#if 0
        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
#endif
}

/* true = enable decode, false = disable decode */
  1185. static unsigned int i915_vga_set_decode(void *cookie, bool state)
  1186. {
  1187.         struct drm_device *dev = cookie;
  1188.  
  1189.         intel_modeset_vga_set_state(dev, state);
  1190.         if (state)
  1191.                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
  1192.                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
  1193.         else
  1194.                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
  1195. }
  1196.  
  1197.  
  1198.  
  1199.  
  1200.  
  1201.  
  1202. static int i915_load_modeset_init(struct drm_device *dev)
  1203. {
  1204.     struct drm_i915_private *dev_priv = dev->dev_private;
  1205.     int ret;
  1206.  
  1207.     ret = intel_parse_bios(dev);
  1208.     if (ret)
  1209.         DRM_INFO("failed to find VBIOS tables\n");
  1210.  
  1211. //    intel_register_dsm_handler();
  1212.  
  1213.         /* Initialise stolen first so that we may reserve preallocated
  1214.          * objects for the BIOS to KMS transition.
  1215.          */
  1216.         ret = i915_gem_init_stolen(dev);
  1217.         if (ret)
  1218.                 goto cleanup_vga_switcheroo;
  1219.  
  1220.     intel_modeset_init(dev);
  1221.  
  1222.         ret = i915_gem_init(dev);
  1223.     if (ret)
  1224.                 goto cleanup_gem_stolen;
  1225.  
  1226.     intel_modeset_gem_init(dev);
  1227.  
  1228.         ret = drm_irq_install(dev);
  1229.         if (ret)
  1230.                 goto cleanup_gem;
  1231.  
  1232.     /* Always safe in the mode setting case. */
  1233.     /* FIXME: do pre/post-mode set stuff in core KMS code */
  1234.     dev->vblank_disable_allowed = 1;
  1235.  
  1236.     ret = intel_fbdev_init(dev);
  1237.     if (ret)
  1238.         goto cleanup_irq;
  1239.  
  1240. //    drm_kms_helper_poll_init(dev);
  1241.  
  1242.     /* We're off and running w/KMS */
  1243.     dev_priv->mm.suspended = 0;
  1244.  
  1245.     return 0;
  1246.  
  1247. cleanup_irq:
  1248. //    drm_irq_uninstall(dev);
  1249. cleanup_gem:
  1250. //    mutex_lock(&dev->struct_mutex);
  1251. //    i915_gem_cleanup_ringbuffer(dev);
  1252. //    mutex_unlock(&dev->struct_mutex);
  1253. //      i915_gem_cleanup_aliasing_ppgtt(dev);
  1254. cleanup_gem_stolen:
  1255. //      i915_gem_cleanup_stolen(dev);
  1256. cleanup_vga_switcheroo:
  1257. //    vga_switcheroo_unregister_client(dev->pdev);
  1258. cleanup_vga_client:
  1259. //    vga_client_register(dev->pdev, NULL, NULL, NULL);
  1260. out:
  1261.     return ret;
  1262. }
  1263.  
  1264.  
  1265.  
  1266.  
  1267. static void i915_dump_device_info(struct drm_i915_private *dev_priv)
  1268. {
  1269.         const struct intel_device_info *info = dev_priv->info;
  1270.  
  1271. #define DEV_INFO_FLAG(name) info->name ? #name "," : ""
  1272. #define DEV_INFO_SEP ,
  1273.         DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
  1274.                          "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
  1275.                          info->gen,
  1276.                          dev_priv->dev->pdev->device,
  1277.                          DEV_INFO_FLAGS);
  1278. #undef DEV_INFO_FLAG
  1279. #undef DEV_INFO_SEP
  1280. }
  1281.  
  1282. /**
  1283.  * i915_driver_load - setup chip and create an initial config
  1284.  * @dev: DRM device
  1285.  * @flags: startup flags
  1286.  *
  1287.  * The driver load routine has to do several things:
  1288.  *   - drive output discovery via intel_modeset_init()
  1289.  *   - initialize the memory manager
  1290.  *   - allocate initial config memory
  1291.  *   - setup the DRM framebuffer with the allocated memory
  1292.  */
  1293. int i915_driver_load(struct drm_device *dev, unsigned long flags)
  1294. {
  1295.     struct drm_i915_private *dev_priv;
  1296.         struct intel_device_info *info;
  1297.         int ret = 0, mmio_bar, mmio_size;
  1298.         uint32_t aperture_size;
  1299.  
  1300.         ENTER();
  1301.  
  1302.         info = (struct intel_device_info *) flags;
  1303.  
  1304. #if 0
  1305.         /* Refuse to load on gen6+ without kms enabled. */
  1306.         if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
  1307.                 return -ENODEV;
  1308.  
  1309.         /* i915 has 4 more counters */
  1310.         dev->counters += 4;
  1311.         dev->types[6] = _DRM_STAT_IRQ;
  1312.         dev->types[7] = _DRM_STAT_PRIMARY;
  1313.         dev->types[8] = _DRM_STAT_SECONDARY;
  1314.         dev->types[9] = _DRM_STAT_DMA;
  1315. #endif
  1316.  
  1317.     dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
  1318.     if (dev_priv == NULL)
  1319.         return -ENOMEM;
  1320.  
  1321.     dev->dev_private = (void *)dev_priv;
  1322.     dev_priv->dev = dev;
  1323.         dev_priv->info = info;
  1324.  
  1325.         i915_dump_device_info(dev_priv);
  1326.  
  1327.     if (i915_get_bridge_dev(dev)) {
  1328.         ret = -EIO;
  1329.         goto free_priv;
  1330.     }
  1331.  
  1332.         ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
  1333.         if (!ret) {
  1334.                 DRM_ERROR("failed to set up gmch\n");
  1335.                 ret = -EIO;
  1336.                 goto put_bridge;
  1337.         }
  1338.  
  1339.         dev_priv->mm.gtt = intel_gtt_get();
  1340.         if (!dev_priv->mm.gtt) {
  1341.                 DRM_ERROR("Failed to initialize GTT\n");
  1342.                 ret = -ENODEV;
  1343.                 goto put_gmch;
  1344.         }
  1345.  
  1346.  
  1347.         pci_set_master(dev->pdev);
  1348.  
  1349.     /* overlay on gen2 is broken and can't address above 1G */
  1350. //    if (IS_GEN2(dev))
  1351. //        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
  1352.  
  1353.     /* 965GM sometimes incorrectly writes to hardware status page (HWS)
  1354.      * using 32bit addressing, overwriting memory if HWS is located
  1355.      * above 4GB.
  1356.      *
  1357.      * The documentation also mentions an issue with undefined
  1358.      * behaviour if any general state is accessed within a page above 4GB,
  1359.      * which also needs to be handled carefully.
  1360.      */
  1361. //    if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
  1362. //        dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
  1363.  
  1364.     mmio_bar = IS_GEN2(dev) ? 1 : 0;
  1365.         /* Before gen4, the registers and the GTT are behind different BARs.
  1366.          * However, from gen4 onwards, the registers and the GTT are shared
  1367.          * in the same BAR, so we want to restrict this ioremap from
  1368.          * clobbering the GTT which we want ioremap_wc instead. Fortunately,
  1369.          * the register BAR remains the same size for all the earlier
  1370.          * generations up to Ironlake.
  1371.          */
  1372.         if (info->gen < 5)
  1373.                 mmio_size = 512*1024;
  1374.         else
  1375.                 mmio_size = 2*1024*1024;
  1376.  
  1377.         dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
  1378.     if (!dev_priv->regs) {
  1379.         DRM_ERROR("failed to map registers\n");
  1380.         ret = -EIO;
  1381.                 goto put_gmch;
  1382.     }
  1383.  
  1384.         aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
  1385.         dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
  1386.  
  1387.     dbgprintf("gtt_base_addr %x aperture_size %d\n",
  1388.                dev_priv->mm.gtt_base_addr, aperture_size );
  1389.  
  1390. //   dev_priv->mm.gtt_mapping =
  1391. //       io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
  1392. //                    aperture_size);
  1393. //   if (dev_priv->mm.gtt_mapping == NULL) {
  1394. //       ret = -EIO;
  1395. //        goto out_rmmap;
  1396. //    }
  1397.  
  1398. //      i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
  1399. //                      aperture_size);
  1400.  
  1401.     /* The i915 workqueue is primarily used for batched retirement of
  1402.      * requests (and thus managing bo) once the task has been completed
  1403.      * by the GPU. i915_gem_retire_requests() is called directly when we
  1404.      * need high-priority retirement, such as waiting for an explicit
  1405.      * bo.
  1406.      *
  1407.      * It is also used for periodic low-priority events, such as
  1408.      * idle-timers and recording error state.
  1409.      *
  1410.      * All tasks on the workqueue are expected to acquire the dev mutex
  1411.      * so there is no point in running more than one instance of the
  1412.          * workqueue at any time.  Use an ordered one.
  1413.      */
  1414.         dev_priv->wq = alloc_ordered_workqueue("i915", 0);
  1415.       if (dev_priv->wq == NULL) {
  1416.           DRM_ERROR("Failed to create our workqueue.\n");
  1417.           ret = -ENOMEM;
  1418.           goto out_mtrrfree;
  1419.       }
  1420.  
  1421.         /* This must be called before any calls to HAS_PCH_* */
  1422.         intel_detect_pch(dev);
  1423.  
  1424.         intel_irq_init(dev);
  1425.         intel_gt_init(dev);
  1426.  
  1427.         /* Try to make sure MCHBAR is enabled before poking at it */
  1428.         intel_setup_mchbar(dev);
  1429.         intel_setup_gmbus(dev);
  1430.         intel_opregion_setup(dev);
  1431.
  1432.         /* Make sure the bios did its job and set up vital registers */
  1433.         intel_setup_bios(dev);
  1434.
  1435.         i915_gem_load(dev);
  1436.
  1437.         /* Init HWS */
  1438.         if (!I915_NEED_GFX_HWS(dev)) {
  1439.                 ret = i915_init_phys_hws(dev);
  1440.                 if (ret)
  1441.                         goto out_gem_unload;
  1442.         }
  1443.  
  1444.         /* On the 945G/GM, the chipset reports the MSI capability on the
  1445.          * integrated graphics even though the support isn't actually there
  1446.          * according to the published specs.  It doesn't appear to function
  1447.          * correctly in testing on 945G.
  1448.          * This may be a side effect of MSI having been made available for PEG
  1449.          * and the registers being closely associated.
  1450.          *
  1451.          * According to chipset errata, on the 965GM, MSI interrupts may
  1452.          * be lost or delayed, but we use them anyway to avoid
  1453.          * stuck interrupts on some machines.
  1454.          */
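        /* Upstream Linux acts on the erratum above by enabling MSI on
         * everything except 945G/945GM; MSI is not wired up in this port,
         * so the upstream call is kept only as a disabled sketch:
         */
//      if (!IS_I945G(dev) && !IS_I945GM(dev))
//              pci_enable_msi(dev->pdev);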
  1455.  
  1456.         spin_lock_init(&dev_priv->irq_lock);
  1457.         spin_lock_init(&dev_priv->error_lock);
  1458.         spin_lock_init(&dev_priv->rps.lock);
  1459.         spin_lock_init(&dev_priv->dpio_lock);
  1460.
  1461.         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
  1462.                 dev_priv->num_pipe = 3;
  1463.         else if (IS_MOBILE(dev) || !IS_GEN2(dev))
  1464.                 dev_priv->num_pipe = 2;
  1465.         else
  1466.                 dev_priv->num_pipe = 1;
  1467.  
  1468. //    ret = drm_vblank_init(dev, dev_priv->num_pipe);
  1469. //    if (ret)
  1470. //        goto out_gem_unload;
  1471.  
  1472.         /* Start out suspended */
  1473.         dev_priv->mm.suspended = 1;
  1474.  
  1475.         ret = i915_load_modeset_init(dev);
  1476.         if (ret < 0) {
  1477.                 DRM_ERROR("failed to init modeset\n");
  1478.                 goto out_gem_unload;
  1479.         }
  1480.  
  1481.         /* Must be done after probing outputs */
  1482. //    intel_opregion_init(dev);
  1483. //    acpi_video_register();
  1484.  
  1485. //    setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
  1486. //            (unsigned long) dev);
  1487.  
  1488.  
  1489.         if (IS_GEN5(dev))
  1490.                 intel_gpu_ips_init(dev_priv);
  1491.  
  1492.         LEAVE();
  1493.
  1494.         return 0;
  1495.  
  1496. out_gem_unload:
  1497. //    if (dev_priv->mm.inactive_shrinker.shrink)
  1498. //        unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  1499.  
  1500. //    if (dev->pdev->msi_enabled)
  1501. //        pci_disable_msi(dev->pdev);
  1502.  
  1503. //    intel_teardown_gmbus(dev);
  1504. //    intel_teardown_mchbar(dev);
  1505. //    destroy_workqueue(dev_priv->wq);
  1506. out_mtrrfree:
  1507. //      if (dev_priv->mm.gtt_mtrr >= 0) {
  1508. //              mtrr_del(dev_priv->mm.gtt_mtrr,
  1509. //                       dev_priv->mm.gtt_base_addr,
  1510. //                       aperture_size);
  1511. //              dev_priv->mm.gtt_mtrr = -1;
  1512. //      }
  1513. //      io_mapping_free(dev_priv->mm.gtt_mapping);
  1514. out_rmmap:
  1515.         pci_iounmap(dev->pdev, dev_priv->regs);
  1516. put_gmch:
  1517. //   intel_gmch_remove();
  1518. put_bridge:
  1519. //    pci_dev_put(dev_priv->bridge_dev);
  1520. free_priv:
  1521.         kfree(dev_priv);
  1522.         return ret;
  1523. }
  1524.  
  1525. #if 0
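/* The original DRI1/DRM entry points below (unload, open, lastclose,
 * pre/postclose and the ioctl table) are compiled out in this port and
 * kept only for reference against the upstream Linux driver.
 */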
  1526.  
  1527. int i915_driver_unload(struct drm_device *dev)
  1528. {
  1529.         struct drm_i915_private *dev_priv = dev->dev_private;
  1530.         int ret;
  1531.  
  1532.         intel_gpu_ips_teardown();
  1533.  
  1534.         i915_teardown_sysfs(dev);
  1535.  
  1536.         if (dev_priv->mm.inactive_shrinker.shrink)
  1537.                 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
  1538.  
  1539.         mutex_lock(&dev->struct_mutex);
  1540.         ret = i915_gpu_idle(dev);
  1541.         if (ret)
  1542.                 DRM_ERROR("failed to idle hardware: %d\n", ret);
  1543.         i915_gem_retire_requests(dev);
  1544.         mutex_unlock(&dev->struct_mutex);
  1545.  
  1546.         /* Cancel the retire work handler, which should be idle now. */
  1547.         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  1548.  
  1549.         io_mapping_free(dev_priv->mm.gtt_mapping);
  1550.         if (dev_priv->mm.gtt_mtrr >= 0) {
  1551.                 mtrr_del(dev_priv->mm.gtt_mtrr,
  1552.                          dev_priv->mm.gtt_base_addr,
  1553.                          dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
  1554.                 dev_priv->mm.gtt_mtrr = -1;
  1555.         }
  1556.  
  1557.         acpi_video_unregister();
  1558.  
  1559.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1560.                 intel_fbdev_fini(dev);
  1561.                 intel_modeset_cleanup(dev);
  1562.  
  1563.                 /*
  1564.                  * free the memory space allocated for the child device
  1565.                  * config parsed from VBT
  1566.                  */
  1567.                 if (dev_priv->child_dev && dev_priv->child_dev_num) {
  1568.                         kfree(dev_priv->child_dev);
  1569.                         dev_priv->child_dev = NULL;
  1570.                         dev_priv->child_dev_num = 0;
  1571.                 }
  1572.  
  1573.                 vga_switcheroo_unregister_client(dev->pdev);
  1574.                 vga_client_register(dev->pdev, NULL, NULL, NULL);
  1575.         }
  1576.  
  1577.         /* Free error state after interrupts are fully disabled. */
  1578.         del_timer_sync(&dev_priv->hangcheck_timer);
  1579.         cancel_work_sync(&dev_priv->error_work);
  1580.         i915_destroy_error_state(dev);
  1581.  
  1582.         if (dev->pdev->msi_enabled)
  1583.                 pci_disable_msi(dev->pdev);
  1584.  
  1585.         intel_opregion_fini(dev);
  1586.  
  1587.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1588.                 /* Flush any outstanding unpin_work. */
  1589.                 flush_workqueue(dev_priv->wq);
  1590.  
  1591.                 mutex_lock(&dev->struct_mutex);
  1592.                 i915_gem_free_all_phys_object(dev);
  1593.                 i915_gem_cleanup_ringbuffer(dev);
  1594.                 i915_gem_context_fini(dev);
  1595.                 mutex_unlock(&dev->struct_mutex);
  1596.                 i915_gem_cleanup_aliasing_ppgtt(dev);
  1597.                 i915_gem_cleanup_stolen(dev);
  1598.                 drm_mm_takedown(&dev_priv->mm.stolen);
  1599.  
  1600.                 intel_cleanup_overlay(dev);
  1601.  
  1602.                 if (!I915_NEED_GFX_HWS(dev))
  1603.                         i915_free_hws(dev);
  1604.         }
  1605.  
  1606.         if (dev_priv->regs != NULL)
  1607.                 pci_iounmap(dev->pdev, dev_priv->regs);
  1608.  
  1609.         intel_teardown_gmbus(dev);
  1610.         intel_teardown_mchbar(dev);
  1611.  
  1612.         destroy_workqueue(dev_priv->wq);
  1613.  
  1614.         pci_dev_put(dev_priv->bridge_dev);
  1615.         kfree(dev->dev_private);
  1616.  
  1617.         return 0;
  1618. }
  1619.  
  1620. int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  1621. {
  1622.         struct drm_i915_file_private *file_priv;
  1623.  
  1624.         DRM_DEBUG_DRIVER("\n");
  1625.         file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
  1626.         if (!file_priv)
  1627.                 return -ENOMEM;
  1628.  
  1629.         file->driver_priv = file_priv;
  1630.  
  1631.         spin_lock_init(&file_priv->mm.lock);
  1632.         INIT_LIST_HEAD(&file_priv->mm.request_list);
  1633.  
  1634.         idr_init(&file_priv->context_idr);
  1635.  
  1636.         return 0;
  1637. }
  1638.  
  1639. /**
  1640.  * i915_driver_lastclose - clean up after all DRM clients have exited
  1641.  * @dev: DRM device
  1642.  *
  1643.  * Take care of cleaning up after all DRM clients have exited.  In the
  1644.  * mode setting case, we want to restore the kernel's initial mode (just
  1645.  * in case the last client left us in a bad state).
  1646.  *
  1647.  * Additionally, in the non-mode setting case, we'll tear down the GTT
  1648.  * and DMA structures, since the kernel won't be using them, and clean
  1649.  * up any GEM state.
  1650.  */
  1651. void i915_driver_lastclose(struct drm_device * dev)
  1652. {
  1653.         drm_i915_private_t *dev_priv = dev->dev_private;
  1654.  
  1655.         /* On gen6+ we refuse to init without kms enabled, but then the drm core
  1656.          * goes right around and calls lastclose. Check for this and don't clean
  1657.          * up anything. */
  1658.         if (!dev_priv)
  1659.                 return;
  1660.  
  1661.         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
  1662.                 intel_fb_restore_mode(dev);
  1663.                 vga_switcheroo_process_delayed_switch();
  1664.                 return;
  1665.         }
  1666.  
  1667.         i915_gem_lastclose(dev);
  1668.  
  1669.         i915_dma_cleanup(dev);
  1670. }
  1671.  
  1672. void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
  1673. {
  1674.         i915_gem_context_close(dev, file_priv);
  1675.         i915_gem_release(dev, file_priv);
  1676. }
  1677.  
  1678. void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
  1679. {
  1680.         struct drm_i915_file_private *file_priv = file->driver_priv;
  1681.  
  1682.         kfree(file_priv);
  1683. }
  1684.  
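/* Ioctl dispatch table: each entry maps an i915 ioctl to its handler and
 * encodes the privileges the caller needs (DRM_AUTH, DRM_MASTER,
 * DRM_ROOT_ONLY) and whether it may run without the global DRM lock
 * (DRM_UNLOCKED).
 */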
  1685. struct drm_ioctl_desc i915_ioctls[] = {
  1686.         DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1687.         DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
  1688.         DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
  1689.         DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
  1690.         DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
  1691.         DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
  1692.         DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
  1693.         DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1694.         DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
  1695.         DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
  1696.         DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1697.         DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
  1698.         DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1699.         DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE,  drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1700.         DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH),
  1701.         DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
  1702.         DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
  1703.         DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1704.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
  1705.         DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
  1706.         DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1707.         DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1708.         DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
  1709.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
  1710.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
  1711.         DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
  1712.         DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1713.         DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
  1714.         DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
  1715.         DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
  1716.         DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
  1717.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
  1718.         DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
  1719.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
  1720.         DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
  1721.         DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
  1722.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
  1723.         DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
  1724.         DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
  1725.         DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
  1726.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1727.         DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1728.         DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1729.         DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
  1730.         DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
  1731.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
  1732.         DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
  1733.         DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
  1734. };
  1735.  
  1736. int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
  1737.  
  1738. /*
  1739.  * This is really ugly: Because old userspace abused the linux agp interface to
  1740.  * manage the gtt, we need to claim that all intel devices are agp;
  1741.  * otherwise the drm core refuses to initialize the agp support code.
  1742.  */
  1743. int i915_driver_device_is_agp(struct drm_device * dev)
  1744. {
  1745.         return 1;
  1746. }
  1747. #endif
  1748.