
  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *
  26.  */
  27.  
  28. #include <drm/drmP.h>
  29. #include <drm/drm_vma_manager.h>
  30. #include <drm/i915_drm.h>
  31. #include "i915_drv.h"
  32. #include "i915_trace.h"
  33. #include "intel_drv.h"
  34. #include <linux/shmem_fs.h>
  35. #include <linux/slab.h>
  36. //#include <linux/swap.h>
  37. #include <linux/scatterlist.h>
  38. #include <linux/pci.h>
  39.  
  40. extern int x86_clflush_size;
  41.  
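/* mmap() protection and sharing flags used by vm_mmap() below; these are
 * normally supplied by the system's mman headers. */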
  42. #define PROT_READ       0x1             /* page can be read */
  43. #define PROT_WRITE      0x2             /* page can be written */
  44. #define MAP_SHARED      0x01            /* Share changes */
  45.  
  46.  
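/* Prototypes for helpers implemented elsewhere in this port. */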
  47. struct drm_i915_gem_object *get_fb_obj();
  48.  
  49. unsigned long vm_mmap(struct file *file, unsigned long addr,
  50.          unsigned long len, unsigned long prot,
  51.          unsigned long flag, unsigned long offset);
  52.  
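/* Flush the CPU cache line containing *__p; a local stand-in for the
 * kernel's clflush() helper. */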
  53. static inline void clflush(volatile void *__p)
  54. {
  55.     asm volatile("clflush %0" : "+m" (*(volatile char*)__p));
  56. }
  57.  
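/* Linux-style error pointers: the top MAX_ERRNO values of the address
 * space encode negative errno codes (see IS_ERR_VALUE below). */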
  58. #define MAX_ERRNO       4095
  59.  
  60. #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
  61.  
  62.  
  63.  
  64. static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  65. static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
  66.                                                    bool force);
  67. static __must_check int
  68. i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  69.                                bool readonly);
  70. static __must_check int
  71. i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
  72.                            struct i915_address_space *vm,
  73.                                                     unsigned alignment,
  74.                                                     bool map_and_fenceable,
  75.                                                     bool nonblocking);
  76. static int i915_gem_phys_pwrite(struct drm_device *dev,
  77.                                 struct drm_i915_gem_object *obj,
  78.                                 struct drm_i915_gem_pwrite *args,
  79.                                 struct drm_file *file);
  80.  
  81. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  82.                                  struct drm_i915_gem_object *obj);
  83. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  84.                                          struct drm_i915_fence_reg *fence,
  85.                                          bool enable);
  86.  
  87. static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
  88. static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
  89. static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
  90.  
  91. static bool cpu_cache_is_coherent(struct drm_device *dev,
  92.                                   enum i915_cache_level level)
  93. {
  94.         return HAS_LLC(dev) || level != I915_CACHE_NONE;
  95. }
  96.  
  97. static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  98. {
  99.         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
  100.                 return true;
  101.  
  102.         return obj->pin_display;
  103. }
  104.  
  105. static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
  106. {
  107.         if (obj->tiling_mode)
  108.                 i915_gem_release_mmap(obj);
  109.  
  110.         /* As we do not have an associated fence register, we will force
  111.          * a tiling change if we ever need to acquire one.
  112.          */
  113.         obj->fence_dirty = false;
  114.         obj->fence_reg = I915_FENCE_REG_NONE;
  115. }
  116.  
  117. /* some bookkeeping */
  118. static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
  119.                                   size_t size)
  120. {
  121.         spin_lock(&dev_priv->mm.object_stat_lock);
  122.         dev_priv->mm.object_count++;
  123.         dev_priv->mm.object_memory += size;
  124.         spin_unlock(&dev_priv->mm.object_stat_lock);
  125. }
  126.  
  127. static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
  128.                                      size_t size)
  129. {
  130.         spin_lock(&dev_priv->mm.object_stat_lock);
  131.         dev_priv->mm.object_count--;
  132.         dev_priv->mm.object_memory -= size;
  133.         spin_unlock(&dev_priv->mm.object_stat_lock);
  134. }
  135.  
  136. static int
  137. i915_gem_wait_for_error(struct i915_gpu_error *error)
  138. {
  139.         int ret;
  140.  
  141. #define EXIT_COND (!i915_reset_in_progress(error))
  142.         if (EXIT_COND)
  143.                 return 0;
  144. #if 0
  145.         /*
  146.          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
  147.          * userspace. If it takes that long something really bad is going on and
  148.          * we should simply try to bail out and fail as gracefully as possible.
  149.          */
  150.         ret = wait_event_interruptible_timeout(error->reset_queue,
  151.                                                EXIT_COND,
  152.                                                10*HZ);
  153.         if (ret == 0) {
  154.                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
  155.                 return -EIO;
  156.         } else if (ret < 0) {
  157.                 return ret;
  158.         }
  159.  
  160. #endif
  161. #undef EXIT_COND
  162.  
  163.         return 0;
  164. }
  165.  
  166. int i915_mutex_lock_interruptible(struct drm_device *dev)
  167. {
  168.         struct drm_i915_private *dev_priv = dev->dev_private;
  169.         int ret;
  170.  
  171.         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
  172.         if (ret)
  173.                 return ret;
  174.  
  175.         ret = mutex_lock_interruptible(&dev->struct_mutex);
  176.         if (ret)
  177.                 return ret;
  178.  
  179.         WARN_ON(i915_verify_lists(dev));
  180.         return 0;
  181. }
  182.  
  183. static inline bool
  184. i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
  185. {
  186.         return i915_gem_obj_bound_any(obj) && !obj->active;
  187. }
  188.  
  189.  
  190. #if 0
  191.  
  192. int
  193. i915_gem_init_ioctl(struct drm_device *dev, void *data,
  194.                     struct drm_file *file)
  195. {
  196.         struct drm_i915_private *dev_priv = dev->dev_private;
  197.         struct drm_i915_gem_init *args = data;
  198.  
  199.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  200.                 return -ENODEV;
  201.  
  202.         if (args->gtt_start >= args->gtt_end ||
  203.             (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
  204.                 return -EINVAL;
  205.  
  206.         /* GEM with user mode setting was never supported on ilk and later. */
  207.         if (INTEL_INFO(dev)->gen >= 5)
  208.                 return -ENODEV;
  209.  
  210.         mutex_lock(&dev->struct_mutex);
  211.         i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
  212.                                   args->gtt_end);
  213.         dev_priv->gtt.mappable_end = args->gtt_end;
  214.         mutex_unlock(&dev->struct_mutex);
  215.  
  216.         return 0;
  217. }
  218. #endif
  219.  
  220. int
  221. i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  222.                             struct drm_file *file)
  223. {
  224.         struct drm_i915_private *dev_priv = dev->dev_private;
  225.         struct drm_i915_gem_get_aperture *args = data;
  226.         struct drm_i915_gem_object *obj;
  227.         size_t pinned;
  228.  
  229.         pinned = 0;
  230.         mutex_lock(&dev->struct_mutex);
  231.         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
  232.                 if (obj->pin_count)
  233.                         pinned += i915_gem_obj_ggtt_size(obj);
  234.         mutex_unlock(&dev->struct_mutex);
  235.  
  236.         args->aper_size = dev_priv->gtt.base.total;
  237.         args->aper_available_size = args->aper_size - pinned;
  238.  
  239.         return 0;
  240. }
  241.  
  242. void *i915_gem_object_alloc(struct drm_device *dev)
  243. {
  244.         struct drm_i915_private *dev_priv = dev->dev_private;
  245.         return kmalloc(sizeof(struct drm_i915_gem_object), 0);
  246. }
  247.  
  248. void i915_gem_object_free(struct drm_i915_gem_object *obj)
  249. {
  250.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  251.         kfree(obj);
  252. }
  253.  
  254. static int
  255. i915_gem_create(struct drm_file *file,
  256.                 struct drm_device *dev,
  257.                 uint64_t size,
  258.                 uint32_t *handle_p)
  259. {
  260.         struct drm_i915_gem_object *obj;
  261.         int ret;
  262.         u32 handle;
  263.  
  264.         size = roundup(size, PAGE_SIZE);
  265.         if (size == 0)
  266.                 return -EINVAL;
  267.  
  268.         /* Allocate the new object */
  269.         obj = i915_gem_alloc_object(dev, size);
  270.         if (obj == NULL)
  271.                 return -ENOMEM;
  272.  
  273.         ret = drm_gem_handle_create(file, &obj->base, &handle);
  274.         /* drop reference from allocate - handle holds it now */
  275.         drm_gem_object_unreference_unlocked(&obj->base);
  276.         if (ret)
  277.                 return ret;
  278.  
  279.         *handle_p = handle;
  280.         return 0;
  281. }
  282.  
  283. int
  284. i915_gem_dumb_create(struct drm_file *file,
  285.                      struct drm_device *dev,
  286.                      struct drm_mode_create_dumb *args)
  287. {
  288.         /* have to work out size/pitch and return them */
  289.         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
  290.         args->size = args->pitch * args->height;
  291.         return i915_gem_create(file, dev,
  292.                                args->size, &args->handle);
  293. }
  294.  
  295. /**
  296.  * Creates a new mm object and returns a handle to it.
  297.  */
  298. int
  299. i915_gem_create_ioctl(struct drm_device *dev, void *data,
  300.                       struct drm_file *file)
  301. {
  302.         struct drm_i915_gem_create *args = data;
  303.  
  304.         return i915_gem_create(file, dev,
  305.                                args->size, &args->handle);
  306. }
  307.  
  308.  
  309. #if 0
  310.  
  311. static inline int
  312. __copy_to_user_swizzled(char __user *cpu_vaddr,
  313.                         const char *gpu_vaddr, int gpu_offset,
  314.                 int length)
  315. {
  316.         int ret, cpu_offset = 0;
  317.  
  318.         while (length > 0) {
  319.                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
  320.                 int this_length = min(cacheline_end - gpu_offset, length);
  321.                 int swizzled_gpu_offset = gpu_offset ^ 64;
  322.  
  323.                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
  324.                                      gpu_vaddr + swizzled_gpu_offset,
  325.                                      this_length);
  326.                 if (ret)
  327.                         return ret + length;
  328.  
  329.                 cpu_offset += this_length;
  330.                 gpu_offset += this_length;
  331.                 length -= this_length;
  332.         }
  333.  
  334.         return 0;
  335. }
  336.  
  337. static inline int
  338. __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  339.                           const char __user *cpu_vaddr,
  340.                           int length)
  341. {
  342.         int ret, cpu_offset = 0;
  343.  
  344.         while (length > 0) {
  345.                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
  346.                 int this_length = min(cacheline_end - gpu_offset, length);
  347.                 int swizzled_gpu_offset = gpu_offset ^ 64;
  348.  
  349.                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
  350.                                cpu_vaddr + cpu_offset,
  351.                                this_length);
  352.                 if (ret)
  353.                         return ret + length;
  354.  
  355.                 cpu_offset += this_length;
  356.                 gpu_offset += this_length;
  357.                 length -= this_length;
  358.         }
  359.  
  360.         return 0;
  361. }
  362.  
  363. /* Per-page copy function for the shmem pread fastpath.
  364.  * Flushes invalid cachelines before reading the target if
  365.  * needs_clflush is set. */
  366. static int
  367. shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
  368.                  char __user *user_data,
  369.                  bool page_do_bit17_swizzling, bool needs_clflush)
  370. {
  371.         char *vaddr;
  372.         int ret;
  373.  
  374.         if (unlikely(page_do_bit17_swizzling))
  375.                 return -EINVAL;
  376.  
  377.         vaddr = kmap_atomic(page);
  378.         if (needs_clflush)
  379.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  380.                                        page_length);
  381.         ret = __copy_to_user_inatomic(user_data,
  382.                                       vaddr + shmem_page_offset,
  383.                                       page_length);
  384.         kunmap_atomic(vaddr);
  385.  
  386.         return ret ? -EFAULT : 0;
  387. }
  388.  
  389. static void
  390. shmem_clflush_swizzled_range(char *addr, unsigned long length,
  391.                              bool swizzled)
  392. {
  393.         if (unlikely(swizzled)) {
  394.                 unsigned long start = (unsigned long) addr;
  395.                 unsigned long end = (unsigned long) addr + length;
  396.  
  397.                 /* For swizzling simply ensure that we always flush both
  398.                  * channels. Lame, but simple and it works. Swizzled
  399.                  * pwrite/pread is far from a hotpath - current userspace
  400.                  * doesn't use it at all. */
  401.                 start = round_down(start, 128);
  402.                 end = round_up(end, 128);
  403.  
  404.                 drm_clflush_virt_range((void *)start, end - start);
  405.         } else {
  406.                 drm_clflush_virt_range(addr, length);
  407.         }
  408.  
  409. }
  410.  
  411. /* Only difference to the fast-path function is that this can handle bit17
  412.  * and uses non-atomic copy and kmap functions. */
  413. static int
  414. shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
  415.                  char __user *user_data,
  416.                  bool page_do_bit17_swizzling, bool needs_clflush)
  417. {
  418.         char *vaddr;
  419.         int ret;
  420.  
  421.         vaddr = kmap(page);
  422.         if (needs_clflush)
  423.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  424.                                              page_length,
  425.                                              page_do_bit17_swizzling);
  426.  
  427.         if (page_do_bit17_swizzling)
  428.                 ret = __copy_to_user_swizzled(user_data,
  429.                                               vaddr, shmem_page_offset,
  430.                                               page_length);
  431.         else
  432.                 ret = __copy_to_user(user_data,
  433.                                      vaddr + shmem_page_offset,
  434.                                      page_length);
  435.         kunmap(page);
  436.  
  437.         return ret ? - EFAULT : 0;
  438. }
  439.  
  440. static int
  441. i915_gem_shmem_pread(struct drm_device *dev,
  442.                           struct drm_i915_gem_object *obj,
  443.                           struct drm_i915_gem_pread *args,
  444.                           struct drm_file *file)
  445. {
  446.         char __user *user_data;
  447.         ssize_t remain;
  448.         loff_t offset;
  449.         int shmem_page_offset, page_length, ret = 0;
  450.         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
  451.         int prefaulted = 0;
  452.         int needs_clflush = 0;
  453.         struct sg_page_iter sg_iter;
  454.  
  455.         user_data = to_user_ptr(args->data_ptr);
  456.         remain = args->size;
  457.  
  458.         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  459.  
  460.         if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
  461.                 /* If we're not in the cpu read domain, set ourself into the gtt
  462.                  * read domain and manually flush cachelines (if required). This
  463.                  * optimizes for the case when the gpu will dirty the data
  464.                  * anyway again before the next pread happens. */
  465.                 needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
  466.                 ret = i915_gem_object_wait_rendering(obj, true);
  467.                 if (ret)
  468.                         return ret;
  469.         }
  470.  
  471.         ret = i915_gem_object_get_pages(obj);
  472.         if (ret)
  473.                 return ret;
  474.  
  475.         i915_gem_object_pin_pages(obj);
  476.  
  477.         offset = args->offset;
  478.  
  479.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
  480.                          offset >> PAGE_SHIFT) {
  481.                 struct page *page = sg_page_iter_page(&sg_iter);
  482.  
  483.                 if (remain <= 0)
  484.                         break;
  485.  
  486.                 /* Operation in this page
  487.                  *
  488.                  * shmem_page_offset = offset within page in shmem file
  489.                  * page_length = bytes to copy for this page
  490.                  */
  491.                 shmem_page_offset = offset_in_page(offset);
  492.                 page_length = remain;
  493.                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
  494.                         page_length = PAGE_SIZE - shmem_page_offset;
  495.  
  496.                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
  497.                         (page_to_phys(page) & (1 << 17)) != 0;
  498.  
  499.                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
  500.                                        user_data, page_do_bit17_swizzling,
  501.                                        needs_clflush);
  502.                 if (ret == 0)
  503.                         goto next_page;
  504.  
  505.                 mutex_unlock(&dev->struct_mutex);
  506.  
  507.                 if (likely(!i915_prefault_disable) && !prefaulted) {
  508.                         ret = fault_in_multipages_writeable(user_data, remain);
  509.                         /* Userspace is tricking us, but we've already clobbered
  510.                          * its pages with the prefault and promised to write the
  511.                          * data up to the first fault. Hence ignore any errors
  512.                          * and just continue. */
  513.                         (void)ret;
  514.                         prefaulted = 1;
  515.                 }
  516.  
  517.                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
  518.                                        user_data, page_do_bit17_swizzling,
  519.                                        needs_clflush);
  520.  
  521.                 mutex_lock(&dev->struct_mutex);
  522.  
  523. next_page:
  524.                 mark_page_accessed(page);
  525.  
  526.                 if (ret)
  527.                         goto out;
  528.  
  529.                 remain -= page_length;
  530.                 user_data += page_length;
  531.                 offset += page_length;
  532.         }
  533.  
  534. out:
  535.         i915_gem_object_unpin_pages(obj);
  536.  
  537.         return ret;
  538. }
  539.  
  540. /**
  541.  * Reads data from the object referenced by handle.
  542.  *
  543.  * On error, the contents of *data are undefined.
  544.  */
  545. int
  546. i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  547.                      struct drm_file *file)
  548. {
  549.         struct drm_i915_gem_pread *args = data;
  550.         struct drm_i915_gem_object *obj;
  551.         int ret = 0;
  552.  
  553.         if (args->size == 0)
  554.                 return 0;
  555.  
  556.         if (!access_ok(VERIFY_WRITE,
  557.                        to_user_ptr(args->data_ptr),
  558.                        args->size))
  559.                 return -EFAULT;
  560.  
  561.         ret = i915_mutex_lock_interruptible(dev);
  562.         if (ret)
  563.                 return ret;
  564.  
  565.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  566.         if (&obj->base == NULL) {
  567.                 ret = -ENOENT;
  568.                 goto unlock;
  569.         }
  570.  
  571.         /* Bounds check source.  */
  572.         if (args->offset > obj->base.size ||
  573.             args->size > obj->base.size - args->offset) {
  574.                 ret = -EINVAL;
  575.                 goto out;
  576.         }
  577.  
  578.         /* prime objects have no backing filp to GEM pread/pwrite
  579.          * pages from.
  580.          */
  581.         if (!obj->base.filp) {
  582.                 ret = -EINVAL;
  583.                 goto out;
  584.         }
  585.  
  586.         trace_i915_gem_object_pread(obj, args->offset, args->size);
  587.  
  588.         ret = i915_gem_shmem_pread(dev, obj, args, file);
  589.  
  590. out:
  591.         drm_gem_object_unreference(&obj->base);
  592. unlock:
  593.         mutex_unlock(&dev->struct_mutex);
  594.         return ret;
  595. }
  596.  
  597. /* This is the fast write path which cannot handle
  598.  * page faults in the source data
  599.  */
  600.  
  601. static inline int
  602. fast_user_write(struct io_mapping *mapping,
  603.                 loff_t page_base, int page_offset,
  604.                 char __user *user_data,
  605.                 int length)
  606. {
  607.         void __iomem *vaddr_atomic;
  608.         void *vaddr;
  609.         unsigned long unwritten;
  610.  
  611.         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
  612.         /* We can use the cpu mem copy function because this is X86. */
  613.         vaddr = (void __force*)vaddr_atomic + page_offset;
  614.         unwritten = __copy_from_user_inatomic_nocache(vaddr,
  615.                                                       user_data, length);
  616.         io_mapping_unmap_atomic(vaddr_atomic);
  617.         return unwritten;
  618. }
  619. #endif
  620.  
  621. #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
  622. /**
  623.  * This is the fast pwrite path, where we copy the data directly from the
  624.  * user into the GTT, uncached.
  625.  */
  626. static int
  627. i915_gem_gtt_pwrite_fast(struct drm_device *dev,
  628.                          struct drm_i915_gem_object *obj,
  629.                          struct drm_i915_gem_pwrite *args,
  630.                          struct drm_file *file)
  631. {
  632.         drm_i915_private_t *dev_priv = dev->dev_private;
  633.         ssize_t remain;
  634.         loff_t offset, page_base;
  635.         char __user *user_data;
  636.         int page_offset, page_length, ret;
  637.  
  638.         ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
  639.         if (ret)
  640.                 goto out;
  641.  
  642.         ret = i915_gem_object_set_to_gtt_domain(obj, true);
  643.         if (ret)
  644.                 goto out_unpin;
  645.  
  646.         ret = i915_gem_object_put_fence(obj);
  647.         if (ret)
  648.                 goto out_unpin;
  649.  
  650.         user_data = to_user_ptr(args->data_ptr);
  651.         remain = args->size;
  652.  
  653.         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
  654.  
  655.         while (remain > 0) {
  656.                 /* Operation in this page
  657.                  *
  658.                  * page_base = page offset within aperture
  659.                  * page_offset = offset within page
  660.                  * page_length = bytes to copy for this page
  661.                  */
  662.                 page_base = offset & PAGE_MASK;
  663.                 page_offset = offset_in_page(offset);
  664.                 page_length = remain;
  665.                 if ((page_offset + remain) > PAGE_SIZE)
  666.                         page_length = PAGE_SIZE - page_offset;
  667.  
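                /* KolibriOS: map the aperture page writable and copy into it
                 * directly, replacing the (compiled-out) fast_user_write()
                 * io-mapping path above. */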
  668.                 MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
  669.  
  670.                 memcpy(dev_priv->gtt.mappable+page_offset, user_data, page_length);
  671.  
  672.                 remain -= page_length;
  673.                 user_data += page_length;
  674.                 offset += page_length;
  675.         }
  676.  
  677. out_unpin:
  678.         i915_gem_object_unpin(obj);
  679. out:
  680.         return ret;
  681. }
  682.  
  683. /* Per-page copy function for the shmem pwrite fastpath.
  684.  * Flushes invalid cachelines before writing to the target if
  685.  * needs_clflush_before is set and flushes out any written cachelines after
  686.  * writing if needs_clflush is set. */
  687. static int
  688. shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
  689.                   char __user *user_data,
  690.                   bool page_do_bit17_swizzling,
  691.                   bool needs_clflush_before,
  692.                   bool needs_clflush_after)
  693. {
  694.         char *vaddr;
  695.         int ret = 0;
  696.  
  697.         if (unlikely(page_do_bit17_swizzling))
  698.                 return -EINVAL;
  699.  
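        /* KolibriOS: map the page into kernel address space with MapIoMem()
         * in place of kmap_atomic(); released with FreeKernelSpace() below. */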
  700.         vaddr = (char *)MapIoMem((addr_t)page, 4096, PG_SW);
  701.         if (needs_clflush_before)
  702.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  703.                                        page_length);
  704.         memcpy(vaddr + shmem_page_offset,
  705.                                                 user_data,
  706.                                                 page_length);
  707.         if (needs_clflush_after)
  708.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  709.                                        page_length);
  710.         FreeKernelSpace(vaddr);
  711.  
  712.         return ret ? -EFAULT : 0;
  713. }
  714. #if 0
  715.  
  716. /* Only difference to the fast-path function is that this can handle bit17
  717.  * and uses non-atomic copy and kmap functions. */
  718. static int
  719. shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
  720.                   char __user *user_data,
  721.                   bool page_do_bit17_swizzling,
  722.                   bool needs_clflush_before,
  723.                   bool needs_clflush_after)
  724. {
  725.         char *vaddr;
  726.         int ret;
  727.  
  728.         vaddr = kmap(page);
  729.         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
  730.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  731.                                              page_length,
  732.                                              page_do_bit17_swizzling);
  733.         if (page_do_bit17_swizzling)
  734.                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
  735.                                                 user_data,
  736.                                                 page_length);
  737.         else
  738.                 ret = __copy_from_user(vaddr + shmem_page_offset,
  739.                                        user_data,
  740.                                        page_length);
  741.         if (needs_clflush_after)
  742.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  743.                                              page_length,
  744.                                              page_do_bit17_swizzling);
  745.         kunmap(page);
  746.  
  747.         return ret ? -EFAULT : 0;
  748. }
  749. #endif
  750.  
  751.  
  752. static int
  753. i915_gem_shmem_pwrite(struct drm_device *dev,
  754.                       struct drm_i915_gem_object *obj,
  755.                       struct drm_i915_gem_pwrite *args,
  756.                       struct drm_file *file)
  757. {
  758.         ssize_t remain;
  759.         loff_t offset;
  760.         char __user *user_data;
  761.         int shmem_page_offset, page_length, ret = 0;
  762.         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
  763.         int hit_slowpath = 0;
  764.         int needs_clflush_after = 0;
  765.         int needs_clflush_before = 0;
  766.         struct sg_page_iter sg_iter;
  767.  
  768.         user_data = to_user_ptr(args->data_ptr);
  769.         remain = args->size;
  770.  
  771.         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  772.  
  773.         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
  774.                 /* If we're not in the cpu write domain, set ourself into the gtt
  775.                  * write domain and manually flush cachelines (if required). This
  776.                  * optimizes for the case when the gpu will use the data
  777.                  * right away and we therefore have to clflush anyway. */
  778.                 needs_clflush_after = cpu_write_needs_clflush(obj);
  779.                 ret = i915_gem_object_wait_rendering(obj, false);
  780.                 if (ret)
  781.                         return ret;
  782.         }
  783.         /* Same trick applies to invalidate partially written cachelines read
  784.          * before writing. */
  785.         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
  786.                 needs_clflush_before =
  787.                         !cpu_cache_is_coherent(dev, obj->cache_level);
  788.  
  789.         ret = i915_gem_object_get_pages(obj);
  790.         if (ret)
  791.                 return ret;
  792.  
  793.         i915_gem_object_pin_pages(obj);
  794.  
  795.         offset = args->offset;
  796.         obj->dirty = 1;
  797.  
  798.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
  799.                          offset >> PAGE_SHIFT) {
  800.                 struct page *page = sg_page_iter_page(&sg_iter);
  801.                 int partial_cacheline_write;
  802.  
  803.                 if (remain <= 0)
  804.                         break;
  805.  
  806.                 /* Operation in this page
  807.                  *
  808.                  * shmem_page_offset = offset within page in shmem file
  809.                  * page_length = bytes to copy for this page
  810.                  */
  811.                 shmem_page_offset = offset_in_page(offset);
  812.  
  813.                 page_length = remain;
  814.                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
  815.                         page_length = PAGE_SIZE - shmem_page_offset;
  816.  
  817.                 /* If we don't overwrite a cacheline completely we need to be
  818.                  * careful to have up-to-date data by first clflushing. Don't
  819.                  * overcomplicate things and flush the entire page. */
  820.                 partial_cacheline_write = needs_clflush_before &&
  821.                         ((shmem_page_offset | page_length)
  822.                                 & (x86_clflush_size - 1));
  823.  
  824.                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
  825.                         (page_to_phys(page) & (1 << 17)) != 0;
  826.  
  827.                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
  828.                                         user_data, page_do_bit17_swizzling,
  829.                                         partial_cacheline_write,
  830.                                         needs_clflush_after);
  831.                 if (ret == 0)
  832.                         goto next_page;
  833.  
  834.                 hit_slowpath = 1;
  835.                 mutex_unlock(&dev->struct_mutex);
  836.                 dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
  837.  
  838. //              ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
  839. //                                      user_data, page_do_bit17_swizzling,
  840. //                                      partial_cacheline_write,
  841. //                                      needs_clflush_after);
  842.  
  843.                 mutex_lock(&dev->struct_mutex);
  844.  
  845. next_page:
  846.  
  847.                 if (ret)
  848.                         goto out;
  849.  
  850.                 remain -= page_length;
  851.                 user_data += page_length;
  852.                 offset += page_length;
  853.         }
  854.  
  855. out:
  856.         i915_gem_object_unpin_pages(obj);
  857.  
  858.         if (hit_slowpath) {
  859.                 /*
  860.                  * Fixup: Flush cpu caches in case we didn't flush the dirty
  861.                  * cachelines in-line while writing and the object moved
  862.                  * out of the cpu write domain while we've dropped the lock.
  863.                  */
  864.                 if (!needs_clflush_after &&
  865.                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
  866.                         if (i915_gem_clflush_object(obj, obj->pin_display))
  867.                         i915_gem_chipset_flush(dev);
  868.                 }
  869.         }
  870.  
  871.         if (needs_clflush_after)
  872.                 i915_gem_chipset_flush(dev);
  873.  
  874.         return ret;
  875. }
  876.  
  877. /**
  878.  * Writes data to the object referenced by handle.
  879.  *
  880.  * On error, the contents of the buffer that were to be modified are undefined.
  881.  */
  882. int
  883. i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  884.                       struct drm_file *file)
  885. {
  886.         struct drm_i915_gem_pwrite *args = data;
  887.         struct drm_i915_gem_object *obj;
  888.         int ret;
  889.  
  890.         if (args->size == 0)
  891.                 return 0;
  892.  
  893.  
  894.         ret = i915_mutex_lock_interruptible(dev);
  895.         if (ret)
  896.                 return ret;
  897.  
  898.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  899.         if (&obj->base == NULL) {
  900.                 ret = -ENOENT;
  901.                 goto unlock;
  902.         }
  903.  
  904.         /* Bounds check destination. */
  905.         if (args->offset > obj->base.size ||
  906.             args->size > obj->base.size - args->offset) {
  907.                 ret = -EINVAL;
  908.                 goto out;
  909.         }
  910.  
  911.         /* prime objects have no backing filp to GEM pread/pwrite
  912.          * pages from.
  913.          */
  914.         if (!obj->base.filp) {
  915.                 ret = -EINVAL;
  916.                 goto out;
  917.         }
  918.  
  919.         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  920.  
  921.         ret = -EFAULT;
  922.         /* We can only do the GTT pwrite on untiled buffers, as otherwise
  923.          * it would end up going through the fenced access, and we'll get
  924.          * different detiling behavior between reading and writing.
  925.          * pread/pwrite currently are reading and writing from the CPU
  926.          * perspective, requiring manual detiling by the client.
  927.          */
  928. //   if (obj->phys_obj) {
  929. //       ret = i915_gem_phys_pwrite(dev, obj, args, file);
  930. //       goto out;
  931. //   }
  932.  
  933.         if (obj->tiling_mode == I915_TILING_NONE &&
  934.             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
  935.             cpu_write_needs_clflush(obj)) {
  936.                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
  937.                 /* Note that the gtt paths might fail with non-page-backed user
  938.                  * pointers (e.g. gtt mappings when moving data between
  939.                  * textures). Fallback to the shmem path in that case. */
  940.         }
  941.  
  942.         if (ret == -EFAULT || ret == -ENOSPC)
  943.                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
  944.  
  945. out:
  946.         drm_gem_object_unreference(&obj->base);
  947. unlock:
  948.         mutex_unlock(&dev->struct_mutex);
  949.         return ret;
  950. }
  951.  
  952. int
  953. i915_gem_check_wedge(struct i915_gpu_error *error,
  954.                      bool interruptible)
  955. {
  956.         if (i915_reset_in_progress(error)) {
  957.                 /* Non-interruptible callers can't handle -EAGAIN, hence return
  958.                  * -EIO unconditionally for these. */
  959.                 if (!interruptible)
  960.                         return -EIO;
  961.  
  962.                 /* Recovery complete, but the reset failed ... */
  963.                 if (i915_terminally_wedged(error))
  964.                         return -EIO;
  965.  
  966.                 return -EAGAIN;
  967.         }
  968.  
  969.         return 0;
  970. }
  971.  
  972. /*
  973.  * Compare seqno against outstanding lazy request. Emit a request if they are
  974.  * equal.
  975.  */
  976. static int
  977. i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  978. {
  979.         int ret;
  980.  
  981.         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
  982.  
  983.         ret = 0;
  984.         if (seqno == ring->outstanding_lazy_seqno)
  985.                 ret = i915_add_request(ring, NULL);
  986.  
  987.         return ret;
  988. }
  989.  
  990. static void fake_irq(unsigned long data)
  991. {
  992. //      wake_up_process((struct task_struct *)data);
  993. }
  994.  
  995. static bool missed_irq(struct drm_i915_private *dev_priv,
  996.                        struct intel_ring_buffer *ring)
  997. {
  998.         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
  999. }
  1000.  
  1001. static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  1002. {
  1003.         if (file_priv == NULL)
  1004.                 return true;
  1005.  
  1006.         return !atomic_xchg(&file_priv->rps_wait_boost, true);
  1007. }
  1008.  
  1009. /**
  1010.  * __wait_seqno - wait until execution of seqno has finished
  1011.  * @ring: the ring expected to report seqno
  1012.  * @seqno: duh!
  1013.  * @reset_counter: reset sequence associated with the given seqno
  1014.  * @interruptible: do an interruptible wait (normally yes)
  1015.  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  1016.  *
  1017.  * Note: It is of utmost importance that the passed in seqno and reset_counter
  1018.  * values have been read by the caller in an smp safe manner. Where read-side
  1019.  * locks are involved, it is sufficient to read the reset_counter before
  1020.  * unlocking the lock that protects the seqno. For lockless tricks, the
  1021.  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
  1022.  * inserted.
  1023.  *
  1024.  * Returns 0 if the seqno was found within the allotted time. Else returns the
  1025.  * errno with remaining time filled in timeout argument.
  1026.  */
  1027. static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
  1028.                         unsigned reset_counter,
  1029.                         bool interruptible,
  1030.                         struct timespec *timeout,
  1031.                         struct drm_i915_file_private *file_priv)
  1032. {
  1033.         drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1034.         const bool irq_test_in_progress =
  1035.                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
  1036.         struct timespec before, now;
  1037.     unsigned long timeout_expire, wait_time;
  1038.     wait_queue_t __wait;
  1039.         int ret;
  1040.  
  1041.         WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
  1042.  
  1043.         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
  1044.                 return 0;
  1045.  
  1046.     timeout_expire = timeout ? GetTimerTicks() + timespec_to_jiffies_timeout(timeout) : 0;
  1047.     wait_time = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
  1048.  
  1049.         if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
  1050.                 gen6_rps_boost(dev_priv);
  1051.                 if (file_priv)
  1052.                         mod_delayed_work(dev_priv->wq,
  1053.                                          &file_priv->mm.idle_work,
  1054.                                          msecs_to_jiffies(100));
  1055.         }
  1056.  
  1057.         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
  1058.                 return -ENODEV;
  1059.  
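    /* KolibriOS: block on an event object hooked into the ring's irq_queue
     * rather than putting the current task to sleep on the wait queue. */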
  1060.     INIT_LIST_HEAD(&__wait.task_list);
  1061.     __wait.evnt = CreateEvent(NULL, MANUAL_DESTROY);
  1062.  
  1063.         /* Record current time in case interrupted by signal, or wedged */
  1064.         trace_i915_gem_request_wait_begin(ring, seqno);
  1065.  
  1066.         for (;;) {
  1067.         unsigned long flags;
  1068.  
  1069.                 /* We need to check whether any gpu reset happened in between
  1070.                  * the caller grabbing the seqno and now ... */
  1071.                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
  1072.                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
  1073.                          * is truly gone. */
  1074.                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
  1075.                         if (ret == 0)
  1076.                                 ret = -EAGAIN;
  1077.                         break;
  1078.                 }
  1079.  
  1080.                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
  1081.                         ret = 0;
  1082.                         break;
  1083.                 }
  1084.  
  1085.         if (timeout && time_after_eq(GetTimerTicks(), timeout_expire)) {
  1086.                         ret = -ETIME;
  1087.                         break;
  1088.                 }
  1089.  
  1090.         spin_lock_irqsave(&ring->irq_queue.lock, flags);
  1091.         if (list_empty(&__wait.task_list))
  1092.             __add_wait_queue(&ring->irq_queue, &__wait);
  1093.         spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
  1094.  
  1095.         WaitEventTimeout(__wait.evnt, 1);
  1096.  
  1097.         if (!list_empty(&__wait.task_list)) {
  1098.             spin_lock_irqsave(&ring->irq_queue.lock, flags);
  1099.             list_del_init(&__wait.task_list);
  1100.             spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
  1101.         }
  1102.     }
  1103.     trace_i915_gem_request_wait_end(ring, seqno);
  1104.  
  1105.     DestroyEvent(__wait.evnt);
  1106.  
  1107.         if (!irq_test_in_progress)
  1108.                 ring->irq_put(ring);
  1109.  
  1110.         return ret;
  1111. }
  1112.  
  1113. /**
  1114.  * Waits for a sequence number to be signaled, and cleans up the
  1115.  * request and object lists appropriately for that event.
  1116.  */
  1117. int
  1118. i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
  1119. {
  1120.         struct drm_device *dev = ring->dev;
  1121.         struct drm_i915_private *dev_priv = dev->dev_private;
  1122.         bool interruptible = dev_priv->mm.interruptible;
  1123.         int ret;
  1124.  
  1125.         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1126.         BUG_ON(seqno == 0);
  1127.  
  1128.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
  1129.         if (ret)
  1130.                 return ret;
  1131.  
  1132.         ret = i915_gem_check_olr(ring, seqno);
  1133.         if (ret)
  1134.                 return ret;
  1135.  
  1136.         return __wait_seqno(ring, seqno,
  1137.                             atomic_read(&dev_priv->gpu_error.reset_counter),
  1138.                             interruptible, NULL, NULL);
  1139. }
  1140.  
  1141. static int
  1142. i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
  1143.                                      struct intel_ring_buffer *ring)
  1144. {
  1145.         i915_gem_retire_requests_ring(ring);
  1146.  
  1147.         /* Manually manage the write flush as we may have not yet
  1148.          * retired the buffer.
  1149.          *
  1150.          * Note that the last_write_seqno is always the earlier of
  1151.          * the two (read/write) seqno, so if we have successfully waited,
  1152.          * we know we have passed the last write.
  1153.          */
  1154.         obj->last_write_seqno = 0;
  1155.         obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
  1156.  
  1157.         return 0;
  1158. }
  1159.  
  1160. /**
  1161.  * Ensures that all rendering to the object has completed and the object is
  1162.  * safe to unbind from the GTT or access from the CPU.
  1163.  */
  1164. static __must_check int
  1165. i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  1166.                                bool readonly)
  1167. {
  1168.         struct intel_ring_buffer *ring = obj->ring;
  1169.         u32 seqno;
  1170.         int ret;
  1171.  
  1172.         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
  1173.         if (seqno == 0)
  1174.                 return 0;
  1175.  
  1176.         ret = i915_wait_seqno(ring, seqno);
  1177.         if (ret)
  1178.                 return ret;
  1179.  
  1180.         return i915_gem_object_wait_rendering__tail(obj, ring);
  1181. }
  1182.  
  1183. /* A nonblocking variant of the above wait. This is a highly dangerous routine
  1184.  * as the object state may change during this call.
  1185.  */
  1186. static __must_check int
  1187. i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
  1188.                                             struct drm_file *file,
  1189.                                             bool readonly)
  1190. {
  1191.         struct drm_device *dev = obj->base.dev;
  1192.         struct drm_i915_private *dev_priv = dev->dev_private;
  1193.         struct intel_ring_buffer *ring = obj->ring;
  1194.         unsigned reset_counter;
  1195.         u32 seqno;
  1196.         int ret;
  1197.  
  1198.         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1199.         BUG_ON(!dev_priv->mm.interruptible);
  1200.  
  1201.         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
  1202.         if (seqno == 0)
  1203.                 return 0;
  1204.  
  1205.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
  1206.         if (ret)
  1207.                 return ret;
  1208.  
  1209.         ret = i915_gem_check_olr(ring, seqno);
  1210.         if (ret)
  1211.                 return ret;
  1212.  
  1213.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  1214.         mutex_unlock(&dev->struct_mutex);
  1215.         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
  1216.         mutex_lock(&dev->struct_mutex);
  1217.         if (ret)
  1218.                 return ret;
  1219.  
  1220.         return i915_gem_object_wait_rendering__tail(obj, ring);
  1221. }
  1222.  
  1223. /**
  1224.  * Called when user space prepares to use an object with the CPU, either
  1225.  * through the mmap ioctl's mapping or a GTT mapping.
  1226.  */
  1227. int
  1228. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  1229.                           struct drm_file *file)
  1230. {
  1231.         struct drm_i915_gem_set_domain *args = data;
  1232.         struct drm_i915_gem_object *obj;
  1233.         uint32_t read_domains = args->read_domains;
  1234.         uint32_t write_domain = args->write_domain;
  1235.         int ret;
  1236.  
  1237.         /* Only handle setting domains to types used by the CPU. */
  1238.         if (write_domain & I915_GEM_GPU_DOMAINS)
  1239.                 return -EINVAL;
  1240.  
  1241.         if (read_domains & I915_GEM_GPU_DOMAINS)
  1242.                 return -EINVAL;
  1243.  
  1244.         /* Having something in the write domain implies it's in the read
  1245.          * domain, and only that read domain.  Enforce that in the request.
  1246.          */
  1247.         if (write_domain != 0 && read_domains != write_domain)
  1248.                 return -EINVAL;
  1249.  
  1250.         ret = i915_mutex_lock_interruptible(dev);
  1251.         if (ret)
  1252.                 return ret;
  1253.  
  1254.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1255.         if (&obj->base == NULL) {
  1256.                 ret = -ENOENT;
  1257.                 goto unlock;
  1258.         }
  1259.  
  1260.         /* Try to flush the object off the GPU without holding the lock.
  1261.          * We will repeat the flush holding the lock in the normal manner
  1262.          * to catch cases where we are gazumped.
  1263.          */
  1264.         ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
  1265.         if (ret)
  1266.                 goto unref;
  1267.  
  1268.         if (read_domains & I915_GEM_DOMAIN_GTT) {
  1269.                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  1270.  
  1271.                 /* Silently promote "you're not bound, there was nothing to do"
  1272.                  * to success, since the client was just asking us to
  1273.                  * make sure everything was done.
  1274.                  */
  1275.                 if (ret == -EINVAL)
  1276.                         ret = 0;
  1277.         } else {
  1278.                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
  1279.         }
  1280.  
  1281. unref:
  1282.         drm_gem_object_unreference(&obj->base);
  1283. unlock:
  1284.         mutex_unlock(&dev->struct_mutex);
  1285.         return ret;
  1286. }
  1287.  
  1288. /**
  1289.  * Called when user space has done writes to this buffer
  1290.  */
  1291. int
  1292. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  1293.                          struct drm_file *file)
  1294. {
  1295.         struct drm_i915_gem_sw_finish *args = data;
  1296.         struct drm_i915_gem_object *obj;
  1297.         int ret = 0;
  1298.  
  1299.         ret = i915_mutex_lock_interruptible(dev);
  1300.         if (ret)
  1301.                 return ret;
  1302.  
  1303.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1304.         if (&obj->base == NULL) {
  1305.                 ret = -ENOENT;
  1306.                 goto unlock;
  1307.         }
  1308.  
  1309.         /* Pinned buffers may be scanout, so flush the cache */
  1310.         if (obj->pin_display)
  1311.                 i915_gem_object_flush_cpu_write_domain(obj, true);
  1312.  
  1313.         drm_gem_object_unreference(&obj->base);
  1314. unlock:
  1315.         mutex_unlock(&dev->struct_mutex);
  1316.         return ret;
  1317. }
  1318.  
  1319. /**
  1320.  * Maps the contents of an object, returning the address it is mapped
  1321.  * into.
  1322.  *
  1323.  * While the mapping holds a reference on the contents of the object, it doesn't
  1324.  * imply a ref on the object itself.
  1325.  */
  1326. int
  1327. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1328.                     struct drm_file *file)
  1329. {
  1330.         struct drm_i915_gem_mmap *args = data;
  1331.         struct drm_gem_object *obj;
  1332.         unsigned long addr;
  1333.  
  1334.         obj = drm_gem_object_lookup(dev, file, args->handle);
  1335.         if (obj == NULL)
  1336.                 return -ENOENT;
  1337.  
  1338.         /* prime objects have no backing filp to GEM mmap
  1339.          * pages from.
  1340.          */
  1341.         if (!obj->filp) {
  1342.                 drm_gem_object_unreference_unlocked(obj);
  1343.                 return -EINVAL;
  1344.         }
  1345.  
  1346.     addr = vm_mmap(obj->filp, 0, args->size,
  1347.               PROT_READ | PROT_WRITE, MAP_SHARED,
  1348.               args->offset);
  1349.         drm_gem_object_unreference_unlocked(obj);
  1350.     if (IS_ERR((void *)addr))
  1351.         return addr;
  1352.  
  1353.         args->addr_ptr = (uint64_t) addr;
  1354.  
  1355.     return 0;
  1356. }
  1357.  
  1358.  
  1359.  
  1360.  
  1361.  
  1362.  
  1363.  
  1364.  
  1365.  
  1366.  
  1367.  
  1368.  
  1369.  
  1370. /**
  1371.  * i915_gem_release_mmap - remove physical page mappings
  1372.  * @obj: obj in question
  1373.  *
  1374.  * Preserve the reservation of the mmapping with the DRM core code, but
  1375.  * relinquish ownership of the pages back to the system.
  1376.  *
  1377.  * It is vital that we remove the page mapping if we have mapped a tiled
  1378.  * object through the GTT and then lose the fence register due to
  1379.  * resource pressure. Similarly if the object has been moved out of the
  1380.  * aperture, then pages mapped into userspace must be revoked. Removing the
  1381.  * mapping will then trigger a page fault on the next user access, allowing
  1382.  * fixup by i915_gem_fault().
  1383.  */
  1384. void
  1385. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1386. {
  1387.         if (!obj->fault_mappable)
  1388.                 return;
  1389.  
  1390. //      drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
  1391.         obj->fault_mappable = false;
  1392. }
  1393.  
  1394. uint32_t
  1395. i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  1396. {
  1397.         uint32_t gtt_size;
  1398.  
  1399.         if (INTEL_INFO(dev)->gen >= 4 ||
  1400.             tiling_mode == I915_TILING_NONE)
  1401.                 return size;
  1402.  
  1403.         /* Previous chips need a power-of-two fence region when tiling */
  1404.         if (INTEL_INFO(dev)->gen == 3)
  1405.                 gtt_size = 1024*1024;
  1406.         else
  1407.                 gtt_size = 512*1024;
  1408.  
  1409.         while (gtt_size < size)
  1410.                 gtt_size <<= 1;
  1411.  
  1412.         return gtt_size;
  1413. }
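
/* Worked example (informative): on gen3, a 1.5 MiB tiled object starts from
 * the 1 MiB minimum and doubles once, so it needs a 2 MiB fence region; on
 * gen2 the same object starts from 512 KiB and doubles twice.  From gen4
 * onwards, or for untiled objects, the size is returned unchanged.
 */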
  1414.  
  1415. /**
  1416.  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1417.  * @obj: object to check
  1418.  *
  1419.  * Return the required GTT alignment for an object, taking into account
  1420.  * potential fence register mapping.
  1421.  */
  1422. uint32_t
  1423. i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
  1424.                            int tiling_mode, bool fenced)
  1425. {
  1426.         /*
  1427.          * Minimum alignment is 4k (GTT page size), but might be greater
  1428.          * if a fence register is needed for the object.
  1429.          */
  1430.         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
  1431.             tiling_mode == I915_TILING_NONE)
  1432.                 return 4096;
  1433.  
  1434.         /*
  1435.          * Previous chips need to be aligned to the size of the smallest
  1436.          * fence register that can contain the object.
  1437.          */
  1438.         return i915_gem_get_gtt_size(dev, size, tiling_mode);
  1439. }
  1440.  
  1441.  
  1442.  
  1443. int
  1444. i915_gem_mmap_gtt(struct drm_file *file,
  1445.           struct drm_device *dev,
  1446.           uint32_t handle,
  1447.           uint64_t *offset)
  1448. {
  1449.     struct drm_i915_private *dev_priv = dev->dev_private;
  1450.     struct drm_i915_gem_object *obj;
  1451.     unsigned long pfn;
  1452.     char *mem, *ptr;
  1453.     int ret;
  1454.  
  1455.     ret = i915_mutex_lock_interruptible(dev);
  1456.     if (ret)
  1457.         return ret;
  1458.  
  1459.     obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
  1460.     if (&obj->base == NULL) {
  1461.         ret = -ENOENT;
  1462.         goto unlock;
  1463.     }
  1464.  
  1465.     if (obj->base.size > dev_priv->gtt.mappable_end) {
  1466.         ret = -E2BIG;
  1467.         goto out;
  1468.     }
  1469.  
  1470.     if (obj->madv != I915_MADV_WILLNEED) {
  1471.         DRM_ERROR("Attempting to mmap a purgeable buffer\n");
  1472.         ret = -EINVAL;
  1473.         goto out;
  1474.     }
  1475.     /* Now bind it into the GTT if needed */
  1476.     ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
  1477.     if (ret)
  1478.         goto out;
  1479.  
  1480.     ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  1481.     if (ret)
  1482.         goto unpin;
  1483.  
  1484.     ret = i915_gem_object_get_fence(obj);
  1485.     if (ret)
  1486.         goto unpin;
  1487.  
  1488.     obj->fault_mappable = true;
  1489.  
  1490.     pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
  1491.  
  1492.     /* Finally, remap it using the new GTT offset */
  1493.  
  1494.     mem = UserAlloc(obj->base.size);
  1495.     if(unlikely(mem == NULL))
  1496.     {
  1497.         ret = -ENOMEM;
  1498.         goto unpin;
  1499.     }
  1500.  
  1501.     for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
  1502.         MapPage(ptr, pfn, PG_SHARED|PG_UW);
  1503.  
  1504.     /* Report the mapped address only on the success path. */
  1505.     *offset = (uint64_t)(unsigned long)mem;
  1506.  
  1507. unpin:
  1508.     i915_gem_object_unpin(obj);
  1509.  
  1510. out:
  1511.     drm_gem_object_unreference(&obj->base);
  1512. unlock:
  1513.     mutex_unlock(&dev->struct_mutex);
  1514.     return ret;
  1515. }
  1516.  
  1517. /**
  1518.  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1519.  * @dev: DRM device
  1520.  * @data: GTT mapping ioctl data
  1521.  * @file: GEM object info
  1522.  *
  1523.  * Simply returns the fake offset to userspace so it can mmap it.
  1524.  * The mmap call will end up in drm_gem_mmap(), which will set things
  1525.  * up so we can get faults in the handler above.
  1526.  *
  1527.  * The fault handler will take care of binding the object into the GTT
  1528.  * (since it may have been evicted to make room for something), allocating
  1529.  * a fence register, and mapping the appropriate aperture address into
  1530.  * userspace.
  1531.  */
  1532. int
  1533. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1534.             struct drm_file *file)
  1535. {
  1536.     struct drm_i915_gem_mmap_gtt *args = data;
  1537.  
  1538.     return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1539. }
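
/* Illustrative sketch (not part of the driver): userspace asks for a GTT
 * mapping through DRM_IOCTL_I915_GEM_MMAP_GTT.  On stock Linux the returned
 * offset is a fake mmap offset to be passed to mmap() on the DRM fd; in this
 * port i915_gem_mmap_gtt() above maps the pages immediately with UserAlloc()/
 * MapPage(), so args->offset already holds the user-space address.
 *
 *     struct drm_i915_gem_mmap_gtt arg = { 0 };
 *     arg.handle = handle;
 *     if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *             gtt_map = (void *)(uintptr_t)arg.offset;   /* this port only */
 */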
  1540.  
  1541. /* Immediately discard the backing storage */
  1542. static void
  1543. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1544. {
  1545. //      struct inode *inode;
  1546.  
  1547. //      i915_gem_object_free_mmap_offset(obj);
  1548.  
  1549.         if (obj->base.filp == NULL)
  1550.                 return;
  1551.  
  1552.         /* Our goal here is to return as much of the memory as
  1553.          * possible back to the system, as we are called from OOM.
  1554.          * To do this we must instruct the shmfs to drop all of its
  1555.          * backing pages, *now*.
  1556.          */
  1557. //      inode = obj->base.filp->f_path.dentry->d_inode;
  1558. //      shmem_truncate_range(inode, 0, (loff_t)-1);
  1559.  
  1560.         obj->madv = __I915_MADV_PURGED;
  1561. }
  1562.  
  1563. static inline int
  1564. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
  1565. {
  1566.         return obj->madv == I915_MADV_DONTNEED;
  1567. }
  1568.  
  1569. static void
  1570. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  1571. {
  1572.         struct sg_page_iter sg_iter;
  1573.         int ret;
  1574.  
  1575.         BUG_ON(obj->madv == __I915_MADV_PURGED);
  1576.  
  1577.         ret = i915_gem_object_set_to_cpu_domain(obj, true);
  1578.         if (ret) {
  1579.                 /* In the event of a disaster, abandon all caches and
  1580.                  * hope for the best.
  1581.                  */
  1582.                 WARN_ON(ret != -EIO);
  1583.                 i915_gem_clflush_object(obj, true);
  1584.                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  1585.         }
  1586.  
  1587.         if (obj->madv == I915_MADV_DONTNEED)
  1588.                 obj->dirty = 0;
  1589.  
  1590.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
  1591.                 struct page *page = sg_page_iter_page(&sg_iter);
  1592.  
  1593.         page_cache_release(page);
  1594.         }
  1595.     //DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
  1596.  
  1597.     obj->dirty = 0;
  1598.  
  1599.         sg_free_table(obj->pages);
  1600.         kfree(obj->pages);
  1601. }
  1602.  
  1603. int
  1604. i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
  1605. {
  1606.         const struct drm_i915_gem_object_ops *ops = obj->ops;
  1607.  
  1608.         if (obj->pages == NULL)
  1609.                 return 0;
  1610.  
  1611.         if (obj->pages_pin_count)
  1612.                 return -EBUSY;
  1613.  
  1614.         BUG_ON(i915_gem_obj_bound_any(obj));
  1615.  
  1616.         /* ->put_pages might need to allocate memory for the bit17 swizzle
  1617.          * array, hence protect them from being reaped by removing them from gtt
  1618.          * lists early. */
  1619.         list_del(&obj->global_list);
  1620.  
  1621.         ops->put_pages(obj);
  1622.         obj->pages = NULL;
  1623.  
  1624.         if (i915_gem_object_is_purgeable(obj))
  1625.                 i915_gem_object_truncate(obj);
  1626.  
  1627.         return 0;
  1628. }
  1629.  
  1630.  
  1631.  
  1632.  
  1633.  
  1634.  
  1635.  
  1636.  
  1637. static int
  1638. i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  1639. {
  1640.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1641.     int page_count, i;
  1642.     struct sg_table *st;
  1643.         struct scatterlist *sg;
  1644.         struct sg_page_iter sg_iter;
  1645.         struct page *page;
  1646.         unsigned long last_pfn = 0;     /* suppress gcc warning */
  1647.         gfp_t gfp = GFP_KERNEL; /* hint for shmem_read_mapping_page_gfp() below */
  1648.  
  1649.         /* Assert that the object is not currently in any GPU domain. As it
  1650.          * wasn't in the GTT, there shouldn't be any way it could have been in
  1651.          * a GPU cache
  1652.          */
  1653.         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
  1654.         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
  1655.  
  1656.         st = kmalloc(sizeof(*st), GFP_KERNEL);
  1657.         if (st == NULL)
  1658.                 return -ENOMEM;
  1659.  
  1660.         page_count = obj->base.size / PAGE_SIZE;
  1661.         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  1662.                 kfree(st);
  1663.                 FAIL();
  1664.                 return -ENOMEM;
  1665.         }
  1666.  
  1667.         /* Get the list of pages out of our struct file.  They'll be pinned
  1668.          * at this point until we release them.
  1669.          *
  1670.          * Fail silently without starting the shrinker
  1671.          */
  1672.         sg = st->sgl;
  1673.         st->nents = 0;
  1674.         for (i = 0; i < page_count; i++) {
  1675.         page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
  1676.                 if (IS_ERR(page)) {
  1677.             dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
  1678.                         goto err_pages;
  1679.  
  1680.                 }
  1681.  
  1682.                 if (!i || page_to_pfn(page) != last_pfn + 1) {
  1683.                         if (i)
  1684.                                 sg = sg_next(sg);
  1685.                         st->nents++;
  1686.                 sg_set_page(sg, page, PAGE_SIZE, 0);
  1687.                 } else {
  1688.                         sg->length += PAGE_SIZE;
  1689.                 }
  1690.                 last_pfn = page_to_pfn(page);
  1691.         }
  1692.  
  1693.         sg_mark_end(sg);
  1694.         obj->pages = st;
  1695.  
  1696.         return 0;
  1697.  
  1698. err_pages:
  1699.         sg_mark_end(sg);
  1700.         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
  1701.                 page_cache_release(sg_page_iter_page(&sg_iter));
  1702.         sg_free_table(st);
  1703.         kfree(st);
  1704.     FAIL();
  1705.         return PTR_ERR(page);
  1706. }
  1707.  
  1708. /* Ensure that the associated pages are gathered from the backing storage
  1709.  * and pinned into our object. i915_gem_object_get_pages() may be called
  1710.  * multiple times before they are released by a single call to
  1711.  * i915_gem_object_put_pages() - once the pages are no longer referenced
  1712.  * either as a result of memory pressure (reaping pages under the shrinker)
  1713.  * or as the object is itself released.
  1714.  */
  1715. int
  1716. i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  1717. {
  1718.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1719.         const struct drm_i915_gem_object_ops *ops = obj->ops;
  1720.         int ret;
  1721.  
  1722.         if (obj->pages)
  1723.                 return 0;
  1724.  
  1725.         if (obj->madv != I915_MADV_WILLNEED) {
  1726.                 DRM_ERROR("Attempting to obtain a purgeable object\n");
  1727.                 return -EINVAL;
  1728.         }
  1729.  
  1730.         BUG_ON(obj->pages_pin_count);
  1731.  
  1732.         ret = ops->get_pages(obj);
  1733.         if (ret)
  1734.                 return ret;
  1735.  
  1736.         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  1737.     return 0;
  1738. }
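
/* Typical caller pattern (informative sketch, assuming the usual
 * pages_pin_count helpers from i915_drv.h): pages are gathered once and then
 * pinned for as long as they are referenced; put_pages only succeeds after
 * the last unpin drops pages_pin_count to zero.
 *
 *     ret = i915_gem_object_get_pages(obj);
 *     if (ret == 0) {
 *             i915_gem_object_pin_pages(obj);     // pages_pin_count++
 *             ... use obj->pages ...
 *             i915_gem_object_unpin_pages(obj);   // pages_pin_count--
 *     }
 */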
  1739.  
  1740. void
  1741. i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
  1742.                                struct intel_ring_buffer *ring)
  1743. {
  1744.         struct drm_device *dev = obj->base.dev;
  1745.         struct drm_i915_private *dev_priv = dev->dev_private;
  1746.         u32 seqno = intel_ring_get_seqno(ring);
  1747.  
  1748.         BUG_ON(ring == NULL);
  1749.         if (obj->ring != ring && obj->last_write_seqno) {
  1750.                 /* Keep the seqno relative to the current ring */
  1751.                 obj->last_write_seqno = seqno;
  1752.         }
  1753.         obj->ring = ring;
  1754.  
  1755.         /* Add a reference if we're newly entering the active list. */
  1756.         if (!obj->active) {
  1757.                 drm_gem_object_reference(&obj->base);
  1758.                 obj->active = 1;
  1759.         }
  1760.  
  1761.         list_move_tail(&obj->ring_list, &ring->active_list);
  1762.  
  1763.         obj->last_read_seqno = seqno;
  1764.  
  1765.         if (obj->fenced_gpu_access) {
  1766.                 obj->last_fenced_seqno = seqno;
  1767.  
  1768.                 /* Bump MRU to take account of the delayed flush */
  1769.                 if (obj->fence_reg != I915_FENCE_REG_NONE) {
  1770.                         struct drm_i915_fence_reg *reg;
  1771.  
  1772.                         reg = &dev_priv->fence_regs[obj->fence_reg];
  1773.                         list_move_tail(&reg->lru_list,
  1774.                                        &dev_priv->mm.fence_list);
  1775.                 }
  1776.         }
  1777. }
  1778.  
  1779. void i915_vma_move_to_active(struct i915_vma *vma,
  1780.                              struct intel_ring_buffer *ring)
  1781. {
  1782.         list_move_tail(&vma->mm_list, &vma->vm->active_list);
  1783.         return i915_gem_object_move_to_active(vma->obj, ring);
  1784. }
  1785.  
  1786. static void
  1787. i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  1788. {
  1789.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1790.         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
  1791.         struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
  1792.  
  1793.         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
  1794.         BUG_ON(!obj->active);
  1795.  
  1796.         list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
  1797.  
  1798.         list_del_init(&obj->ring_list);
  1799.         obj->ring = NULL;
  1800.  
  1801.         obj->last_read_seqno = 0;
  1802.         obj->last_write_seqno = 0;
  1803.         obj->base.write_domain = 0;
  1804.  
  1805.         obj->last_fenced_seqno = 0;
  1806.         obj->fenced_gpu_access = false;
  1807.  
  1808.         obj->active = 0;
  1809.         drm_gem_object_unreference(&obj->base);
  1810.  
  1811.         WARN_ON(i915_verify_lists(dev));
  1812. }
  1813.  
  1814. static int
  1815. i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  1816. {
  1817.         struct drm_i915_private *dev_priv = dev->dev_private;
  1818.         struct intel_ring_buffer *ring;
  1819.         int ret, i, j;
  1820.  
  1821.         /* Carefully retire all requests without writing to the rings */
  1822.         for_each_ring(ring, dev_priv, i) {
  1823.                 ret = intel_ring_idle(ring);
  1824.                 if (ret)
  1825.                         return ret;
  1826.         }
  1827.         i915_gem_retire_requests(dev);
  1828.  
  1829.         /* Finally reset hw state */
  1830.         for_each_ring(ring, dev_priv, i) {
  1831.                 intel_ring_init_seqno(ring, seqno);
  1832.  
  1833.                 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
  1834.                         ring->sync_seqno[j] = 0;
  1835.         }
  1836.  
  1837.         return 0;
  1838. }
  1839.  
  1840. int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
  1841. {
  1842.         struct drm_i915_private *dev_priv = dev->dev_private;
  1843.         int ret;
  1844.  
  1845.         if (seqno == 0)
  1846.                 return -EINVAL;
  1847.  
  1848.         /* The HWS page seqno needs to be set to a value less than what we
  1849.          * will inject into the ring
  1850.          */
  1851.         ret = i915_gem_init_seqno(dev, seqno - 1);
  1852.         if (ret)
  1853.                 return ret;
  1854.  
  1855.         /* Carefully set the last_seqno value so that wrap
  1856.          * detection still works
  1857.          */
  1858.         dev_priv->next_seqno = seqno;
  1859.         dev_priv->last_seqno = seqno - 1;
  1860.         if (dev_priv->last_seqno == 0)
  1861.                 dev_priv->last_seqno--;
  1862.  
  1863.         return 0;
  1864. }
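
/* Example (informative): i915_gem_set_seqno(dev, 0x100) idles the rings with
 * i915_gem_init_seqno(dev, 0xff), so the hardware-reported seqno is 0xff,
 * strictly less than the 0x100 the next request will inject; next_seqno then
 * starts at 0x100 and last_seqno at 0xff so wrap detection keeps working.
 */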
  1865.  
  1866. int
  1867. i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  1868. {
  1869.         struct drm_i915_private *dev_priv = dev->dev_private;
  1870.  
  1871.         /* reserve 0 for non-seqno */
  1872.         if (dev_priv->next_seqno == 0) {
  1873.                 int ret = i915_gem_init_seqno(dev, 0);
  1874.                 if (ret)
  1875.                         return ret;
  1876.  
  1877.                 dev_priv->next_seqno = 1;
  1878.         }
  1879.  
  1880.         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
  1881.         return 0;
  1882. }
  1883.  
  1884. int __i915_add_request(struct intel_ring_buffer *ring,
  1885.                  struct drm_file *file,
  1886.                        struct drm_i915_gem_object *obj,
  1887.                  u32 *out_seqno)
  1888. {
  1889.         drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1890.         struct drm_i915_gem_request *request;
  1891.         u32 request_ring_position, request_start;
  1892.         int was_empty;
  1893.         int ret;
  1894.  
  1895.         request_start = intel_ring_get_tail(ring);
  1896.         /*
  1897.          * Emit any outstanding flushes - execbuf can fail to emit the flush
  1898.          * after having emitted the batchbuffer command. Hence we need to fix
  1899.          * things up similar to emitting the lazy request. The difference here
  1900.          * is that the flush _must_ happen before the next request, no matter
  1901.          * what.
  1902.          */
  1903.         ret = intel_ring_flush_all_caches(ring);
  1904.         if (ret)
  1905.                 return ret;
  1906.  
  1907.         request = ring->preallocated_lazy_request;
  1908.         if (WARN_ON(request == NULL))
  1909.                 return -ENOMEM;
  1910.  
  1911.         /* Record the position of the start of the request so that
  1912.          * should we detect the updated seqno part-way through the
  1913.          * GPU processing the request, we never over-estimate the
  1914.          * position of the head.
  1915.          */
  1916.         request_ring_position = intel_ring_get_tail(ring);
  1917.  
  1918.         ret = ring->add_request(ring);
  1919.         if (ret)
  1920.                 return ret;
  1921.  
  1922.         request->seqno = intel_ring_get_seqno(ring);
  1923.         request->ring = ring;
  1924.         request->head = request_start;
  1925.         request->tail = request_ring_position;
  1926.  
  1927.         /* Whilst this request exists, batch_obj will be on the
  1928.          * active_list, and so will hold the active reference. Only when this
  1929.          * request is retired will the batch_obj be moved onto the
  1930.          * inactive_list and lose its active reference. Hence we do not need
  1931.          * to explicitly hold another reference here.
  1932.          */
  1933.         request->batch_obj = obj;
  1934.  
  1935.         /* Hold a reference to the current context so that we can inspect
  1936.          * it later in case a hangcheck error event fires.
  1937.          */
  1938.         request->ctx = ring->last_context;
  1939.         if (request->ctx)
  1940.                 i915_gem_context_reference(request->ctx);
  1941.  
  1942.     request->emitted_jiffies = GetTimerTicks();
  1943.         was_empty = list_empty(&ring->request_list);
  1944.         list_add_tail(&request->list, &ring->request_list);
  1945.         request->file_priv = NULL;
  1946.  
  1947.         if (file) {
  1948.                 struct drm_i915_file_private *file_priv = file->driver_priv;
  1949.  
  1950.                 spin_lock(&file_priv->mm.lock);
  1951.                 request->file_priv = file_priv;
  1952.                 list_add_tail(&request->client_list,
  1953.                               &file_priv->mm.request_list);
  1954.                 spin_unlock(&file_priv->mm.lock);
  1955.         }
  1956.  
  1957.         trace_i915_gem_request_add(ring, request->seqno);
  1958.         ring->outstanding_lazy_seqno = 0;
  1959.         ring->preallocated_lazy_request = NULL;
  1960.  
  1961.         if (!dev_priv->ums.mm_suspended) {
  1962. //              i915_queue_hangcheck(ring->dev);
  1963.  
  1964.        if (was_empty) {
  1965.            queue_delayed_work(dev_priv->wq,
  1966.                                            &dev_priv->mm.retire_work,
  1967.                                            round_jiffies_up_relative(HZ));
  1968.            intel_mark_busy(dev_priv->dev);
  1969.        }
  1970.    }
  1971.  
  1972.         if (out_seqno)
  1973.                 *out_seqno = request->seqno;
  1974.         return 0;
  1975. }
  1976.  
  1977. static inline void
  1978. i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  1979. {
  1980.         struct drm_i915_file_private *file_priv = request->file_priv;
  1981.  
  1982.         if (!file_priv)
  1983.                 return;
  1984.  
  1985.         spin_lock(&file_priv->mm.lock);
  1986.                 list_del(&request->client_list);
  1987.                 request->file_priv = NULL;
  1988.         spin_unlock(&file_priv->mm.lock);
  1989. }
  1990.  
  1991. static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
  1992.                                     struct i915_address_space *vm)
  1993. {
  1994.         if (acthd >= i915_gem_obj_offset(obj, vm) &&
  1995.             acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
  1996.                 return true;
  1997.  
  1998.         return false;
  1999. }
  2000.  
  2001. static bool i915_head_inside_request(const u32 acthd_unmasked,
  2002.                                      const u32 request_start,
  2003.                                      const u32 request_end)
  2004. {
  2005.         const u32 acthd = acthd_unmasked & HEAD_ADDR;
  2006.  
  2007.         if (request_start < request_end) {
  2008.                 if (acthd >= request_start && acthd < request_end)
  2009.                         return true;
  2010.         } else if (request_start > request_end) {
  2011.                 if (acthd >= request_start || acthd < request_end)
  2012.                         return true;
  2013.         }
  2014.  
  2015.         return false;
  2016. }
  2017.  
  2018. static struct i915_address_space *
  2019. request_to_vm(struct drm_i915_gem_request *request)
  2020. {
  2021.         struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
  2022.         struct i915_address_space *vm;
  2023.  
  2024.         vm = &dev_priv->gtt.base;
  2025.  
  2026.         return vm;
  2027. }
  2028.  
  2029. static bool i915_request_guilty(struct drm_i915_gem_request *request,
  2030.                                 const u32 acthd, bool *inside)
  2031. {
  2032.         /* There is a possibility that the unmasked head address, while
  2033.          * pointing inside the ring, matches the batch_obj address range.
  2034.          * However, this is extremely unlikely.
  2035.          */
  2036.         if (request->batch_obj) {
  2037.                 if (i915_head_inside_object(acthd, request->batch_obj,
  2038.                                             request_to_vm(request))) {
  2039.                         *inside = true;
  2040.                         return true;
  2041.                 }
  2042.         }
  2043.  
  2044.         if (i915_head_inside_request(acthd, request->head, request->tail)) {
  2045.                 *inside = false;
  2046.                 return true;
  2047.         }
  2048.  
  2049.         return false;
  2050. }
  2051.  
  2052. static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
  2053. {
  2054.     const unsigned long elapsed = GetTimerTicks()/100 - hs->guilty_ts;
  2055.  
  2056.         if (hs->banned)
  2057.                 return true;
  2058.  
  2059.         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
  2060.                 DRM_ERROR("context hanging too fast, declaring banned!\n");
  2061.                 return true;
  2062.         }
  2063.  
  2064.         return false;
  2065. }
  2066.  
  2067. static void i915_set_reset_status(struct intel_ring_buffer *ring,
  2068.                                   struct drm_i915_gem_request *request,
  2069.                                   u32 acthd)
  2070. {
  2071.         struct i915_ctx_hang_stats *hs = NULL;
  2072.         bool inside, guilty;
  2073.         unsigned long offset = 0;
  2074.  
  2075.         /* Innocent until proven guilty */
  2076.         guilty = false;
  2077.  
  2078.         if (request->batch_obj)
  2079.                 offset = i915_gem_obj_offset(request->batch_obj,
  2080.                                              request_to_vm(request));
  2081.  
  2082.         if (ring->hangcheck.action != HANGCHECK_WAIT &&
  2083.             i915_request_guilty(request, acthd, &inside)) {
  2084.                 DRM_DEBUG("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
  2085.                           ring->name,
  2086.                           inside ? "inside" : "flushing",
  2087.                           offset,
  2088.                           request->ctx ? request->ctx->id : 0,
  2089.                           acthd);
  2090.  
  2091.                 guilty = true;
  2092.         }
  2093.  
  2094.         /* If contexts are disabled or this is the default context, use
  2095.          * file_priv->hang_stats
  2096.          */
  2097.         if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID)
  2098.                 hs = &request->ctx->hang_stats;
  2099.         else if (request->file_priv)
  2100.                 hs = &request->file_priv->hang_stats;
  2101.  
  2102.         if (hs) {
  2103.                 if (guilty) {
  2104.                         hs->banned = i915_context_is_banned(hs);
  2105.                         hs->batch_active++;
  2106.             hs->guilty_ts = GetTimerTicks()/100;
  2107.                 } else {
  2108.                         hs->batch_pending++;
  2109.         }
  2110.         }
  2111. }
  2112.  
  2113. static void i915_gem_free_request(struct drm_i915_gem_request *request)
  2114. {
  2115.         list_del(&request->list);
  2116.         i915_gem_request_remove_from_client(request);
  2117.  
  2118.         if (request->ctx)
  2119.                 i915_gem_context_unreference(request->ctx);
  2120.  
  2121.         kfree(request);
  2122. }
  2123.  
  2124. static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
  2125.                                       struct intel_ring_buffer *ring)
  2126. {
  2127.         u32 completed_seqno = ring->get_seqno(ring, false);
  2128.         u32 acthd = intel_ring_get_active_head(ring);
  2129.         struct drm_i915_gem_request *request;
  2130.  
  2131.         list_for_each_entry(request, &ring->request_list, list) {
  2132.                 if (i915_seqno_passed(completed_seqno, request->seqno))
  2133.                         continue;
  2134.  
  2135.                 i915_set_reset_status(ring, request, acthd);
  2136.         }
  2137. }
  2138.  
  2139. static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
  2140.                                         struct intel_ring_buffer *ring)
  2141. {
  2142.         while (!list_empty(&ring->active_list)) {
  2143.                 struct drm_i915_gem_object *obj;
  2144.  
  2145.                 obj = list_first_entry(&ring->active_list,
  2146.                                        struct drm_i915_gem_object,
  2147.                                        ring_list);
  2148.  
  2149.                 i915_gem_object_move_to_inactive(obj);
  2150.         }
  2151.  
  2152.         /*
  2153.          * We must free the requests after all the corresponding objects have
  2154.          * been moved off the active lists, which is the same order the normal
  2155.          * retire_requests function uses. This is important if objects hold
  2156.          * implicit references on things like ppgtt address spaces through
  2157.          * the request.
  2158.          */
  2159.         while (!list_empty(&ring->request_list)) {
  2160.                 struct drm_i915_gem_request *request;
  2161.  
  2162.                 request = list_first_entry(&ring->request_list,
  2163.                                            struct drm_i915_gem_request,
  2164.                                            list);
  2165.  
  2166.                 i915_gem_free_request(request);
  2167.         }
  2168. }
  2169.  
  2170. void i915_gem_restore_fences(struct drm_device *dev)
  2171. {
  2172.         struct drm_i915_private *dev_priv = dev->dev_private;
  2173.         int i;
  2174.  
  2175.         for (i = 0; i < dev_priv->num_fence_regs; i++) {
  2176.                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  2177.  
  2178.                 /*
  2179.                  * Commit delayed tiling changes if we have an object still
  2180.                  * attached to the fence, otherwise just clear the fence.
  2181.                  */
  2182.                 if (reg->obj) {
  2183.                         i915_gem_object_update_fence(reg->obj, reg,
  2184.                                                      reg->obj->tiling_mode);
  2185.                 } else {
  2186.                         i915_gem_write_fence(dev, i, NULL);
  2187.                 }
  2188.         }
  2189. }
  2190.  
  2191. void i915_gem_reset(struct drm_device *dev)
  2192. {
  2193.         struct drm_i915_private *dev_priv = dev->dev_private;
  2194.         struct intel_ring_buffer *ring;
  2195.         int i;
  2196.  
  2197.         /*
  2198.          * Before we free the objects from the requests, we need to inspect
  2199.          * them for finding the guilty party. As the requests only borrow
  2200.          * their reference to the objects, the inspection must be done first.
  2201.          */
  2202.         for_each_ring(ring, dev_priv, i)
  2203.                 i915_gem_reset_ring_status(dev_priv, ring);
  2204.  
  2205.         for_each_ring(ring, dev_priv, i)
  2206.                 i915_gem_reset_ring_cleanup(dev_priv, ring);
  2207.  
  2208.         i915_gem_cleanup_ringbuffer(dev);
  2209.  
  2210.         i915_gem_restore_fences(dev);
  2211. }
  2212.  
  2213. /**
  2214.  * This function clears the request list as sequence numbers are passed.
  2215.  */
  2216. void
  2217. i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
  2218. {
  2219.         uint32_t seqno;
  2220.  
  2221.         if (list_empty(&ring->request_list))
  2222.                 return;
  2223.  
  2224.         WARN_ON(i915_verify_lists(ring->dev));
  2225.  
  2226.         seqno = ring->get_seqno(ring, true);
  2227.  
  2228.         while (!list_empty(&ring->request_list)) {
  2229.                 struct drm_i915_gem_request *request;
  2230.  
  2231.                 request = list_first_entry(&ring->request_list,
  2232.                                            struct drm_i915_gem_request,
  2233.                                            list);
  2234.  
  2235.                 if (!i915_seqno_passed(seqno, request->seqno))
  2236.                         break;
  2237.  
  2238.                 trace_i915_gem_request_retire(ring, request->seqno);
  2239.                 /* We know the GPU must have read the request to have
  2240.                  * sent us the seqno + interrupt, so use the position
  2241.                  * of tail of the request to update the last known position
  2242.                  * of the GPU head.
  2243.                  */
  2244.                 ring->last_retired_head = request->tail;
  2245.  
  2246.                 i915_gem_free_request(request);
  2247.         }
  2248.  
  2249.         /* Move any buffers on the active list that are no longer referenced
  2250.          * by the ringbuffer to the flushing/inactive lists as appropriate.
  2251.          */
  2252.         while (!list_empty(&ring->active_list)) {
  2253.                 struct drm_i915_gem_object *obj;
  2254.  
  2255.                 obj = list_first_entry(&ring->active_list,
  2256.                                       struct drm_i915_gem_object,
  2257.                                       ring_list);
  2258.  
  2259.                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
  2260.                         break;
  2261.  
  2262.                 i915_gem_object_move_to_inactive(obj);
  2263.         }
  2264.  
  2265.         if (unlikely(ring->trace_irq_seqno &&
  2266.                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
  2267.                 ring->irq_put(ring);
  2268.                 ring->trace_irq_seqno = 0;
  2269.         }
  2270.  
  2271.         WARN_ON(i915_verify_lists(ring->dev));
  2272. }
  2273.  
  2274. bool
  2275. i915_gem_retire_requests(struct drm_device *dev)
  2276. {
  2277.         drm_i915_private_t *dev_priv = dev->dev_private;
  2278.         struct intel_ring_buffer *ring;
  2279.         bool idle = true;
  2280.         int i;
  2281.  
  2282.         for_each_ring(ring, dev_priv, i) {
  2283.                 i915_gem_retire_requests_ring(ring);
  2284.                 idle &= list_empty(&ring->request_list);
  2285.         }
  2286.  
  2287.         if (idle)
  2288.                 mod_delayed_work(dev_priv->wq,
  2289.                                    &dev_priv->mm.idle_work,
  2290.                                    msecs_to_jiffies(100));
  2291.  
  2292.         return idle;
  2293. }
  2294.  
  2295. static void
  2296. i915_gem_retire_work_handler(struct work_struct *work)
  2297. {
  2298.         struct drm_i915_private *dev_priv =
  2299.                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
  2300.         struct drm_device *dev = dev_priv->dev;
  2301.         bool idle;
  2302.  
  2303.         /* Come back later if the device is busy... */
  2304.         idle = false;
  2305.         if (mutex_trylock(&dev->struct_mutex)) {
  2306.                 idle = i915_gem_retire_requests(dev);
  2307.                 mutex_unlock(&dev->struct_mutex);
  2308.         }
  2309.         if (!idle)
  2310.                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  2311.                                    round_jiffies_up_relative(HZ));
  2312. }
  2313.  
  2314. static void
  2315. i915_gem_idle_work_handler(struct work_struct *work)
  2316. {
  2317.         struct drm_i915_private *dev_priv =
  2318.                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
  2319.  
  2320.         intel_mark_idle(dev_priv->dev);
  2321. }
  2322.  
  2323. /**
  2324.  * Ensures that an object will eventually get non-busy by flushing any required
  2325.  * write domains, emitting any outstanding lazy request and retiring any
  2326.  * completed requests.
  2327.  */
  2328. static int
  2329. i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  2330. {
  2331.         int ret;
  2332.  
  2333.         if (obj->active) {
  2334.                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
  2335.                 if (ret)
  2336.                         return ret;
  2337.  
  2338.                 i915_gem_retire_requests_ring(obj->ring);
  2339.         }
  2340.  
  2341.         return 0;
  2342. }
  2343.  
  2344. /**
  2345.  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  2346.  * @DRM_IOCTL_ARGS: standard ioctl arguments
  2347.  *
  2348.  * Returns 0 if successful, else an error is returned with the remaining time in
  2349.  * the timeout parameter.
  2350.  *  -ETIME: object is still busy after timeout
  2351.  *  -ERESTARTSYS: signal interrupted the wait
  2352.  *  -ENOENT: object doesn't exist
  2353.  * Also possible, but rare:
  2354.  *  -EAGAIN: GPU wedged
  2355.  *  -ENOMEM: damn
  2356.  *  -ENODEV: Internal IRQ fail
  2357.  *  -E?: The add request failed
  2358.  *
  2359.  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  2360.  * non-zero timeout parameter the wait ioctl will wait for the given number of
  2361.  * nanoseconds on an object becoming unbusy. Since the wait itself does so
  2362.  * without holding struct_mutex the object may become re-busied before this
  2363.  * function completes. A similar but shorter race condition exists in the busy
  2364.  * ioctl.
  2365.  */
  2366. int
  2367. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  2368. {
  2369.         drm_i915_private_t *dev_priv = dev->dev_private;
  2370.         struct drm_i915_gem_wait *args = data;
  2371.         struct drm_i915_gem_object *obj;
  2372.         struct intel_ring_buffer *ring = NULL;
  2373.         struct timespec timeout_stack, *timeout = NULL;
  2374.         unsigned reset_counter;
  2375.         u32 seqno = 0;
  2376.         int ret = 0;
  2377.  
  2378.         if (args->timeout_ns >= 0) {
  2379.                 timeout_stack = ns_to_timespec(args->timeout_ns);
  2380.                 timeout = &timeout_stack;
  2381.         }
  2382.  
  2383.         ret = i915_mutex_lock_interruptible(dev);
  2384.         if (ret)
  2385.                 return ret;
  2386.  
  2387.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
  2388.         if (&obj->base == NULL) {
  2389.                 mutex_unlock(&dev->struct_mutex);
  2390.                 return -ENOENT;
  2391.         }
  2392.  
  2393.         /* Need to make sure the object gets inactive eventually. */
  2394.         ret = i915_gem_object_flush_active(obj);
  2395.         if (ret)
  2396.                 goto out;
  2397.  
  2398.         if (obj->active) {
  2399.                 seqno = obj->last_read_seqno;
  2400.                 ring = obj->ring;
  2401.         }
  2402.  
  2403.         if (seqno == 0)
  2404.                  goto out;
  2405.  
  2406.         /* Do this after OLR check to make sure we make forward progress polling
  2407.          * on this IOCTL with a 0 timeout (like busy ioctl)
  2408.          */
  2409.         if (!args->timeout_ns) {
  2410.                 ret = -ETIME;
  2411.                 goto out;
  2412.         }
  2413.  
  2414.         drm_gem_object_unreference(&obj->base);
  2415.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  2416.         mutex_unlock(&dev->struct_mutex);
  2417.  
  2418.         ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
  2419.         if (timeout)
  2420.                 args->timeout_ns = timespec_to_ns(timeout);
  2421.         return ret;
  2422.  
  2423. out:
  2424.         drm_gem_object_unreference(&obj->base);
  2425.         mutex_unlock(&dev->struct_mutex);
  2426.         return ret;
  2427. }
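
/* Illustrative sketch (not part of the driver): polling an object for
 * completion from userspace.  A timeout_ns of 0 behaves like the busy ioctl;
 * on -ETIME the object is still busy and timeout_ns holds the remaining time.
 *
 *     struct drm_i915_gem_wait arg = { 0 };
 *     arg.bo_handle  = handle;
 *     arg.timeout_ns = 1000000000ll;              /* wait up to one second */
 *     ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &arg);
 */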
  2428.  
  2429. /**
  2430.  * i915_gem_object_sync - sync an object to a ring.
  2431.  *
  2432.  * @obj: object which may be in use on another ring.
  2433.  * @to: ring we wish to use the object on. May be NULL.
  2434.  *
  2435.  * This code is meant to abstract object synchronization with the GPU.
  2436.  * Calling with NULL implies synchronizing the object with the CPU
  2437.  * rather than a particular GPU ring.
  2438.  *
  2439.  * Returns 0 if successful, else propagates up the lower layer error.
  2440.  */
  2441. int
  2442. i915_gem_object_sync(struct drm_i915_gem_object *obj,
  2443.                      struct intel_ring_buffer *to)
  2444. {
  2445.         struct intel_ring_buffer *from = obj->ring;
  2446.         u32 seqno;
  2447.         int ret, idx;
  2448.  
  2449.         if (from == NULL || to == from)
  2450.                 return 0;
  2451.  
  2452.         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
  2453.                 return i915_gem_object_wait_rendering(obj, false);
  2454.  
  2455.         idx = intel_ring_sync_index(from, to);
  2456.  
  2457.         seqno = obj->last_read_seqno;
  2458.         if (seqno <= from->sync_seqno[idx])
  2459.                 return 0;
  2460.  
  2461.         ret = i915_gem_check_olr(obj->ring, seqno);
  2462.         if (ret)
  2463.                 return ret;
  2464.  
  2465.         trace_i915_gem_ring_sync_to(from, to, seqno);
  2466.         ret = to->sync_to(to, from, seqno);
  2467.         if (!ret)
  2468.                 /* We use last_read_seqno because sync_to()
  2469.                  * might have just caused seqno wrap under
  2470.                  * the radar.
  2471.                  */
  2472.                 from->sync_seqno[idx] = obj->last_read_seqno;
  2473.  
  2474.         return ret;
  2475. }
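
/* Usage sketch (informative): before emitting commands that read from obj on
 * another ring, the submission path calls i915_gem_object_sync() so that
 * either a semaphore wait is queued on the target ring or, without semaphore
 * support, the CPU waits for outstanding rendering to finish.
 *
 *     ret = i915_gem_object_sync(obj, target_ring);
 *     if (ret)
 *             return ret;     // obj is now safe to use on target_ring
 */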
  2476.  
  2477. static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
  2478. {
  2479.         u32 old_write_domain, old_read_domains;
  2480.  
  2481.         /* Force a pagefault for domain tracking on next user access */
  2482. //      i915_gem_release_mmap(obj);
  2483.  
  2484.         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  2485.                 return;
  2486.  
  2487.         /* Wait for any direct GTT access to complete */
  2488.         mb();
  2489.  
  2490.         old_read_domains = obj->base.read_domains;
  2491.         old_write_domain = obj->base.write_domain;
  2492.  
  2493.         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
  2494.         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
  2495.  
  2496.         trace_i915_gem_object_change_domain(obj,
  2497.                                             old_read_domains,
  2498.                                             old_write_domain);
  2499. }
  2500.  
  2501. int i915_vma_unbind(struct i915_vma *vma)
  2502. {
  2503.         struct drm_i915_gem_object *obj = vma->obj;
  2504.         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
  2505.         int ret;
  2506.  
  2507.     if(obj == get_fb_obj())
  2508.         return 0;
  2509.  
  2510.         if (list_empty(&vma->vma_link))
  2511.                 return 0;
  2512.  
  2513.         if (!drm_mm_node_allocated(&vma->node)) {
  2514.                 i915_gem_vma_destroy(vma);
  2515.  
  2516.                 return 0;
  2517.         }
  2518.  
  2519.         if (obj->pin_count)
  2520.                 return -EBUSY;
  2521.  
  2522.         BUG_ON(obj->pages == NULL);
  2523.  
  2524.         ret = i915_gem_object_finish_gpu(obj);
  2525.         if (ret)
  2526.                 return ret;
  2527.         /* Continue on if we fail due to EIO, the GPU is hung so we
  2528.          * should be safe and we need to cleanup or else we might
  2529.          * cause memory corruption through use-after-free.
  2530.          */
  2531.  
  2532.         i915_gem_object_finish_gtt(obj);
  2533.  
  2534.         /* release the fence reg _after_ flushing */
  2535.         ret = i915_gem_object_put_fence(obj);
  2536.         if (ret)
  2537.                 return ret;
  2538.  
  2539.         trace_i915_vma_unbind(vma);
  2540.  
  2541.         if (obj->has_global_gtt_mapping)
  2542.                 i915_gem_gtt_unbind_object(obj);
  2543.         if (obj->has_aliasing_ppgtt_mapping) {
  2544.                 i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
  2545.                 obj->has_aliasing_ppgtt_mapping = 0;
  2546.         }
  2547.         i915_gem_gtt_finish_object(obj);
  2548.  
  2549.         list_del(&vma->mm_list);
  2550.         /* Avoid an unnecessary call to unbind on rebind. */
  2551.         if (i915_is_ggtt(vma->vm))
  2552.                 obj->map_and_fenceable = true;
  2553.  
  2554.         drm_mm_remove_node(&vma->node);
  2555.         i915_gem_vma_destroy(vma);
  2556.  
  2557.         /* Since the unbound list is global, only move to that list if
  2558.          * no more VMAs exist. */
  2559.         if (list_empty(&obj->vma_list))
  2560.                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  2561.  
  2562.         /* And finally now the object is completely decoupled from this vma,
  2563.          * we can drop its hold on the backing storage and allow it to be
  2564.          * reaped by the shrinker.
  2565.          */
  2566.         i915_gem_object_unpin_pages(obj);
  2567.  
  2568.         return 0;
  2569. }
  2570.  
  2571. /**
  2572.  * Unbinds an object from the global GTT aperture.
  2573.  */
  2574. int
  2575. i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
  2576. {
  2577.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2578.         struct i915_address_space *ggtt = &dev_priv->gtt.base;
  2579.  
  2580.         if (!i915_gem_obj_ggtt_bound(obj))
  2581.                 return 0;
  2582.  
  2583.         if (obj->pin_count)
  2584.                 return -EBUSY;
  2585.  
  2586.         BUG_ON(obj->pages == NULL);
  2587.  
  2588.         return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
  2589. }
  2590.  
  2591. int i915_gpu_idle(struct drm_device *dev)
  2592. {
  2593.         drm_i915_private_t *dev_priv = dev->dev_private;
  2594.         struct intel_ring_buffer *ring;
  2595.         int ret, i;
  2596.  
  2597.         /* Flush everything onto the inactive list. */
  2598.         for_each_ring(ring, dev_priv, i) {
  2599.                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
  2600.                 if (ret)
  2601.                         return ret;
  2602.  
  2603.                 ret = intel_ring_idle(ring);
  2604.                 if (ret)
  2605.                         return ret;
  2606.         }
  2607.  
  2608.         return 0;
  2609. }
  2610.  
  2611. static void i965_write_fence_reg(struct drm_device *dev, int reg,
  2612.                                         struct drm_i915_gem_object *obj)
  2613. {
  2614.         drm_i915_private_t *dev_priv = dev->dev_private;
  2615.         int fence_reg;
  2616.         int fence_pitch_shift;
  2617.  
  2618.         if (INTEL_INFO(dev)->gen >= 6) {
  2619.                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
  2620.                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
  2621.         } else {
  2622.                 fence_reg = FENCE_REG_965_0;
  2623.                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
  2624.         }
  2625.  
  2626.         fence_reg += reg * 8;
  2627.  
  2628.         /* To w/a incoherency with non-atomic 64-bit register updates,
  2629.          * we split the 64-bit update into two 32-bit writes. In order
  2630.          * for a partial fence not to be evaluated between writes, we
  2631.          * precede the update with write to turn off the fence register,
  2632.          * and only enable the fence as the last step.
  2633.          *
  2634.          * For extra levels of paranoia, we make sure each step lands
  2635.          * before applying the next step.
  2636.          */
  2637.         I915_WRITE(fence_reg, 0);
  2638.         POSTING_READ(fence_reg);
  2639.  
  2640.         if (obj) {
  2641.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2642.                 uint64_t val;
  2643.  
  2644.                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
  2645.                                  0xfffff000) << 32;
  2646.                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
  2647.                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
  2648.                 if (obj->tiling_mode == I915_TILING_Y)
  2649.                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  2650.                 val |= I965_FENCE_REG_VALID;
  2651.  
  2652.                 I915_WRITE(fence_reg + 4, val >> 32);
  2653.                 POSTING_READ(fence_reg + 4);
  2654.  
  2655.                 I915_WRITE(fence_reg + 0, val);
  2656.         POSTING_READ(fence_reg);
  2657.         } else {
  2658.                 I915_WRITE(fence_reg + 4, 0);
  2659.                 POSTING_READ(fence_reg + 4);
  2660.         }
  2661. }
  2662.  
  2663. static void i915_write_fence_reg(struct drm_device *dev, int reg,
  2664.                                  struct drm_i915_gem_object *obj)
  2665. {
  2666.         drm_i915_private_t *dev_priv = dev->dev_private;
  2667.         u32 val;
  2668.  
  2669.         if (obj) {
  2670.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2671.                 int pitch_val;
  2672.                 int tile_width;
  2673.  
  2674.                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
  2675.                      (size & -size) != size ||
  2676.                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2677.                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
  2678.                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
  2679.  
  2680.                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
  2681.                         tile_width = 128;
  2682.                 else
  2683.                         tile_width = 512;
  2684.  
  2685.                 /* Note: pitch better be a power of two tile widths */
  2686.                 pitch_val = obj->stride / tile_width;
  2687.                 pitch_val = ffs(pitch_val) - 1;
  2688.  
  2689.                 val = i915_gem_obj_ggtt_offset(obj);
  2690.                 if (obj->tiling_mode == I915_TILING_Y)
  2691.                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2692.                 val |= I915_FENCE_SIZE_BITS(size);
  2693.                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2694.                 val |= I830_FENCE_REG_VALID;
  2695.         } else
  2696.                 val = 0;
  2697.  
  2698.         if (reg < 8)
  2699.                 reg = FENCE_REG_830_0 + reg * 4;
  2700.         else
  2701.                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
  2702.  
  2703.         I915_WRITE(reg, val);
  2704.         POSTING_READ(reg);
  2705. }
  2706.  
  2707. static void i830_write_fence_reg(struct drm_device *dev, int reg,
  2708.                                 struct drm_i915_gem_object *obj)
  2709. {
  2710.         drm_i915_private_t *dev_priv = dev->dev_private;
  2711.         uint32_t val;
  2712.  
  2713.         if (obj) {
  2714.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2715.                 uint32_t pitch_val;
  2716.  
  2717.                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
  2718.                      (size & -size) != size ||
  2719.                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2720.                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
  2721.                      i915_gem_obj_ggtt_offset(obj), size);
  2722.  
  2723.                 pitch_val = obj->stride / 128;
  2724.                 pitch_val = ffs(pitch_val) - 1;
  2725.  
  2726.                 val = i915_gem_obj_ggtt_offset(obj);
  2727.                 if (obj->tiling_mode == I915_TILING_Y)
  2728.                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2729.                 val |= I830_FENCE_SIZE_BITS(size);
  2730.                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2731.                 val |= I830_FENCE_REG_VALID;
  2732.         } else
  2733.                 val = 0;
  2734.  
  2735.         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
  2736.         POSTING_READ(FENCE_REG_830_0 + reg * 4);
  2737. }
  2738.  
  2739. inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
  2740. {
  2741.         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
  2742. }
  2743.  
  2744. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  2745.                                  struct drm_i915_gem_object *obj)
  2746. {
  2747.         struct drm_i915_private *dev_priv = dev->dev_private;
  2748.  
  2749.         /* Ensure that all CPU reads are completed before installing a fence
  2750.          * and all writes before removing the fence.
  2751.          */
  2752.         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
  2753.                 mb();
  2754.  
  2755.         WARN(obj && (!obj->stride || !obj->tiling_mode),
  2756.              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
  2757.              obj->stride, obj->tiling_mode);
  2758.  
  2759.         switch (INTEL_INFO(dev)->gen) {
  2760.         case 8:
  2761.         case 7:
  2762.         case 6:
  2763.         case 5:
  2764.         case 4: i965_write_fence_reg(dev, reg, obj); break;
  2765.         case 3: i915_write_fence_reg(dev, reg, obj); break;
  2766.         case 2: i830_write_fence_reg(dev, reg, obj); break;
  2767.         default: BUG();
  2768.         }
  2769.  
  2770.         /* And similarly be paranoid that no direct access to this region
  2771.          * is reordered to before the fence is installed.
  2772.          */
  2773.         if (i915_gem_object_needs_mb(obj))
  2774.                 mb();
  2775. }
  2776.  
  2777. static inline int fence_number(struct drm_i915_private *dev_priv,
  2778.                                struct drm_i915_fence_reg *fence)
  2779. {
  2780.         return fence - dev_priv->fence_regs;
  2781. }
  2782.  
  2783. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  2784.                                          struct drm_i915_fence_reg *fence,
  2785.                                          bool enable)
  2786. {
  2787.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2788.         int reg = fence_number(dev_priv, fence);
  2789.  
  2790.         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
  2791.  
  2792.         if (enable) {
  2793.                 obj->fence_reg = reg;
  2794.                 fence->obj = obj;
  2795.                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
  2796.         } else {
  2797.                 obj->fence_reg = I915_FENCE_REG_NONE;
  2798.                 fence->obj = NULL;
  2799.                 list_del_init(&fence->lru_list);
  2800.         }
  2801.         obj->fence_dirty = false;
  2802. }
  2803.  
  2804. static int
  2805. i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  2806. {
  2807.         if (obj->last_fenced_seqno) {
  2808.                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
  2809.                 if (ret)
  2810.                         return ret;
  2811.  
  2812.                 obj->last_fenced_seqno = 0;
  2813.         }
  2814.  
  2815.         obj->fenced_gpu_access = false;
  2816.         return 0;
  2817. }
  2818.  
  2819. int
  2820. i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
  2821. {
  2822.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2823.         struct drm_i915_fence_reg *fence;
  2824.         int ret;
  2825.  
  2826.         ret = i915_gem_object_wait_fence(obj);
  2827.         if (ret)
  2828.                 return ret;
  2829.  
  2830.         if (obj->fence_reg == I915_FENCE_REG_NONE)
  2831.                 return 0;
  2832.  
  2833.         fence = &dev_priv->fence_regs[obj->fence_reg];
  2834.  
  2835.         i915_gem_object_fence_lost(obj);
  2836.         i915_gem_object_update_fence(obj, fence, false);
  2837.  
  2838.         return 0;
  2839. }
  2840.  
  2841. static struct drm_i915_fence_reg *
  2842. i915_find_fence_reg(struct drm_device *dev)
  2843. {
  2844.         struct drm_i915_private *dev_priv = dev->dev_private;
  2845.         struct drm_i915_fence_reg *reg, *avail;
  2846.         int i;
  2847.  
  2848.         /* First try to find a free reg */
  2849.         avail = NULL;
  2850.         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2851.                 reg = &dev_priv->fence_regs[i];
  2852.                 if (!reg->obj)
  2853.                         return reg;
  2854.  
  2855.                 if (!reg->pin_count)
  2856.                         avail = reg;
  2857.         }
  2858.  
  2859.         if (avail == NULL)
  2860.                 goto deadlock;
  2861.  
  2862.         /* None available, try to steal one or wait for a user to finish */
  2863.         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
  2864.                 if (reg->pin_count)
  2865.                         continue;
  2866.  
  2867.                 return reg;
  2868.         }
  2869.  
  2870. deadlock:
  2871.         /* Wait for completion of pending flips which consume fences */
  2872. //   if (intel_has_pending_fb_unpin(dev))
  2873. //       return ERR_PTR(-EAGAIN);
  2874.  
  2875.         return ERR_PTR(-EDEADLK);
  2876. }
  2877.  
  2878. /**
  2879.  * i915_gem_object_get_fence - set up fencing for an object
  2880.  * @obj: object to map through a fence reg
  2881.  *
  2882.  * When mapping objects through the GTT, userspace wants to be able to write
  2883.  * to them without having to worry about swizzling if the object is tiled.
  2884.  * This function walks the fence regs looking for a free one for @obj,
  2885.  * stealing one if it can't find any.
  2886.  *
  2887.  * It then sets up the reg based on the object's properties: address, pitch
  2888.  * and tiling format.
  2889.  *
  2890.  * For an untiled surface, this removes any existing fence.
  2891.  */
  2892. int
  2893. i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
  2894. {
  2895.         struct drm_device *dev = obj->base.dev;
  2896.         struct drm_i915_private *dev_priv = dev->dev_private;
  2897.         bool enable = obj->tiling_mode != I915_TILING_NONE;
  2898.         struct drm_i915_fence_reg *reg;
  2899.         int ret;
  2900.  
  2901.         /* Have we updated the tiling parameters upon the object and so
  2902.          * will need to serialise the write to the associated fence register?
  2903.          */
  2904.         if (obj->fence_dirty) {
  2905.                 ret = i915_gem_object_wait_fence(obj);
  2906.                 if (ret)
  2907.                         return ret;
  2908.         }
  2909.  
  2910.         /* Just update our place in the LRU if our fence is getting reused. */
  2911.         if (obj->fence_reg != I915_FENCE_REG_NONE) {
  2912.                 reg = &dev_priv->fence_regs[obj->fence_reg];
  2913.                 if (!obj->fence_dirty) {
  2914.                         list_move_tail(&reg->lru_list,
  2915.                                        &dev_priv->mm.fence_list);
  2916.                         return 0;
  2917.                 }
  2918.         } else if (enable) {
  2919.                 reg = i915_find_fence_reg(dev);
  2920.                 if (IS_ERR(reg))
  2921.                         return PTR_ERR(reg);
  2922.  
  2923.                 if (reg->obj) {
  2924.                         struct drm_i915_gem_object *old = reg->obj;
  2925.  
  2926.                         ret = i915_gem_object_wait_fence(old);
  2927.                         if (ret)
  2928.                                 return ret;
  2929.  
  2930.                         i915_gem_object_fence_lost(old);
  2931.                 }
  2932.         } else
  2933.                 return 0;
  2934.  
  2935.         i915_gem_object_update_fence(obj, reg, enable);
  2936.  
  2937.         return 0;
  2938. }
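/* A minimal caller-side sketch (not part of the driver): pin the object into
 * the mappable GTT, then request a fence before doing detiled CPU access
 * through the aperture.  The helper name fenced_gtt_access() is hypothetical;
 * the calls themselves are the ones defined in this file.
 */
#if 0
static int fenced_gtt_access(struct drm_i915_gem_object *obj)
{
        int ret;

        /* Bind into the global GTT so a fence register can cover the object. */
        ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
        if (ret)
                return ret;

        /* Reuses or steals a fence register; a no-op for untiled objects. */
        ret = i915_gem_object_get_fence(obj);
        if (ret == 0) {
                /* ... access the object through the fenced GTT mapping ... */
        }

        i915_gem_object_unpin(obj);
        return ret;
}
#endif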
  2939.  
  2940. static bool i915_gem_valid_gtt_space(struct drm_device *dev,
  2941.                                      struct drm_mm_node *gtt_space,
  2942.                                      unsigned long cache_level)
  2943. {
  2944.         struct drm_mm_node *other;
  2945.  
  2946.         /* On non-LLC machines we have to be careful when putting differing
  2947.          * types of snoopable memory together to avoid the prefetcher
  2948.          * crossing memory domains and dying.
  2949.          */
  2950.         if (HAS_LLC(dev))
  2951.                 return true;
  2952.  
  2953.         if (!drm_mm_node_allocated(gtt_space))
  2954.                 return true;
  2955.  
  2956.         if (list_empty(&gtt_space->node_list))
  2957.                 return true;
  2958.  
  2959.         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
  2960.         if (other->allocated && !other->hole_follows && other->color != cache_level)
  2961.                 return false;
  2962.  
  2963.         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
  2964.         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
  2965.                 return false;
  2966.  
  2967.         return true;
  2968. }
  2969.  
  2970. static void i915_gem_verify_gtt(struct drm_device *dev)
  2971. {
  2972. #if WATCH_GTT
  2973.         struct drm_i915_private *dev_priv = dev->dev_private;
  2974.         struct drm_i915_gem_object *obj;
  2975.         int err = 0;
  2976.  
  2977.         list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
  2978.                 if (obj->gtt_space == NULL) {
  2979.                         printk(KERN_ERR "object found on GTT list with no space reserved\n");
  2980.                         err++;
  2981.                         continue;
  2982.                 }
  2983.  
  2984.                 if (obj->cache_level != obj->gtt_space->color) {
  2985.                         printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
  2986.                                i915_gem_obj_ggtt_offset(obj),
  2987.                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
  2988.                                obj->cache_level,
  2989.                                obj->gtt_space->color);
  2990.                         err++;
  2991.                         continue;
  2992.                 }
  2993.  
  2994.                 if (!i915_gem_valid_gtt_space(dev,
  2995.                                               obj->gtt_space,
  2996.                                               obj->cache_level)) {
  2997.                         printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
  2998.                                i915_gem_obj_ggtt_offset(obj),
  2999.                                i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
  3000.                                obj->cache_level);
  3001.                         err++;
  3002.                         continue;
  3003.                 }
  3004.         }
  3005.  
  3006.         WARN_ON(err);
  3007. #endif
  3008. }
  3009.  
  3010. /**
  3011.  * Finds free space in the GTT aperture and binds the object there.
  3012.  */
  3013. static int
  3014. i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
  3015.                            struct i915_address_space *vm,
  3016.                             unsigned alignment,
  3017.                             bool map_and_fenceable,
  3018.                             bool nonblocking)
  3019. {
  3020.         struct drm_device *dev = obj->base.dev;
  3021.         drm_i915_private_t *dev_priv = dev->dev_private;
  3022.         u32 size, fence_size, fence_alignment, unfenced_alignment;
  3023.         size_t gtt_max =
  3024.                 map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
  3025.         struct i915_vma *vma;
  3026.         int ret;
  3027.  
  3028.         fence_size = i915_gem_get_gtt_size(dev,
  3029.                                            obj->base.size,
  3030.                                            obj->tiling_mode);
  3031.         fence_alignment = i915_gem_get_gtt_alignment(dev,
  3032.                                                      obj->base.size,
  3033.                                                      obj->tiling_mode, true);
  3034.         unfenced_alignment =
  3035.                 i915_gem_get_gtt_alignment(dev,
  3036.                                                     obj->base.size,
  3037.                                                     obj->tiling_mode, false);
  3038.  
  3039.         if (alignment == 0)
  3040.                 alignment = map_and_fenceable ? fence_alignment :
  3041.                                                 unfenced_alignment;
  3042.         if (map_and_fenceable && alignment & (fence_alignment - 1)) {
  3043.                 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
  3044.                 return -EINVAL;
  3045.         }
  3046.  
  3047.         size = map_and_fenceable ? fence_size : obj->base.size;
  3048.  
  3049.         /* If the object is bigger than the entire aperture, reject it early
  3050.          * before evicting everything in a vain attempt to find space.
  3051.          */
  3052.         if (obj->base.size > gtt_max) {
  3053.                 DRM_ERROR("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
  3054.                           obj->base.size,
  3055.                           map_and_fenceable ? "mappable" : "total",
  3056.                           gtt_max);
  3057.                 return -E2BIG;
  3058.         }
  3059.  
  3060.         ret = i915_gem_object_get_pages(obj);
  3061.         if (ret)
  3062.                 return ret;
  3063.  
  3064.         i915_gem_object_pin_pages(obj);
  3065.  
  3066.         BUG_ON(!i915_is_ggtt(vm));
  3067.  
  3068.         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
  3069.         if (IS_ERR(vma)) {
  3070.                 ret = PTR_ERR(vma);
  3071.                 goto err_unpin;
  3072.         }
  3073.  
  3074.         /* For now we only ever use 1 vma per object */
  3075.         WARN_ON(!list_is_singular(&obj->vma_list));
  3076.  
  3077. search_free:
  3078.         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
  3079.                                                   size, alignment,
  3080.                                                   obj->cache_level, 0, gtt_max,
  3081.                                                   DRM_MM_SEARCH_DEFAULT);
  3082.         if (ret)
  3083.                 goto err_free_vma;
  3086.         if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
  3087.                                               obj->cache_level))) {
  3088.                 ret = -EINVAL;
  3089.                 goto err_remove_node;
  3090.         }
  3091.  
  3092.         ret = i915_gem_gtt_prepare_object(obj);
  3093.         if (ret)
  3094.                 goto err_remove_node;
  3095.  
  3096.         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
  3097.         list_add_tail(&vma->mm_list, &vm->inactive_list);
  3098.  
  3099.         if (i915_is_ggtt(vm)) {
  3100.                 bool mappable, fenceable;
  3101.  
  3102.                 fenceable = (vma->node.size == fence_size &&
  3103.                              (vma->node.start & (fence_alignment - 1)) == 0);
  3104.  
  3105.                 mappable = (vma->node.start + obj->base.size <=
  3106.                             dev_priv->gtt.mappable_end);
  3107.  
  3108.                 obj->map_and_fenceable = mappable && fenceable;
  3109.         }
  3110.  
  3111.         WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
  3112.  
  3113.         trace_i915_vma_bind(vma, map_and_fenceable);
  3114.         i915_gem_verify_gtt(dev);
  3115.         return 0;
  3116.  
  3117. err_remove_node:
  3118.         drm_mm_remove_node(&vma->node);
  3119. err_free_vma:
  3120.         i915_gem_vma_destroy(vma);
  3121. err_unpin:
  3122.         i915_gem_object_unpin_pages(obj);
  3123.         return ret;
  3124. }
  3125.  
  3126. bool
  3127. i915_gem_clflush_object(struct drm_i915_gem_object *obj,
  3128.                         bool force)
  3129. {
  3130.         /* If we don't have a page list set up, then we're not pinned
  3131.          * to GPU, and we can ignore the cache flush because it'll happen
  3132.          * again at bind time.
  3133.          */
  3134.         if (obj->pages == NULL)
  3135.                 return false;
  3136.  
  3137.         /*
  3138.          * Stolen memory is always coherent with the GPU as it is explicitly
  3139.          * marked as wc by the system, or the system is cache-coherent.
  3140.          */
  3141.         if (obj->stolen)
  3142.                 return false;
  3143.  
  3144.         /* If the GPU is snooping the contents of the CPU cache,
  3145.          * we do not need to manually clear the CPU cache lines.  However,
  3146.          * the caches are only snooped when the render cache is
  3147.          * flushed/invalidated.  As we always have to emit invalidations
  3148.          * and flushes when moving into and out of the RENDER domain, correct
  3149.          * snooping behaviour occurs naturally as the result of our domain
  3150.          * tracking.
  3151.          */
  3152.         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
  3153.                 return false;
  3154.  
  3155.         trace_i915_gem_object_clflush(obj);
  3156.         drm_clflush_sg(obj->pages);
  3157.  
  3158.         return true;
  3159. }
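/* A minimal sketch (not part of the driver) of how callers pair the clflush
 * with a chipset flush: only when cache lines were actually flushed does the
 * chipset flush become necessary, which is exactly the pattern used by
 * i915_gem_object_flush_cpu_write_domain() below.
 */
#if 0
        if (i915_gem_clflush_object(obj, false))
                i915_gem_chipset_flush(obj->base.dev);
#endif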
  3160.  
  3161. /** Flushes the GTT write domain for the object if it's dirty. */
  3162. static void
  3163. i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
  3164. {
  3165.         uint32_t old_write_domain;
  3166.  
  3167.         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
  3168.                 return;
  3169.  
  3170.         /* No actual flushing is required for the GTT write domain.  Writes
  3171.          * to it immediately go to main memory as far as we know, so there's
  3172.          * no chipset flush.  It also doesn't land in render cache.
  3173.          *
  3174.          * However, we do have to enforce the order so that all writes through
  3175.          * the GTT land before any writes to the device, such as updates to
  3176.          * the GATT itself.
  3177.          */
  3178.         wmb();
  3179.  
  3180.         old_write_domain = obj->base.write_domain;
  3181.         obj->base.write_domain = 0;
  3182.  
  3183.         trace_i915_gem_object_change_domain(obj,
  3184.                                             obj->base.read_domains,
  3185.                                             old_write_domain);
  3186. }
  3187.  
  3188. /** Flushes the CPU write domain for the object if it's dirty. */
  3189. static void
  3190. i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
  3191.                                        bool force)
  3192. {
  3193.         uint32_t old_write_domain;
  3194.  
  3195.         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
  3196.                 return;
  3197.  
  3198.         if (i915_gem_clflush_object(obj, force))
  3199.                 i915_gem_chipset_flush(obj->base.dev);
  3200.  
  3201.         old_write_domain = obj->base.write_domain;
  3202.         obj->base.write_domain = 0;
  3203.  
  3204.         trace_i915_gem_object_change_domain(obj,
  3205.                                             obj->base.read_domains,
  3206.                                             old_write_domain);
  3207. }
  3208.  
  3209. /**
  3210.  * Moves a single object to the GTT read, and possibly write domain.
  3211.  *
  3212.  * This function returns when the move is complete, including waiting on
  3213.  * flushes to occur.
  3214.  */
  3215. int
  3216. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  3217. {
  3218.         drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
  3219.         uint32_t old_write_domain, old_read_domains;
  3220.         int ret;
  3221.  
  3222.         /* Not valid to be called on unbound objects. */
  3223.         if (!i915_gem_obj_bound_any(obj))
  3224.                 return -EINVAL;
  3225.  
  3226.         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
  3227.                 return 0;
  3228.  
  3229.         ret = i915_gem_object_wait_rendering(obj, !write);
  3230.         if (ret)
  3231.                 return ret;
  3232.  
  3233.         i915_gem_object_flush_cpu_write_domain(obj, false);
  3234.  
  3235.         /* Serialise direct access to this object with the barriers for
  3236.          * coherent writes from the GPU, by effectively invalidating the
  3237.          * GTT domain upon first access.
  3238.          */
  3239.         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  3240.                 mb();
  3241.  
  3242.         old_write_domain = obj->base.write_domain;
  3243.         old_read_domains = obj->base.read_domains;
  3244.  
  3245.         /* It should now be out of any other write domains, and we can update
  3246.          * the domain values for our changes.
  3247.          */
  3248.         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  3249.         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  3250.         if (write) {
  3251.                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
  3252.                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
  3253.                 obj->dirty = 1;
  3254.         }
  3255.  
  3256.         trace_i915_gem_object_change_domain(obj,
  3257.                                             old_read_domains,
  3258.                                             old_write_domain);
  3259.  
  3260.         /* And bump the LRU for this access */
  3261.         if (i915_gem_object_is_inactive(obj)) {
  3262.                 struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
  3263.                 if (vma)
  3264.                         list_move_tail(&vma->mm_list,
  3265.                                        &dev_priv->gtt.base.inactive_list);
  3266.  
  3267.         }
  3268.  
  3269.         return 0;
  3270. }
  3271.  
  3272. int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  3273.                                     enum i915_cache_level cache_level)
  3274. {
  3275.         struct drm_device *dev = obj->base.dev;
  3276.         drm_i915_private_t *dev_priv = dev->dev_private;
  3277.         struct i915_vma *vma;
  3278.         int ret;
  3279.  
  3280.         if (obj->cache_level == cache_level)
  3281.                 return 0;
  3282.  
  3283.         if (obj->pin_count) {
  3284.                 DRM_DEBUG("can not change the cache level of pinned objects\n");
  3285.                 return -EBUSY;
  3286.         }
  3287.  
  3288.         list_for_each_entry(vma, &obj->vma_list, vma_link) {
  3289.                 if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
  3290.                         ret = i915_vma_unbind(vma);
  3291.                         if (ret)
  3292.                                 return ret;
  3293.  
  3294.                         break;
  3295.                 }
  3296.         }
  3297.  
  3298.         if (i915_gem_obj_bound_any(obj)) {
  3299.                 ret = i915_gem_object_finish_gpu(obj);
  3300.                 if (ret)
  3301.                         return ret;
  3302.  
  3303.                 i915_gem_object_finish_gtt(obj);
  3304.  
  3305.                 /* Before SandyBridge, you could not use tiling or fence
  3306.                  * registers with snooped memory, so relinquish any fences
  3307.                  * currently pointing to our region in the aperture.
  3308.                  */
  3309.                 if (INTEL_INFO(dev)->gen < 6) {
  3310.                         ret = i915_gem_object_put_fence(obj);
  3311.                         if (ret)
  3312.                                 return ret;
  3313.                 }
  3314.  
  3315.                 if (obj->has_global_gtt_mapping)
  3316.                         i915_gem_gtt_bind_object(obj, cache_level);
  3317.                 if (obj->has_aliasing_ppgtt_mapping)
  3318.                         i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
  3319.                                                obj, cache_level);
  3320.         }
  3321.  
  3322.         list_for_each_entry(vma, &obj->vma_list, vma_link)
  3323.                 vma->node.color = cache_level;
  3324.         obj->cache_level = cache_level;
  3325.  
  3326.         if (cpu_write_needs_clflush(obj)) {
  3327.                 u32 old_read_domains, old_write_domain;
  3328.  
  3329.                 /* If we're coming from LLC cached, then we haven't
  3330.                  * actually been tracking whether the data is in the
  3331.                  * CPU cache or not, since we only allow one bit set
  3332.                  * in obj->write_domain and have been skipping the clflushes.
  3333.                  * Just set it to the CPU cache for now.
  3334.                  */
  3335.                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
  3336.  
  3337.                 old_read_domains = obj->base.read_domains;
  3338.                 old_write_domain = obj->base.write_domain;
  3339.  
  3340.                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3341.                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3342.  
  3343.                 trace_i915_gem_object_change_domain(obj,
  3344.                                                     old_read_domains,
  3345.                                                     old_write_domain);
  3346.         }
  3347.  
  3348.         i915_gem_verify_gtt(dev);
  3349.         return 0;
  3350. }
  3351.  
  3352. int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
  3353.                                struct drm_file *file)
  3354. {
  3355.         struct drm_i915_gem_caching *args = data;
  3356.         struct drm_i915_gem_object *obj;
  3357.         int ret;
  3358.  
  3359.         ret = i915_mutex_lock_interruptible(dev);
  3360.         if (ret)
  3361.                 return ret;
  3362.  
  3363.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3364.         if (&obj->base == NULL) {
  3365.                 ret = -ENOENT;
  3366.                 goto unlock;
  3367.         }
  3368.  
  3369.         switch (obj->cache_level) {
  3370.         case I915_CACHE_LLC:
  3371.         case I915_CACHE_L3_LLC:
  3372.                 args->caching = I915_CACHING_CACHED;
  3373.                 break;
  3374.  
  3375.         case I915_CACHE_WT:
  3376.                 args->caching = I915_CACHING_DISPLAY;
  3377.                 break;
  3378.  
  3379.         default:
  3380.                 args->caching = I915_CACHING_NONE;
  3381.                 break;
  3382.         }
  3383.  
  3384.         drm_gem_object_unreference(&obj->base);
  3385. unlock:
  3386.         mutex_unlock(&dev->struct_mutex);
  3387.         return ret;
  3388. }
  3389.  
  3390. int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
  3391.                                struct drm_file *file)
  3392. {
  3393.         struct drm_i915_gem_caching *args = data;
  3394.         struct drm_i915_gem_object *obj;
  3395.         enum i915_cache_level level;
  3396.         int ret;
  3397.  
  3398.         switch (args->caching) {
  3399.         case I915_CACHING_NONE:
  3400.                 level = I915_CACHE_NONE;
  3401.                 break;
  3402.         case I915_CACHING_CACHED:
  3403.                 level = I915_CACHE_LLC;
  3404.                 break;
  3405.         case I915_CACHING_DISPLAY:
  3406.                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
  3407.                 break;
  3408.         default:
  3409.                 return -EINVAL;
  3410.         }
  3411.  
  3412.         ret = i915_mutex_lock_interruptible(dev);
  3413.         if (ret)
  3414.                 return ret;
  3415.  
  3416.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3417.         if (&obj->base == NULL) {
  3418.                 ret = -ENOENT;
  3419.                 goto unlock;
  3420.         }
  3421.  
  3422.         ret = i915_gem_object_set_cache_level(obj, level);
  3423.  
  3424.         drm_gem_object_unreference(&obj->base);
  3425. unlock:
  3426.         mutex_unlock(&dev->struct_mutex);
  3427.         return ret;
  3428. }
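/* Userspace-side sketch (not part of the driver) of the caching ioctl pair
 * handled above; fd and bo_handle are assumed to come from the usual libdrm
 * open/create flow.
 */
#if 0
        struct drm_i915_gem_caching arg = {
                .handle  = bo_handle,
                .caching = I915_CACHING_CACHED, /* ask for an LLC-cached object */
        };

        drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
        drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg); /* arg.caching now reports the level */
#endif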
  3429.  
  3430. static bool is_pin_display(struct drm_i915_gem_object *obj)
  3431. {
  3432.         /* There are 3 sources that pin objects:
  3433.          *   1. The display engine (scanouts, sprites, cursors);
  3434.          *   2. Reservations for execbuffer;
  3435.          *   3. The user.
  3436.          *
  3437.          * We can ignore reservations as we hold the struct_mutex and
  3438.          * are only called outside of the reservation path.  The user
  3439.          * can only increment pin_count once, and so if after
  3440.          * subtracting the potential reference by the user, any pin_count
  3441.          * remains, it must be due to another use by the display engine.
  3442.          */
  3443.         return obj->pin_count - !!obj->user_pin_count;
  3444. }
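/* Worked example of the accounting above: pin_count == 2 with
 * user_pin_count == 1 leaves one pin that the user cannot account for, so the
 * object is still considered pinned for display; pin_count == 1 with
 * user_pin_count == 1 is not.
 */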
  3445.  
  3446. /*
  3447.  * Prepare buffer for display plane (scanout, cursors, etc).
  3448.  * Can be called from an uninterruptible phase (modesetting) and allows
  3449.  * any flushes to be pipelined (for pageflips).
  3450.  */
  3451. int
  3452. i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
  3453.                                      u32 alignment,
  3454.                                      struct intel_ring_buffer *pipelined)
  3455. {
  3456.         u32 old_read_domains, old_write_domain;
  3457.         int ret;
  3458.  
  3459.         if (pipelined != obj->ring) {
  3460.                 ret = i915_gem_object_sync(obj, pipelined);
  3461.                 if (ret)
  3462.                         return ret;
  3463.         }
  3464.  
  3465.         /* Mark the pin_display early so that we account for the
  3466.          * display coherency whilst setting up the cache domains.
  3467.          */
  3468.         obj->pin_display = true;
  3469.  
  3470.         /* The display engine is not coherent with the LLC cache on gen6.  As
  3471.          * a result, we make sure that the pinning that is about to occur is
  3472.          * done with uncached PTEs. This is lowest common denominator for all
  3473.          * chipsets.
  3474.          *
  3475.          * However for gen6+, we could do better by using the GFDT bit instead
  3476.          * of uncaching, which would allow us to flush all the LLC-cached data
  3477.          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
  3478.          */
  3479.         ret = i915_gem_object_set_cache_level(obj,
  3480.                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
  3481.         if (ret)
  3482.                 goto err_unpin_display;
  3483.  
  3484.         /* As the user may map the buffer once pinned in the display plane
  3485.          * (e.g. libkms for the bootup splash), we have to ensure that we
  3486.          * always use map_and_fenceable for all scanout buffers.
  3487.          */
  3488.         ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
  3489.         if (ret)
  3490.                 goto err_unpin_display;
  3491.  
  3492.         i915_gem_object_flush_cpu_write_domain(obj, true);
  3493.  
  3494.         old_write_domain = obj->base.write_domain;
  3495.         old_read_domains = obj->base.read_domains;
  3496.  
  3497.         /* It should now be out of any other write domains, and we can update
  3498.          * the domain values for our changes.
  3499.          */
  3500.         obj->base.write_domain = 0;
  3501.         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  3502.  
  3503.         trace_i915_gem_object_change_domain(obj,
  3504.                                             old_read_domains,
  3505.                                             old_write_domain);
  3506.  
  3507.         return 0;
  3508.  
  3509. err_unpin_display:
  3510.         obj->pin_display = is_pin_display(obj);
  3511.         return ret;
  3512. }
  3513.  
  3514. void
  3515. i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
  3516. {
  3517.         i915_gem_object_unpin(obj);
  3518.         obj->pin_display = is_pin_display(obj);
  3519. }
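/* A minimal sketch (not part of the driver): modesetting code pairs these two
 * calls around a scanout buffer's lifetime, e.g. across a page flip.  The
 * new_fb_obj/old_fb_obj names are hypothetical.
 */
#if 0
        ret = i915_gem_object_pin_to_display_plane(new_fb_obj, 0, NULL);
        if (ret)
                return ret;
        /* ... point the display plane at new_fb_obj and wait for the flip ... */
        i915_gem_object_unpin_from_display_plane(old_fb_obj);
#endif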
  3520.  
  3521. int
  3522. i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
  3523. {
  3524.         int ret;
  3525.  
  3526.         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
  3527.                 return 0;
  3528.  
  3529.         ret = i915_gem_object_wait_rendering(obj, false);
  3530.         if (ret)
  3531.                 return ret;
  3532.  
  3533.         /* Ensure that we invalidate the GPU's caches and TLBs. */
  3534.         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  3535.         return 0;
  3536. }
  3537.  
  3538. /**
  3539.  * Moves a single object to the CPU read, and possibly write domain.
  3540.  *
  3541.  * This function returns when the move is complete, including waiting on
  3542.  * flushes to occur.
  3543.  */
  3544. int
  3545. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  3546. {
  3547.         uint32_t old_write_domain, old_read_domains;
  3548.         int ret;
  3549.  
  3550.         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
  3551.                 return 0;
  3552.  
  3553.         ret = i915_gem_object_wait_rendering(obj, !write);
  3554.         if (ret)
  3555.                 return ret;
  3556.  
  3557.         i915_gem_object_flush_gtt_write_domain(obj);
  3558.  
  3559.         old_write_domain = obj->base.write_domain;
  3560.         old_read_domains = obj->base.read_domains;
  3561.  
  3562.         /* Flush the CPU cache if it's still invalid. */
  3563.         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  3564.                 i915_gem_clflush_object(obj, false);
  3565.  
  3566.                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
  3567.         }
  3568.  
  3569.         /* It should now be out of any other write domains, and we can update
  3570.          * the domain values for our changes.
  3571.          */
  3572.         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  3573.  
  3574.         /* If we're writing through the CPU, then the GPU read domains will
  3575.          * need to be invalidated at next use.
  3576.          */
  3577.         if (write) {
  3578.                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3579.                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3580.         }
  3581.  
  3582.         trace_i915_gem_object_change_domain(obj,
  3583.                                             old_read_domains,
  3584.                                             old_write_domain);
  3585.  
  3586.         return 0;
  3587. }
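/* A minimal sketch (not part of the driver) of moving an object between the
 * CPU and GTT domains around a CPU write: set_to_cpu_domain() flushes pending
 * GTT writes and clflushes as needed, and set_to_gtt_domain() flushes the CPU
 * write domain again before GTT access resumes.
 */
#if 0
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;
        /* ... CPU writes to the backing pages ... */
        ret = i915_gem_object_set_to_gtt_domain(obj, false);
#endif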
  3588.  
  3589. /* Throttle our rendering by waiting until the ring has completed our requests
  3590.  * emitted over 20 msec ago.
  3591.  *
  3592.  * Note that if we were to use the current jiffies each time around the loop,
  3593.  * we wouldn't escape the function with any frames outstanding if the time to
  3594.  * render a frame was over 20ms.
  3595.  *
  3596.  * This should get us reasonable parallelism between CPU and GPU but also
  3597.  * relatively low latency when blocking on a particular request to finish.
  3598.  */
  3599. static int
  3600. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  3601. {
  3602.         struct drm_i915_private *dev_priv = dev->dev_private;
  3603.         struct drm_i915_file_private *file_priv = file->driver_priv;
  3604.         unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
  3605.         struct drm_i915_gem_request *request;
  3606.         struct intel_ring_buffer *ring = NULL;
  3607.         unsigned reset_counter;
  3608.         u32 seqno = 0;
  3609.         int ret;
  3610.  
  3611.         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
  3612.         if (ret)
  3613.                 return ret;
  3614.  
  3615.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
  3616.         if (ret)
  3617.                 return ret;
  3618.  
  3619.         spin_lock(&file_priv->mm.lock);
  3620.         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
  3621.                 if (time_after_eq(request->emitted_jiffies, recent_enough))
  3622.                         break;
  3623.  
  3624.                 ring = request->ring;
  3625.                 seqno = request->seqno;
  3626.         }
  3627.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  3628.         spin_unlock(&file_priv->mm.lock);
  3629.  
  3630.         if (seqno == 0)
  3631.                 return 0;
  3632.  
  3633.         ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
  3634.         if (ret == 0)
  3635.                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  3636.  
  3637.         return ret;
  3638. }
  3639.  
  3640. int
  3641. i915_gem_object_pin(struct drm_i915_gem_object *obj,
  3642.                     struct i915_address_space *vm,
  3643.                     uint32_t alignment,
  3644.                     bool map_and_fenceable,
  3645.                     bool nonblocking)
  3646. {
  3647.         struct i915_vma *vma;
  3648.         int ret;
  3649.  
  3650.         if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
  3651.                 return -EBUSY;
  3652.  
  3653.         WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
  3654.  
  3655.         vma = i915_gem_obj_to_vma(obj, vm);
  3656.  
  3657.         if (vma) {
  3658.                 if ((alignment &&
  3659.                      vma->node.start & (alignment - 1)) ||
  3660.                     (map_and_fenceable && !obj->map_and_fenceable)) {
  3661.                         WARN(obj->pin_count,
  3662.                              "bo is already pinned with incorrect alignment:"
  3663.                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
  3664.                              " obj->map_and_fenceable=%d\n",
  3665.                              i915_gem_obj_offset(obj, vm), alignment,
  3666.                              map_and_fenceable,
  3667.                              obj->map_and_fenceable);
  3668.                         ret = i915_vma_unbind(vma);
  3669.                         if (ret)
  3670.                                 return ret;
  3671.                 }
  3672.         }
  3673.  
  3674.         if (!i915_gem_obj_bound(obj, vm)) {
  3675.                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  3676.  
  3677.                 ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
  3678.                                                   map_and_fenceable,
  3679.                                                   nonblocking);
  3680.                 if (ret)
  3681.                         return ret;
  3682.  
  3683.                 if (!dev_priv->mm.aliasing_ppgtt)
  3684.                         i915_gem_gtt_bind_object(obj, obj->cache_level);
  3685.         }
  3686.  
  3687.         if (!obj->has_global_gtt_mapping && map_and_fenceable)
  3688.                 i915_gem_gtt_bind_object(obj, obj->cache_level);
  3689.  
  3690.         obj->pin_count++;
  3691.         obj->pin_mappable |= map_and_fenceable;
  3692.  
  3693.         return 0;
  3694. }
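/* A minimal sketch (not part of the driver): pin_count is a plain refcount,
 * so every successful pin must eventually be paired with an unpin.  The
 * global GTT is used here purely as an example address space.
 */
#if 0
        ret = i915_gem_object_pin(obj, &dev_priv->gtt.base, 4096, true, false);
        if (ret)
                return ret;
        /* ... the object stays bound and mappable until the matching unpin ... */
        i915_gem_object_unpin(obj);
#endif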
  3695.  
  3696. void
  3697. i915_gem_object_unpin(struct drm_i915_gem_object *obj)
  3698. {
  3699.         BUG_ON(obj->pin_count == 0);
  3700.         BUG_ON(!i915_gem_obj_bound_any(obj));
  3701.  
  3702.         if (--obj->pin_count == 0)
  3703.                 obj->pin_mappable = false;
  3704. }
  3705.  
  3706. int
  3707. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3708.                    struct drm_file *file)
  3709. {
  3710.         struct drm_i915_gem_pin *args = data;
  3711.         struct drm_i915_gem_object *obj;
  3712.         int ret;
  3713.  
  3714.         ret = i915_mutex_lock_interruptible(dev);
  3715.         if (ret)
  3716.                 return ret;
  3717.  
  3718.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3719.         if (&obj->base == NULL) {
  3720.                 ret = -ENOENT;
  3721.                 goto unlock;
  3722.         }
  3723.  
  3724.         if (obj->madv != I915_MADV_WILLNEED) {
  3725.                 DRM_ERROR("Attempting to pin a purgeable buffer\n");
  3726.                 ret = -EINVAL;
  3727.                 goto out;
  3728.         }
  3729.  
  3730.         if (obj->pin_filp != NULL && obj->pin_filp != file) {
  3731.                 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3732.                           args->handle);
  3733.                 ret = -EINVAL;
  3734.                 goto out;
  3735.         }
  3736.  
  3737.         if (obj->user_pin_count == ULONG_MAX) {
  3738.                 ret = -EBUSY;
  3739.                 goto out;
  3740.         }
  3741.  
  3742.         if (obj->user_pin_count == 0) {
  3743.                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
  3744.                 if (ret)
  3745.                         goto out;
  3746.         }
  3747.  
  3748.         obj->user_pin_count++;
  3749.         obj->pin_filp = file;
  3750.  
  3751.         args->offset = i915_gem_obj_ggtt_offset(obj);
  3752. out:
  3753.         drm_gem_object_unreference(&obj->base);
  3754. unlock:
  3755.         mutex_unlock(&dev->struct_mutex);
  3756.         return ret;
  3757. }
  3758.  
  3759. int
  3760. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3761.                      struct drm_file *file)
  3762. {
  3763.         struct drm_i915_gem_pin *args = data;
  3764.         struct drm_i915_gem_object *obj;
  3765.         int ret;
  3766.  
  3767.         ret = i915_mutex_lock_interruptible(dev);
  3768.         if (ret)
  3769.                 return ret;
  3770.  
  3771.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3772.         if (&obj->base == NULL) {
  3773.                 ret = -ENOENT;
  3774.                 goto unlock;
  3775.         }
  3776.  
  3777.         if (obj->pin_filp != file) {
  3778.                 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
  3779.                           args->handle);
  3780.                 ret = -EINVAL;
  3781.                 goto out;
  3782.         }
  3783.         obj->user_pin_count--;
  3784.         if (obj->user_pin_count == 0) {
  3785.                 obj->pin_filp = NULL;
  3786.                 i915_gem_object_unpin(obj);
  3787.         }
  3788.  
  3789. out:
  3790.         drm_gem_object_unreference(&obj->base);
  3791. unlock:
  3792.         mutex_unlock(&dev->struct_mutex);
  3793.         return ret;
  3794. }
  3795.  
  3796. int
  3797. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3798.                     struct drm_file *file)
  3799. {
  3800.         struct drm_i915_gem_busy *args = data;
  3801.         struct drm_i915_gem_object *obj;
  3802.         int ret;
  3803.  
  3804.         ret = i915_mutex_lock_interruptible(dev);
  3805.         if (ret)
  3806.                 return ret;
  3807.  
  3808.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3809.         if (&obj->base == NULL) {
  3810.                 ret = -ENOENT;
  3811.                 goto unlock;
  3812.         }
  3813.  
  3814.         /* Count all active objects as busy, even if they are currently not used
  3815.          * by the gpu. Users of this interface expect objects to eventually
  3816.          * become non-busy without any further actions, therefore emit any
  3817.          * necessary flushes here.
  3818.          */
  3819.         ret = i915_gem_object_flush_active(obj);
  3820.  
  3821.         args->busy = obj->active;
  3822.         if (obj->ring) {
  3823.                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
  3824.                 args->busy |= intel_ring_flag(obj->ring) << 16;
  3825.         }
  3826.  
  3827.         drm_gem_object_unreference(&obj->base);
  3828. unlock:
  3829.         mutex_unlock(&dev->struct_mutex);
  3830.         return ret;
  3831. }
  3832.  
  3833. int
  3834. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3835.                         struct drm_file *file_priv)
  3836. {
  3837.         return i915_gem_ring_throttle(dev, file_priv);
  3838. }
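/* Userspace-side sketch (not part of the driver): clients issue the throttle
 * ioctl before queueing more work, and the wait implemented above keeps them
 * roughly one 20 ms window behind the GPU.
 */
#if 0
        drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
#endif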
  3839.  
  3840. #if 0
  3841.  
  3842. int
  3843. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3844.                        struct drm_file *file_priv)
  3845. {
  3846.         struct drm_i915_gem_madvise *args = data;
  3847.         struct drm_i915_gem_object *obj;
  3848.         int ret;
  3849.  
  3850.         switch (args->madv) {
  3851.         case I915_MADV_DONTNEED:
  3852.         case I915_MADV_WILLNEED:
  3853.             break;
  3854.         default:
  3855.             return -EINVAL;
  3856.         }
  3857.  
  3858.         ret = i915_mutex_lock_interruptible(dev);
  3859.         if (ret)
  3860.                 return ret;
  3861.  
  3862.         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
  3863.         if (&obj->base == NULL) {
  3864.                 ret = -ENOENT;
  3865.                 goto unlock;
  3866.         }
  3867.  
  3868.         if (obj->pin_count) {
  3869.                 ret = -EINVAL;
  3870.                 goto out;
  3871.         }
  3872.  
  3873.         if (obj->madv != __I915_MADV_PURGED)
  3874.                 obj->madv = args->madv;
  3875.  
  3876.         /* if the object is no longer attached, discard its backing storage */
  3877.         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
  3878.                 i915_gem_object_truncate(obj);
  3879.  
  3880.         args->retained = obj->madv != __I915_MADV_PURGED;
  3881.  
  3882. out:
  3883.         drm_gem_object_unreference(&obj->base);
  3884. unlock:
  3885.         mutex_unlock(&dev->struct_mutex);
  3886.         return ret;
  3887. }
  3888. #endif
  3889.  
  3890. void i915_gem_object_init(struct drm_i915_gem_object *obj,
  3891.                           const struct drm_i915_gem_object_ops *ops)
  3892. {
  3893.         INIT_LIST_HEAD(&obj->global_list);
  3894.         INIT_LIST_HEAD(&obj->ring_list);
  3895.         INIT_LIST_HEAD(&obj->obj_exec_link);
  3896.         INIT_LIST_HEAD(&obj->vma_list);
  3897.  
  3898.         obj->ops = ops;
  3899.  
  3900.         obj->fence_reg = I915_FENCE_REG_NONE;
  3901.         obj->madv = I915_MADV_WILLNEED;
  3902.         /* Avoid an unnecessary call to unbind on the first bind. */
  3903.         obj->map_and_fenceable = true;
  3904.  
  3905.         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
  3906. }
  3907.  
  3908. static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
  3909.         .get_pages = i915_gem_object_get_pages_gtt,
  3910.         .put_pages = i915_gem_object_put_pages_gtt,
  3911. };
  3912.  
  3913. struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
  3914.                                                   size_t size)
  3915. {
  3916.         struct drm_i915_gem_object *obj;
  3917.         struct address_space *mapping;
  3918.         gfp_t mask;
  3919.  
  3920.         obj = i915_gem_object_alloc(dev);
  3921.         if (obj == NULL)
  3922.                 return NULL;
  3923.  
  3924.         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
  3925.                 i915_gem_object_free(obj);
  3926.                 return NULL;
  3927.         }
  3928.  
  3929.  
  3930.         i915_gem_object_init(obj, &i915_gem_object_ops);
  3931.  
  3932.         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3933.         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3934.  
  3935.         if (HAS_LLC(dev)) {
  3936.                 /* On some devices, we can have the GPU use the LLC (the CPU
  3937.                  * cache) for about a 10% performance improvement
  3938.                  * compared to uncached.  Graphics requests other than
  3939.                  * display scanout are coherent with the CPU in
  3940.                  * accessing this cache.  This means in this mode we
  3941.                  * don't need to clflush on the CPU side, and on the
  3942.                  * GPU side we only need to flush internal caches to
  3943.                  * get data visible to the CPU.
  3944.                  *
  3945.                  * However, we maintain the display planes as UC, and so
  3946.                  * need to rebind when first used as such.
  3947.                  */
  3948.                 obj->cache_level = I915_CACHE_LLC;
  3949.         } else
  3950.                 obj->cache_level = I915_CACHE_NONE;
  3951.  
  3952.         trace_i915_gem_object_create(obj);
  3953.  
  3954.         return obj;
  3955. }
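/* A minimal sketch (not part of the driver): allocating a shmem-backed object
 * picks the default cache level described above, so on LLC hardware the
 * object starts out CPU-coherent and scanout users must still drop it to an
 * uncached/WT level via i915_gem_object_set_cache_level().
 */
#if 0
        struct drm_i915_gem_object *obj = i915_gem_alloc_object(dev, 4096);

        if (obj == NULL)
                return -ENOMEM;
        /* obj->cache_level is I915_CACHE_LLC on LLC parts, I915_CACHE_NONE otherwise */
#endif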
  3956.  
  3957. void i915_gem_free_object(struct drm_gem_object *gem_obj)
  3958. {
  3959.         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  3960.         struct drm_device *dev = obj->base.dev;
  3961.         drm_i915_private_t *dev_priv = dev->dev_private;
  3962.         struct i915_vma *vma, *next;
  3963.  
  3964.         intel_runtime_pm_get(dev_priv);
  3965.  
  3966.         trace_i915_gem_object_destroy(obj);
  3967.  
  3968.  
  3969.         obj->pin_count = 0;
  3970.         /* NB: 0 or 1 elements */
  3971.         WARN_ON(!list_empty(&obj->vma_list) &&
  3972.                 !list_is_singular(&obj->vma_list));
  3973.         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
  3974.                 int ret = i915_vma_unbind(vma);
  3975.                 if (WARN_ON(ret == -ERESTARTSYS)) {
  3976.                         bool was_interruptible;
  3977.  
  3978.                         was_interruptible = dev_priv->mm.interruptible;
  3979.                         dev_priv->mm.interruptible = false;
  3980.  
  3981.                         WARN_ON(i915_vma_unbind(vma));
  3982.  
  3983.                         dev_priv->mm.interruptible = was_interruptible;
  3984.                 }
  3985.         }
  3986.  
  3987.         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
  3988.          * before progressing. */
  3989.         if (obj->stolen)
  3990.                 i915_gem_object_unpin_pages(obj);
  3991.  
  3992.         if (WARN_ON(obj->pages_pin_count))
  3993.                 obj->pages_pin_count = 0;
  3994.         i915_gem_object_put_pages(obj);
  3995. //   i915_gem_object_free_mmap_offset(obj);
  3996.         i915_gem_object_release_stolen(obj);
  3997.  
  3998.         BUG_ON(obj->pages);
  3999.  
  4000.  
  4001.         if (obj->base.filp != NULL) {
  4002. //              printf("filp %p\n", obj->base.filp);
  4003.                 shmem_file_delete(obj->base.filp);
  4004.         }
  4006.  
  4007.         drm_gem_object_release(&obj->base);
  4008.         i915_gem_info_remove_obj(dev_priv, obj->base.size);
  4009.  
  4010.         kfree(obj->bit_17);
  4011.         i915_gem_object_free(obj);
  4012.  
  4013.         intel_runtime_pm_put(dev_priv);
  4014. }
  4015.  
  4016. struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
  4017.                                      struct i915_address_space *vm)
  4018. {
  4019.         struct i915_vma *vma;
  4020.         list_for_each_entry(vma, &obj->vma_list, vma_link)
  4021.                 if (vma->vm == vm)
  4022.                         return vma;
  4023.  
  4024.         return NULL;
  4025. }
  4026.  
  4027. static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
  4028.                                      struct i915_address_space *vm)
  4029. {
  4030.         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
  4031.         if (vma == NULL)
  4032.                 return ERR_PTR(-ENOMEM);
  4033.  
  4034.         INIT_LIST_HEAD(&vma->vma_link);
  4035.         INIT_LIST_HEAD(&vma->mm_list);
  4036.         INIT_LIST_HEAD(&vma->exec_list);
  4037.         vma->vm = vm;
  4038.         vma->obj = obj;
  4039.  
  4040.         /* Keep GGTT vmas first to make debug easier */
  4041.         if (i915_is_ggtt(vm))
  4042.                 list_add(&vma->vma_link, &obj->vma_list);
  4043.         else
  4044.                 list_add_tail(&vma->vma_link, &obj->vma_list);
  4045.  
  4046.         return vma;
  4047. }
  4048.  
  4049. struct i915_vma *
  4050. i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
  4051.                                   struct i915_address_space *vm)
  4052. {
  4053.         struct i915_vma *vma;
  4054.  
  4055.         vma = i915_gem_obj_to_vma(obj, vm);
  4056.         if (!vma)
  4057.                 vma = __i915_gem_vma_create(obj, vm);
  4058.  
  4059.         return vma;
  4060. }
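/* A minimal sketch (not part of the driver): binding code asks for the
 * (obj, vm) vma, creating it on demand, and only afterwards reserves GTT
 * space in vma->node - the same order used by i915_gem_object_bind_to_vm().
 */
#if 0
        struct i915_vma *vma = i915_gem_obj_lookup_or_create_vma(obj, vm);

        if (IS_ERR(vma))
                return PTR_ERR(vma);
        /* vma->node is filled in later by drm_mm_insert_node_in_range_generic() */
#endif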
  4061.  
  4062. void i915_gem_vma_destroy(struct i915_vma *vma)
  4063. {
  4064.         WARN_ON(vma->node.allocated);
  4065.  
  4066.         /* Keep the vma as a placeholder in the execbuffer reservation lists */
  4067.         if (!list_empty(&vma->exec_list))
  4068.                 return;
  4069.  
  4070.         list_del(&vma->vma_link);
  4071.  
  4072.         kfree(vma);
  4073. }
  4074.  
  4075. #if 0
  4076. int
  4077. i915_gem_suspend(struct drm_device *dev)
  4078. {
  4079.         drm_i915_private_t *dev_priv = dev->dev_private;
  4080.         int ret = 0;
  4081.  
  4082.         mutex_lock(&dev->struct_mutex);
  4083.         if (dev_priv->ums.mm_suspended)
  4084.                 goto err;
  4085.  
  4086.         ret = i915_gpu_idle(dev);
  4087.         if (ret)
  4088.                 goto err;
  4089.  
  4090.         i915_gem_retire_requests(dev);
  4091.  
  4092.         /* Under UMS, be paranoid and evict. */
  4093.         if (!drm_core_check_feature(dev, DRIVER_MODESET))
  4094.                 i915_gem_evict_everything(dev);
  4095.  
  4096.         i915_kernel_lost_context(dev);
  4097.         i915_gem_cleanup_ringbuffer(dev);
  4098.  
  4099.         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
  4100.          * We need to replace this with a semaphore, or something.
  4101.          * And not confound ums.mm_suspended!
  4102.          */
  4103.         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
  4104.                                                              DRIVER_MODESET);
  4105.         mutex_unlock(&dev->struct_mutex);
  4106.  
  4107.         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  4108.         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  4109.         cancel_delayed_work_sync(&dev_priv->mm.idle_work);
  4110.  
  4111.         return 0;
  4112.  
  4113. err:
  4114.         mutex_unlock(&dev->struct_mutex);
  4115.         return ret;
  4116. }
  4117. #endif
  4118.  
  4119. int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
  4120. {
  4121.         struct drm_device *dev = ring->dev;
  4122.         drm_i915_private_t *dev_priv = dev->dev_private;
  4123.         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
  4124.         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
  4125.         int i, ret;
  4126.  
  4127.         if (!HAS_L3_DPF(dev) || !remap_info)
  4128.                 return 0;
  4129.  
  4130.         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
  4131.         if (ret)
  4132.                 return ret;
  4133.  
  4134.         /*
  4135.          * Note: We do not worry about the concurrent register cacheline hang
  4136.          * here because no other code should access these registers other than
  4137.          * at initialization time.
  4138.          */
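        /*
         * Each register is programmed with a three-dword LRI sequence (the
         * MI_LOAD_REGISTER_IMM(1) header, the register offset, then the
         * value), which is why intel_ring_begin() above reserved
         * GEN7_L3LOG_SIZE / 4 * 3 dwords for GEN7_L3LOG_SIZE / 4 entries.
         */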
  4139.         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
  4140.                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  4141.                 intel_ring_emit(ring, reg_base + i);
  4142.                 intel_ring_emit(ring, remap_info[i/4]);
  4143.         }
  4144.  
  4145.         intel_ring_advance(ring);
  4146.  
  4147.         return ret;
  4148. }
  4149.  
  4150. void i915_gem_init_swizzling(struct drm_device *dev)
  4151. {
  4152.         drm_i915_private_t *dev_priv = dev->dev_private;
  4153.  
  4154.         if (INTEL_INFO(dev)->gen < 5 ||
  4155.             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
  4156.                 return;
  4157.  
  4158.         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
  4159.                                  DISP_TILE_SURFACE_SWIZZLING);
  4160.  
  4161.         if (IS_GEN5(dev))
  4162.                 return;
  4163.  
  4164.         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
  4165.         if (IS_GEN6(dev))
  4166.                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
  4167.         else if (IS_GEN7(dev))
  4168.                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
  4169.         else if (IS_GEN8(dev))
  4170.                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
  4171.         else
  4172.                 BUG();
  4173. }
  4174.  
  4175. static bool
  4176. intel_enable_blt(struct drm_device *dev)
  4177. {
  4178.         if (!HAS_BLT(dev))
  4179.                 return false;
  4180.  
  4181.         /* The blitter was dysfunctional on early prototypes */
  4182.         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
  4183.                 DRM_INFO("BLT not supported on this pre-production hardware;"
  4184.                          " graphics performance will be degraded.\n");
  4185.                 return false;
  4186.         }
  4187.  
  4188.         return true;
  4189. }
  4190.  
  4191. static int i915_gem_init_rings(struct drm_device *dev)
  4192. {
  4193.         struct drm_i915_private *dev_priv = dev->dev_private;
  4194.         int ret;
  4195.  
  4196.         ret = intel_init_render_ring_buffer(dev);
  4197.         if (ret)
  4198.                 return ret;
  4199.  
  4200.         if (HAS_BSD(dev)) {
  4201.                 ret = intel_init_bsd_ring_buffer(dev);
  4202.                 if (ret)
  4203.                         goto cleanup_render_ring;
  4204.         }
  4205.  
  4206.         if (intel_enable_blt(dev)) {
  4207.                 ret = intel_init_blt_ring_buffer(dev);
  4208.                 if (ret)
  4209.                         goto cleanup_bsd_ring;
  4210.         }
  4211.  
  4212.         if (HAS_VEBOX(dev)) {
  4213.                 ret = intel_init_vebox_ring_buffer(dev);
  4214.                 if (ret)
  4215.                         goto cleanup_blt_ring;
  4216.         }
  4217.  
  4218.  
  4219.         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
  4220.         if (ret)
  4221.                 goto cleanup_vebox_ring;
  4222.  
  4223.         return 0;
  4224.  
  4225. cleanup_vebox_ring:
  4226.         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
  4227. cleanup_blt_ring:
  4228.         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
  4229. cleanup_bsd_ring:
  4230.         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
  4231. cleanup_render_ring:
  4232.         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
  4233.  
  4234.         return ret;
  4235. }
  4236.  
  4237. int
  4238. i915_gem_init_hw(struct drm_device *dev)
  4239. {
  4240.         drm_i915_private_t *dev_priv = dev->dev_private;
  4241.         int ret, i;
  4242.  
  4243.         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
  4244.                 return -EIO;
  4245.  
  4246.         if (dev_priv->ellc_size)
  4247.                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  4248.  
  4249.         if (IS_HASWELL(dev))
  4250.                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
  4251.                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  4252.  
  4253.         if (HAS_PCH_NOP(dev)) {
  4254.                 u32 temp = I915_READ(GEN7_MSG_CTL);
  4255.                 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
  4256.                 I915_WRITE(GEN7_MSG_CTL, temp);
  4257.         }
  4258.  
  4259.         i915_gem_init_swizzling(dev);
  4260.  
  4261.         ret = i915_gem_init_rings(dev);
  4262.         if (ret)
  4263.                 return ret;
  4264.  
  4265.         for (i = 0; i < NUM_L3_SLICES(dev); i++)
  4266.                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
  4267.  
  4268.         /*
  4269.          * XXX: There was some w/a described somewhere suggesting loading
  4270.          * contexts before PPGTT.
  4271.          */
  4272.         ret = i915_gem_context_init(dev);
  4273.         if (ret) {
  4274.                 i915_gem_cleanup_ringbuffer(dev);
  4275.                 DRM_ERROR("Context initialization failed %d\n", ret);
  4276.                 return ret;
  4277.         }
  4278.  
  4279.         if (dev_priv->mm.aliasing_ppgtt) {
  4280.                 ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
  4281.                 if (ret) {
  4282.                         i915_gem_cleanup_aliasing_ppgtt(dev);
  4283.                         DRM_INFO("PPGTT enable failed. This is not fatal, but unexpected\n");
  4284.                 }
  4285.         }
  4286.  
  4287.         return 0;
  4288. }
  4289.  
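/* Top-level GEM initialisation: set up the global GTT, then the hardware. */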
  4290. int i915_gem_init(struct drm_device *dev)
  4291. {
  4292.         struct drm_i915_private *dev_priv = dev->dev_private;
  4293.         int ret;
  4294.  
  4295.         mutex_lock(&dev->struct_mutex);
  4296.  
  4297.         if (IS_VALLEYVIEW(dev)) {
  4298.                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
  4299.                 I915_WRITE(VLV_GTLC_WAKE_CTRL, 1);
  4300.                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) & 1) == 1, 10))
  4301.                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
  4302.         }
  4303.  
  4304.         i915_gem_init_global_gtt(dev);
  4305.  
  4306.         ret = i915_gem_init_hw(dev);
  4307.         mutex_unlock(&dev->struct_mutex);
  4308.         if (ret) {
  4309.                 i915_gem_cleanup_aliasing_ppgtt(dev);
  4310.                 return ret;
  4311.         }
  4312.  
  4313.         return 0;
  4315. }
  4316.  
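/* Tear down every ring initialised by i915_gem_init_rings(). */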
  4317. void
  4318. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  4319. {
  4320.         drm_i915_private_t *dev_priv = dev->dev_private;
  4321.         struct intel_ring_buffer *ring;
  4322.         int i;
  4323.  
  4324.         for_each_ring(ring, dev_priv, i)
  4325.                 intel_cleanup_ring_buffer(ring);
  4326. }
  4327.  
  4328. #if 0
  4329.  
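/* Legacy (UMS) VT-enter ioctl: resume GEM and install the interrupt handler. */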
  4330. int
  4331. i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
  4332.                        struct drm_file *file_priv)
  4333. {
  4334.         struct drm_i915_private *dev_priv = dev->dev_private;
  4335.         int ret;
  4336.  
  4337.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  4338.                 return 0;
  4339.  
  4340.         if (i915_reset_in_progress(&dev_priv->gpu_error)) {
  4341.                 DRM_ERROR("Reenabling wedged hardware, good luck\n");
  4342.                 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
  4343.         }
  4344.  
  4345.         mutex_lock(&dev->struct_mutex);
  4346.         dev_priv->ums.mm_suspended = 0;
  4347.  
  4348.         ret = i915_gem_init_hw(dev);
  4349.         if (ret != 0) {
  4350.                 mutex_unlock(&dev->struct_mutex);
  4351.                 return ret;
  4352.         }
  4353.  
  4354.         BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
  4355.         mutex_unlock(&dev->struct_mutex);
  4356.  
  4357.         ret = drm_irq_install(dev);
  4358.         if (ret)
  4359.                 goto cleanup_ringbuffer;
  4360.  
  4361.         return 0;
  4362.  
  4363. cleanup_ringbuffer:
  4364.         mutex_lock(&dev->struct_mutex);
  4365.         i915_gem_cleanup_ringbuffer(dev);
  4366.         dev_priv->ums.mm_suspended = 1;
  4367.         mutex_unlock(&dev->struct_mutex);
  4368.  
  4369.         return ret;
  4370. }
  4371.  
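/* Legacy (UMS) VT-leave ioctl: remove interrupts and idle the GPU. */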
  4372. int
  4373. i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
  4374.                        struct drm_file *file_priv)
  4375. {
  4376.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  4377.                 return 0;
  4378.  
  4379.         drm_irq_uninstall(dev);
  4380.  
  4381.         return i915_gem_suspend(dev);
  4382. }
  4383.  
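/* Last close: idle the hardware for legacy (non-KMS) userspace. */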
  4384. void
  4385. i915_gem_lastclose(struct drm_device *dev)
  4386. {
  4387.         int ret;
  4388.  
  4389.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  4390.                 return;
  4391.  
  4392.         ret = i915_gem_suspend(dev);
  4393.         if (ret)
  4394.                 DRM_ERROR("failed to idle hardware: %d\n", ret);
  4395. }
  4396. #endif
  4397.  
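/* Reset the per-ring active-object and request lists. */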
  4398. static void
  4399. init_ring_lists(struct intel_ring_buffer *ring)
  4400. {
  4401.         INIT_LIST_HEAD(&ring->active_list);
  4402.         INIT_LIST_HEAD(&ring->request_list);
  4403. }
  4404.  
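/* Initialise an address space and link it into the device's VM list. */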
  4405. static void i915_init_vm(struct drm_i915_private *dev_priv,
  4406.                          struct i915_address_space *vm)
  4407. {
  4408.         vm->dev = dev_priv->dev;
  4409.         INIT_LIST_HEAD(&vm->active_list);
  4410.         INIT_LIST_HEAD(&vm->inactive_list);
  4411.         INIT_LIST_HEAD(&vm->global_link);
  4412.         list_add(&vm->global_link, &dev_priv->vm_list);
  4413. }
  4414.  
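/*
 * Load-time GEM setup: initialise list heads and deferred work, apply the
 * GEN3 ARB workaround, size and restore the fence registers, and detect
 * bit-6 swizzling.
 */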
  4415. void
  4416. i915_gem_load(struct drm_device *dev)
  4417. {
  4418.         drm_i915_private_t *dev_priv = dev->dev_private;
  4419.         int i;
  4420.  
  4421.         INIT_LIST_HEAD(&dev_priv->vm_list);
  4422.         i915_init_vm(dev_priv, &dev_priv->gtt.base);
  4423.  
  4424.         INIT_LIST_HEAD(&dev_priv->context_list);
  4425.         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
  4426.         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
  4427.         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  4428.         for (i = 0; i < I915_NUM_RINGS; i++)
  4429.                 init_ring_lists(&dev_priv->ring[i]);
  4430.         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
  4431.                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
  4432.         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  4433.                           i915_gem_retire_work_handler);
  4434.         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
  4435.                           i915_gem_idle_work_handler);
  4436.         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  4437.  
  4438.         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
  4439.         if (IS_GEN3(dev)) {
  4440.                 I915_WRITE(MI_ARB_STATE,
  4441.                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
  4442.         }
  4443.  
  4444.         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
  4445.  
  4446.         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
  4447.                 dev_priv->num_fence_regs = 32;
  4448.         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
  4449.                 dev_priv->num_fence_regs = 16;
  4450.         else
  4451.                 dev_priv->num_fence_regs = 8;
  4452.  
  4453.         /* Initialize fence registers to zero */
  4454.         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
  4455.         i915_gem_restore_fences(dev);
  4456.  
  4457.         i915_gem_detect_bit_6_swizzle(dev);
  4458.  
  4459.         dev_priv->mm.interruptible = true;
  4460.  
  4461. }
  4462.  
  4463. #if 0
  4464. /*
  4465.  * Create a physically contiguous memory object for this object
  4466.  * e.g. for cursor + overlay regs
  4467.  */
  4468. static int i915_gem_init_phys_object(struct drm_device *dev,
  4469.                                      int id, int size, int align)
  4470. {
  4471.         drm_i915_private_t *dev_priv = dev->dev_private;
  4472.         struct drm_i915_gem_phys_object *phys_obj;
  4473.         int ret;
  4474.  
  4475.         if (dev_priv->mm.phys_objs[id - 1] || !size)
  4476.                 return 0;
  4477.  
  4478.         phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
  4479.         if (!phys_obj)
  4480.                 return -ENOMEM;
  4481.  
  4482.         phys_obj->id = id;
  4483.  
  4484.         phys_obj->handle = drm_pci_alloc(dev, size, align);
  4485.         if (!phys_obj->handle) {
  4486.                 ret = -ENOMEM;
  4487.                 goto kfree_obj;
  4488.         }
  4489. #ifdef CONFIG_X86
  4490.         set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  4491. #endif
  4492.  
  4493.         dev_priv->mm.phys_objs[id - 1] = phys_obj;
  4494.  
  4495.         return 0;
  4496. kfree_obj:
  4497.         kfree(phys_obj);
  4498.         return ret;
  4499. }
  4500.  
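/* Free one phys object slot, detaching any GEM object still bound to it. */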
  4501. static void i915_gem_free_phys_object(struct drm_device *dev, int id)
  4502. {
  4503.         drm_i915_private_t *dev_priv = dev->dev_private;
  4504.         struct drm_i915_gem_phys_object *phys_obj;
  4505.  
  4506.         if (!dev_priv->mm.phys_objs[id - 1])
  4507.                 return;
  4508.  
  4509.         phys_obj = dev_priv->mm.phys_objs[id - 1];
  4510.         if (phys_obj->cur_obj) {
  4511.                 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
  4512.         }
  4513.  
  4514. #ifdef CONFIG_X86
  4515.         set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
  4516. #endif
  4517.         drm_pci_free(dev, phys_obj->handle);
  4518.         kfree(phys_obj);
  4519.         dev_priv->mm.phys_objs[id - 1] = NULL;
  4520. }
  4521.  
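/* Free every phys object slot managed by the driver. */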
  4522. void i915_gem_free_all_phys_object(struct drm_device *dev)
  4523. {
  4524.         int i;
  4525.  
  4526.         for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
  4527.                 i915_gem_free_phys_object(dev, i);
  4528. }
  4529.  
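/*
 * Copy the contents of the phys object back into the GEM object's shmem
 * pages and drop the association.
 */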
  4530. void i915_gem_detach_phys_object(struct drm_device *dev,
  4531.                                  struct drm_i915_gem_object *obj)
  4532. {
  4533.         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
  4534.         char *vaddr;
  4535.         int i;
  4536.         int page_count;
  4537.  
  4538.         if (!obj->phys_obj)
  4539.                 return;
  4540.         vaddr = obj->phys_obj->handle->vaddr;
  4541.  
  4542.         page_count = obj->base.size / PAGE_SIZE;
  4543.         for (i = 0; i < page_count; i++) {
  4544.                 struct page *page = shmem_read_mapping_page(mapping, i);
  4545.                 if (!IS_ERR(page)) {
  4546.                         char *dst = kmap_atomic(page);
  4547.                         memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
  4548.                         kunmap_atomic(dst);
  4549.  
  4550.                         drm_clflush_pages(&page, 1);
  4551.  
  4552.                         set_page_dirty(page);
  4553.                         mark_page_accessed(page);
  4554.                         page_cache_release(page);
  4555.                 }
  4556.         }
  4557.         i915_gem_chipset_flush(dev);
  4558.  
  4559.         obj->phys_obj->cur_obj = NULL;
  4560.         obj->phys_obj = NULL;
  4561. }
  4562.  
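/*
 * Back a GEM object with a physically contiguous allocation (created on
 * first use) and copy its current shmem contents into that allocation.
 */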
  4563. int
  4564. i915_gem_attach_phys_object(struct drm_device *dev,
  4565.                             struct drm_i915_gem_object *obj,
  4566.                             int id,
  4567.                             int align)
  4568. {
  4569.         struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
  4570.         drm_i915_private_t *dev_priv = dev->dev_private;
  4571.         int ret = 0;
  4572.         int page_count;
  4573.         int i;
  4574.  
  4575.         if (id > I915_MAX_PHYS_OBJECT)
  4576.                 return -EINVAL;
  4577.  
  4578.         if (obj->phys_obj) {
  4579.                 if (obj->phys_obj->id == id)
  4580.                         return 0;
  4581.                 i915_gem_detach_phys_object(dev, obj);
  4582.         }
  4583.  
  4584.         /* create a new object */
  4585.         if (!dev_priv->mm.phys_objs[id - 1]) {
  4586.                 ret = i915_gem_init_phys_object(dev, id,
  4587.                                                 obj->base.size, align);
  4588.                 if (ret) {
  4589.                         DRM_ERROR("failed to init phys object %d size: %zu\n",
  4590.                                   id, obj->base.size);
  4591.                         return ret;
  4592.                 }
  4593.         }
  4594.  
  4595.         /* bind to the object */
  4596.         obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
  4597.         obj->phys_obj->cur_obj = obj;
  4598.  
  4599.         page_count = obj->base.size / PAGE_SIZE;
  4600.  
  4601.         for (i = 0; i < page_count; i++) {
  4602.                 struct page *page;
  4603.                 char *dst, *src;
  4604.  
  4605.                 page = shmem_read_mapping_page(mapping, i);
  4606.                 if (IS_ERR(page))
  4607.                         return PTR_ERR(page);
  4608.  
  4609.                 src = kmap_atomic(page);
  4610.                 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
  4611.                 memcpy(dst, src, PAGE_SIZE);
  4612.                 kunmap_atomic(src);
  4613.  
  4614.                 mark_page_accessed(page);
  4615.                 page_cache_release(page);
  4616.         }
  4617.  
  4618.         return 0;
  4619. }
  4620.  
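/*
 * pwrite fast path for phys objects: copy user data directly into the
 * contiguous allocation, falling back to a sleeping copy_from_user() if
 * the atomic copy faults.
 */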
  4621. static int
  4622. i915_gem_phys_pwrite(struct drm_device *dev,
  4623.                      struct drm_i915_gem_object *obj,
  4624.                      struct drm_i915_gem_pwrite *args,
  4625.                      struct drm_file *file_priv)
  4626. {
  4627.         void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
  4628.         char __user *user_data = to_user_ptr(args->data_ptr);
  4629.  
  4630.         if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
  4631.                 unsigned long unwritten;
  4632.  
  4633.                 /* The physical object once assigned is fixed for the lifetime
  4634.                  * of the obj, so we can safely drop the lock and continue
  4635.                  * to access vaddr.
  4636.                  */
  4637.                 mutex_unlock(&dev->struct_mutex);
  4638.                 unwritten = copy_from_user(vaddr, user_data, args->size);
  4639.                 mutex_lock(&dev->struct_mutex);
  4640.                 if (unwritten)
  4641.                         return -EFAULT;
  4642.         }
  4643.  
  4644.         i915_gem_chipset_flush(dev);
  4645.         return 0;
  4646. }
  4647.  
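/* Disassociate outstanding requests from a client that is closing its file. */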
  4648. void i915_gem_release(struct drm_device *dev, struct drm_file *file)
  4649. {
  4650.         struct drm_i915_file_private *file_priv = file->driver_priv;
  4651.  
  4652.         /* Clean up our request list when the client is going away, so that
  4653.          * later retire_requests won't dereference our soon-to-be-gone
  4654.          * file_priv.
  4655.          */
  4656.         spin_lock(&file_priv->mm.lock);
  4657.         while (!list_empty(&file_priv->mm.request_list)) {
  4658.                 struct drm_i915_gem_request *request;
  4659.  
  4660.                 request = list_first_entry(&file_priv->mm.request_list,
  4661.                                            struct drm_i915_gem_request,
  4662.                                            client_list);
  4663.                 list_del(&request->client_list);
  4664.                 request->file_priv = NULL;
  4665.         }
  4666.         spin_unlock(&file_priv->mm.lock);
  4667. }
  4668. #endif
  4669.  
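/* Best-effort check that @task currently owns @mutex. */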
  4670. static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
  4671. {
  4672.         if (!mutex_is_locked(mutex))
  4673.                 return false;
  4674.  
  4675. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
  4676.         return mutex->owner == task;
  4677. #else
  4678.         /* Since UP may be pre-empted, we cannot assume that we own the lock */
  4679.         return false;
  4680. #endif
  4681. }
  4682.  
  4683. /* Helpers for objects bound into one or more address spaces (VMs) */
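/* Return the offset at which the object is bound in the given VM. */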
  4684. unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
  4685.                                   struct i915_address_space *vm)
  4686. {
  4687.         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
  4688.         struct i915_vma *vma;
  4689.  
  4690.         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
  4691.                 vm = &dev_priv->gtt.base;
  4692.  
  4693.         BUG_ON(list_empty(&o->vma_list));
  4694.         list_for_each_entry(vma, &o->vma_list, vma_link) {
  4695.                 if (vma->vm == vm)
  4696.                         return vma->node.start;
  4697.  
  4698.         }
  4699.         return 0; /* port deviation: mainline i915 returns -1 here */
  4700. }
  4701.  
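/* Return true if the object has an allocated node in the given VM. */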
  4702. bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
  4703.                         struct i915_address_space *vm)
  4704. {
  4705.         struct i915_vma *vma;
  4706.  
  4707.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4708.                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
  4709.                         return true;
  4710.  
  4711.         return false;
  4712. }
  4713.  
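/* Return true if the object is bound in any address space. */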
  4714. bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
  4715. {
  4716.         struct i915_vma *vma;
  4717.  
  4718.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4719.                 if (drm_mm_node_allocated(&vma->node))
  4720.                         return true;
  4721.  
  4722.         return false;
  4723. }
  4724.  
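/* Return the size of the object's binding in the given VM, or 0 if unbound. */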
  4725. unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
  4726.                                 struct i915_address_space *vm)
  4727. {
  4728.         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
  4729.         struct i915_vma *vma;
  4730.  
  4731.         if (vm == &dev_priv->mm.aliasing_ppgtt->base)
  4732.                 vm = &dev_priv->gtt.base;
  4733.  
  4734.         BUG_ON(list_empty(&o->vma_list));
  4735.  
  4736.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4737.                 if (vma->vm == vm)
  4738.                         return vma->node.size;
  4739.  
  4740.         return 0;
  4741. }
  4742.  
  4743.  
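/*
 * Return the object's GGTT VMA.  Relies on the GGTT VMA being the first
 * entry in vma_list; warns and returns NULL if that does not hold.
 */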
  4744. struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
  4745. {
  4746.         struct i915_vma *vma;
  4747.  
  4748.         if (WARN_ON(list_empty(&obj->vma_list)))
  4749.                 return NULL;
  4750.  
  4751.         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
  4752.         if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
  4753.                 return NULL;
  4754.  
  4755.         return vma;
  4756. }
  4757.