Subversion Repositories Kolibri OS

  1. /*
  2.  * Copyright © 2008 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *
  26.  */
  27.  
  28. #include <drm/drmP.h>
  29. #include <drm/drm_vma_manager.h>
  30. #include <drm/i915_drm.h>
  31. #include "i915_drv.h"
  32. #include "i915_trace.h"
  33. #include "intel_drv.h"
  34. #include <linux/shmem_fs.h>
  35. #include <linux/slab.h>
  36. //#include <linux/swap.h>
  37. #include <linux/scatterlist.h>
  38. #include <linux/pci.h>
  39.  
  40. extern int x86_clflush_size;
  41.  
  42. #define PROT_READ       0x1             /* page can be read */
  43. #define PROT_WRITE      0x2             /* page can be written */
  44. #define MAP_SHARED      0x01            /* Share changes */
  45.  
  46.  
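/*
 * Local helpers for this port: convert nanoseconds to jiffies using the same
 * overflow-aware arithmetic as the corresponding Linux kernel routines.
 */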
  47. u64 nsecs_to_jiffies64(u64 n)
  48. {
  49. #if (NSEC_PER_SEC % HZ) == 0
  50.         /* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
  51.         return div_u64(n, NSEC_PER_SEC / HZ);
  52. #elif (HZ % 512) == 0
  53.         /* overflow after 292 years if HZ = 1024 */
  54.         return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
  55. #else
  56.         /*
  57.          * Generic case - optimized for cases where HZ is a multiple of 3.
  58.          * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
  59.          */
  60.         return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
  61. #endif
  62. }
  63.  
  64. unsigned long nsecs_to_jiffies(u64 n)
  65. {
  66.     return (unsigned long)nsecs_to_jiffies64(n);
  67. }
  68.  
  69.  
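/* Prototypes for helpers implemented elsewhere in this port. */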
  70. struct drm_i915_gem_object *get_fb_obj(void);
  71.  
  72. unsigned long vm_mmap(struct file *file, unsigned long addr,
  73.          unsigned long len, unsigned long prot,
  74.          unsigned long flag, unsigned long offset);
  75.  
  76.  
  77. #define MAX_ERRNO       4095
  78.  
  79. #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
  80.  
  81.  
  82. static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  83. static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
  84.                                                    bool force);
  85. static __must_check int
  86. i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  87.                                bool readonly);
  88. static void
  89. i915_gem_object_retire(struct drm_i915_gem_object *obj);
  90.  
  91. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  92.                                  struct drm_i915_gem_object *obj);
  93. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  94.                                          struct drm_i915_fence_reg *fence,
  95.                                          bool enable);
  96.  
  97.  
  98. static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
  99.  
  100. static bool cpu_cache_is_coherent(struct drm_device *dev,
  101.                                   enum i915_cache_level level)
  102. {
  103.         return HAS_LLC(dev) || level != I915_CACHE_NONE;
  104. }
  105.  
  106. static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  107. {
  108.         if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
  109.                 return true;
  110.  
  111.         return obj->pin_display;
  112. }
  113.  
  114. static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
  115. {
  116.         if (obj->tiling_mode)
  117.                 i915_gem_release_mmap(obj);
  118.  
  119.         /* As we do not have an associated fence register, we will force
  120.          * a tiling change if we ever need to acquire one.
  121.          */
  122.         obj->fence_dirty = false;
  123.         obj->fence_reg = I915_FENCE_REG_NONE;
  124. }
  125.  
  126. /* some bookkeeping */
  127. static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
  128.                                   size_t size)
  129. {
  130.         spin_lock(&dev_priv->mm.object_stat_lock);
  131.         dev_priv->mm.object_count++;
  132.         dev_priv->mm.object_memory += size;
  133.         spin_unlock(&dev_priv->mm.object_stat_lock);
  134. }
  135.  
  136. static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
  137.                                      size_t size)
  138. {
  139.         spin_lock(&dev_priv->mm.object_stat_lock);
  140.         dev_priv->mm.object_count--;
  141.         dev_priv->mm.object_memory -= size;
  142.         spin_unlock(&dev_priv->mm.object_stat_lock);
  143. }
  144.  
  145. static int
  146. i915_gem_wait_for_error(struct i915_gpu_error *error)
  147. {
  148.         int ret;
  149.  
  150. #define EXIT_COND (!i915_reset_in_progress(error))
  151.         if (EXIT_COND)
  152.                 return 0;
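        /* The 10 second reset-completion wait below is compiled out in this
         * port; if a reset is in progress we fall through and return 0
         * without waiting. */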
  153. #if 0
  154.         /*
  155.          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
  156.          * userspace. If it takes that long something really bad is going on and
  157.          * we should simply try to bail out and fail as gracefully as possible.
  158.          */
  159.         ret = wait_event_interruptible_timeout(error->reset_queue,
  160.                                                EXIT_COND,
  161.                                                10*HZ);
  162.         if (ret == 0) {
  163.                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
  164.                 return -EIO;
  165.         } else if (ret < 0) {
  166.                 return ret;
  167.         }
  168.  
  169. #endif
  170. #undef EXIT_COND
  171.  
  172.         return 0;
  173. }
  174.  
  175. int i915_mutex_lock_interruptible(struct drm_device *dev)
  176. {
  177.         struct drm_i915_private *dev_priv = dev->dev_private;
  178.         int ret;
  179.  
  180.         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
  181.         if (ret)
  182.                 return ret;
  183.  
  184.         ret = mutex_lock_interruptible(&dev->struct_mutex);
  185.         if (ret)
  186.                 return ret;
  187.  
  188.         WARN_ON(i915_verify_lists(dev));
  189.         return 0;
  190. }
  191.  
  192. static inline bool
  193. i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
  194. {
  195.         return i915_gem_obj_bound_any(obj) && !obj->active;
  196. }
  197.  
  198. int
  199. i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  200.                             struct drm_file *file)
  201. {
  202.         struct drm_i915_private *dev_priv = dev->dev_private;
  203.         struct drm_i915_gem_get_aperture *args = data;
  204.         struct drm_i915_gem_object *obj;
  205.         size_t pinned;
  206.  
  207.         pinned = 0;
  208.         mutex_lock(&dev->struct_mutex);
  209.         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
  210.                 if (i915_gem_obj_is_pinned(obj))
  211.                         pinned += i915_gem_obj_ggtt_size(obj);
  212.         mutex_unlock(&dev->struct_mutex);
  213.  
  214.         args->aper_size = dev_priv->gtt.base.total;
  215.         args->aper_available_size = args->aper_size - pinned;
  216.  
  217.         return 0;
  218. }
  219.  
  220. void *i915_gem_object_alloc(struct drm_device *dev)
  221. {
  222.         struct drm_i915_private *dev_priv = dev->dev_private;
  223.         return kmalloc(sizeof(struct drm_i915_gem_object), 0);
  224. }
  225.  
  226. void i915_gem_object_free(struct drm_i915_gem_object *obj)
  227. {
  228.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  229.         kfree(obj);
  230. }
  231.  
  232. static int
  233. i915_gem_create(struct drm_file *file,
  234.                 struct drm_device *dev,
  235.                 uint64_t size,
  236.                 uint32_t *handle_p)
  237. {
  238.         struct drm_i915_gem_object *obj;
  239.         int ret;
  240.         u32 handle;
  241.  
  242.         size = roundup(size, PAGE_SIZE);
  243.         if (size == 0)
  244.                 return -EINVAL;
  245.  
  246.         /* Allocate the new object */
  247.         obj = i915_gem_alloc_object(dev, size);
  248.         if (obj == NULL)
  249.                 return -ENOMEM;
  250.  
  251.         ret = drm_gem_handle_create(file, &obj->base, &handle);
  252.         /* drop reference from allocate - handle holds it now */
  253.         drm_gem_object_unreference_unlocked(&obj->base);
  254.         if (ret)
  255.                 return ret;
  256.  
  257.         *handle_p = handle;
  258.         return 0;
  259. }
  260.  
  261. int
  262. i915_gem_dumb_create(struct drm_file *file,
  263.                      struct drm_device *dev,
  264.                      struct drm_mode_create_dumb *args)
  265. {
  266.         /* have to work out size/pitch and return them */
  267.         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
  268.         args->size = args->pitch * args->height;
  269.         return i915_gem_create(file, dev,
  270.                                args->size, &args->handle);
  271. }
  272.  
  273. /**
  274.  * Creates a new mm object and returns a handle to it.
  275.  */
  276. int
  277. i915_gem_create_ioctl(struct drm_device *dev, void *data,
  278.                       struct drm_file *file)
  279. {
  280.         struct drm_i915_gem_create *args = data;
  281.  
  282.         return i915_gem_create(file, dev,
  283.                                args->size, &args->handle);
  284. }
  285.  
  286.  
  287. #if 0
  288.  
  289. static inline int
  290. __copy_to_user_swizzled(char __user *cpu_vaddr,
  291.                         const char *gpu_vaddr, int gpu_offset,
  292.                 int length)
  293. {
  294.         int ret, cpu_offset = 0;
  295.  
  296.         while (length > 0) {
  297.                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
  298.                 int this_length = min(cacheline_end - gpu_offset, length);
  299.                 int swizzled_gpu_offset = gpu_offset ^ 64;
  300.  
  301.                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
  302.                                      gpu_vaddr + swizzled_gpu_offset,
  303.                                      this_length);
  304.                 if (ret)
  305.                         return ret + length;
  306.  
  307.                 cpu_offset += this_length;
  308.                 gpu_offset += this_length;
  309.                 length -= this_length;
  310.         }
  311.  
  312.         return 0;
  313. }
  314.  
  315. static inline int
  316. __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
  317.                           const char __user *cpu_vaddr,
  318.                           int length)
  319. {
  320.         int ret, cpu_offset = 0;
  321.  
  322.         while (length > 0) {
  323.                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
  324.                 int this_length = min(cacheline_end - gpu_offset, length);
  325.                 int swizzled_gpu_offset = gpu_offset ^ 64;
  326.  
  327.                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
  328.                                cpu_vaddr + cpu_offset,
  329.                                this_length);
  330.                 if (ret)
  331.                         return ret + length;
  332.  
  333.                 cpu_offset += this_length;
  334.                 gpu_offset += this_length;
  335.                 length -= this_length;
  336.         }
  337.  
  338.         return 0;
  339. }
  340.  
  341. /* Per-page copy function for the shmem pread fastpath.
  342.  * Flushes invalid cachelines before reading the target if
  343.  * needs_clflush is set. */
  344. static int
  345. shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
  346.                  char __user *user_data,
  347.                  bool page_do_bit17_swizzling, bool needs_clflush)
  348. {
  349.         char *vaddr;
  350.         int ret;
  351.  
  352.         if (unlikely(page_do_bit17_swizzling))
  353.                 return -EINVAL;
  354.  
  355.         vaddr = kmap_atomic(page);
  356.         if (needs_clflush)
  357.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  358.                                        page_length);
  359.         ret = __copy_to_user_inatomic(user_data,
  360.                                       vaddr + shmem_page_offset,
  361.                                       page_length);
  362.         kunmap_atomic(vaddr);
  363.  
  364.         return ret ? -EFAULT : 0;
  365. }
  366.  
  367. static void
  368. shmem_clflush_swizzled_range(char *addr, unsigned long length,
  369.                              bool swizzled)
  370. {
  371.         if (unlikely(swizzled)) {
  372.                 unsigned long start = (unsigned long) addr;
  373.                 unsigned long end = (unsigned long) addr + length;
  374.  
  375.                 /* For swizzling simply ensure that we always flush both
  376.                  * channels. Lame, but simple and it works. Swizzled
  377.                  * pwrite/pread is far from a hotpath - current userspace
  378.                  * doesn't use it at all. */
  379.                 start = round_down(start, 128);
  380.                 end = round_up(end, 128);
  381.  
  382.                 drm_clflush_virt_range((void *)start, end - start);
  383.         } else {
  384.                 drm_clflush_virt_range(addr, length);
  385.         }
  386.  
  387. }
  388.  
  389. /* Only difference to the fast-path function is that this can handle bit17
  390.  * and uses non-atomic copy and kmap functions. */
  391. static int
  392. shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
  393.                  char __user *user_data,
  394.                  bool page_do_bit17_swizzling, bool needs_clflush)
  395. {
  396.         char *vaddr;
  397.         int ret;
  398.  
  399.         vaddr = kmap(page);
  400.         if (needs_clflush)
  401.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  402.                                              page_length,
  403.                                              page_do_bit17_swizzling);
  404.  
  405.         if (page_do_bit17_swizzling)
  406.                 ret = __copy_to_user_swizzled(user_data,
  407.                                               vaddr, shmem_page_offset,
  408.                                               page_length);
  409.         else
  410.                 ret = __copy_to_user(user_data,
  411.                                      vaddr + shmem_page_offset,
  412.                                      page_length);
  413.         kunmap(page);
  414.  
  415.         return ret ? -EFAULT : 0;
  416. }
  417.  
  418. static int
  419. i915_gem_shmem_pread(struct drm_device *dev,
  420.                           struct drm_i915_gem_object *obj,
  421.                           struct drm_i915_gem_pread *args,
  422.                           struct drm_file *file)
  423. {
  424.         char __user *user_data;
  425.         ssize_t remain;
  426.         loff_t offset;
  427.         int shmem_page_offset, page_length, ret = 0;
  428.         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
  429.         int prefaulted = 0;
  430.         int needs_clflush = 0;
  431.         struct sg_page_iter sg_iter;
  432.  
  433.         user_data = to_user_ptr(args->data_ptr);
  434.         remain = args->size;
  435.  
  436.         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  437.  
  438.         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
  439.         if (ret)
  440.                 return ret;
  441.  
  442.         offset = args->offset;
  443.  
  444.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
  445.                          offset >> PAGE_SHIFT) {
  446.                 struct page *page = sg_page_iter_page(&sg_iter);
  447.  
  448.                 if (remain <= 0)
  449.                         break;
  450.  
  451.                 /* Operation in this page
  452.                  *
  453.                  * shmem_page_offset = offset within page in shmem file
  454.                  * page_length = bytes to copy for this page
  455.                  */
  456.                 shmem_page_offset = offset_in_page(offset);
  457.                 page_length = remain;
  458.                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
  459.                         page_length = PAGE_SIZE - shmem_page_offset;
  460.  
  461.                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
  462.                         (page_to_phys(page) & (1 << 17)) != 0;
  463.  
  464.                 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
  465.                                        user_data, page_do_bit17_swizzling,
  466.                                        needs_clflush);
  467.                 if (ret == 0)
  468.                         goto next_page;
  469.  
  470.                 mutex_unlock(&dev->struct_mutex);
  471.  
  472.                 if (likely(!i915.prefault_disable) && !prefaulted) {
  473.                         ret = fault_in_multipages_writeable(user_data, remain);
  474.                         /* Userspace is tricking us, but we've already clobbered
  475.                          * its pages with the prefault and promised to write the
  476.                          * data up to the first fault. Hence ignore any errors
  477.                          * and just continue. */
  478.                         (void)ret;
  479.                         prefaulted = 1;
  480.                 }
  481.  
  482.                 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
  483.                                        user_data, page_do_bit17_swizzling,
  484.                                        needs_clflush);
  485.  
  486.                 mutex_lock(&dev->struct_mutex);
  487.  
  488.                 if (ret)
  489.                         goto out;
  490.  
  491. next_page:
  492.                 remain -= page_length;
  493.                 user_data += page_length;
  494.                 offset += page_length;
  495.         }
  496.  
  497. out:
  498.         i915_gem_object_unpin_pages(obj);
  499.  
  500.         return ret;
  501. }
  502.  
  503. /**
  504.  * Reads data from the object referenced by handle.
  505.  *
  506.  * On error, the contents of *data are undefined.
  507.  */
  508. int
  509. i915_gem_pread_ioctl(struct drm_device *dev, void *data,
  510.                      struct drm_file *file)
  511. {
  512.         struct drm_i915_gem_pread *args = data;
  513.         struct drm_i915_gem_object *obj;
  514.         int ret = 0;
  515.  
  516.         if (args->size == 0)
  517.                 return 0;
  518.  
  519.         if (!access_ok(VERIFY_WRITE,
  520.                        to_user_ptr(args->data_ptr),
  521.                        args->size))
  522.                 return -EFAULT;
  523.  
  524.         ret = i915_mutex_lock_interruptible(dev);
  525.         if (ret)
  526.                 return ret;
  527.  
  528.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  529.         if (&obj->base == NULL) {
  530.                 ret = -ENOENT;
  531.                 goto unlock;
  532.         }
  533.  
  534.         /* Bounds check source.  */
  535.         if (args->offset > obj->base.size ||
  536.             args->size > obj->base.size - args->offset) {
  537.                 ret = -EINVAL;
  538.                 goto out;
  539.         }
  540.  
  541.         /* prime objects have no backing filp to GEM pread/pwrite
  542.          * pages from.
  543.          */
  544.         if (!obj->base.filp) {
  545.                 ret = -EINVAL;
  546.                 goto out;
  547.         }
  548.  
  549.         trace_i915_gem_object_pread(obj, args->offset, args->size);
  550.  
  551.         ret = i915_gem_shmem_pread(dev, obj, args, file);
  552.  
  553. out:
  554.         drm_gem_object_unreference(&obj->base);
  555. unlock:
  556.         mutex_unlock(&dev->struct_mutex);
  557.         return ret;
  558. }
  559.  
  560. /* This is the fast write path which cannot handle
  561.  * page faults in the source data
  562.  */
  563.  
  564. static inline int
  565. fast_user_write(struct io_mapping *mapping,
  566.                 loff_t page_base, int page_offset,
  567.                 char __user *user_data,
  568.                 int length)
  569. {
  570.         void __iomem *vaddr_atomic;
  571.         void *vaddr;
  572.         unsigned long unwritten;
  573.  
  574.         vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
  575.         /* We can use the cpu mem copy function because this is X86. */
  576.         vaddr = (void __force*)vaddr_atomic + page_offset;
  577.         unwritten = __copy_from_user_inatomic_nocache(vaddr,
  578.                                                       user_data, length);
  579.         io_mapping_unmap_atomic(vaddr_atomic);
  580.         return unwritten;
  581. }
  582. #endif
  583.  
  584. #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
  585. /**
  586.  * This is the fast pwrite path, where we copy the data directly from the
  587.  * user into the GTT, uncached.
  588.  */
  589. static int
  590. i915_gem_gtt_pwrite_fast(struct drm_device *dev,
  591.                          struct drm_i915_gem_object *obj,
  592.                          struct drm_i915_gem_pwrite *args,
  593.                          struct drm_file *file)
  594. {
  595.         struct drm_i915_private *dev_priv = dev->dev_private;
  596.         ssize_t remain;
  597.         loff_t offset, page_base;
  598.         char __user *user_data;
  599.         int page_offset, page_length, ret;
  600.  
  601.         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
  602.         if (ret)
  603.                 goto out;
  604.  
  605.         ret = i915_gem_object_set_to_gtt_domain(obj, true);
  606.         if (ret)
  607.                 goto out_unpin;
  608.  
  609.         ret = i915_gem_object_put_fence(obj);
  610.         if (ret)
  611.                 goto out_unpin;
  612.  
  613.         user_data = to_user_ptr(args->data_ptr);
  614.         remain = args->size;
  615.  
  616.         offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
  617.  
  618.         while (remain > 0) {
  619.                 /* Operation in this page
  620.                  *
  621.                  * page_base = page offset within aperture
  622.                  * page_offset = offset within page
  623.                  * page_length = bytes to copy for this page
  624.                  */
  625.                 page_base = offset & PAGE_MASK;
  626.                 page_offset = offset_in_page(offset);
  627.                 page_length = remain;
  628.                 if ((page_offset + remain) > PAGE_SIZE)
  629.                         page_length = PAGE_SIZE - page_offset;
  630.  
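        /* KolibriOS path: map the target aperture page into the driver's
         * mappable window with MapPage() and copy with a plain memcpy()
         * instead of the io_mapping/__copy_from_user_inatomic_nocache()
         * route used upstream. */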
  631.         MapPage(dev_priv->gtt.mappable, dev_priv->gtt.mappable_base+page_base, PG_SW);
  632.  
  633.         memcpy((char*)dev_priv->gtt.mappable+page_offset, user_data, page_length);
  634.  
  635.                 remain -= page_length;
  636.                 user_data += page_length;
  637.                 offset += page_length;
  638.         }
  639.  
  640. out_unpin:
  641.         i915_gem_object_ggtt_unpin(obj);
  642. out:
  643.     return ret;
  644. }
  645.  
  646. /* Per-page copy function for the shmem pwrite fastpath.
  647.  * Flushes invalid cachelines before writing to the target if
  648.  * needs_clflush_before is set and flushes out any written cachelines after
  649.  * writing if needs_clflush is set. */
  650. static int
  651. shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
  652.                   char __user *user_data,
  653.                   bool page_do_bit17_swizzling,
  654.                   bool needs_clflush_before,
  655.                   bool needs_clflush_after)
  656. {
  657.         char *vaddr;
  659.  
  660.         if (unlikely(page_do_bit17_swizzling))
  661.                 return -EINVAL;
  662.  
  663.         vaddr = kmap_atomic(page);
  664.         if (needs_clflush_before)
  665.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  666.                                        page_length);
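        /* This port copies straight from the caller's buffer; the upstream
         * __copy_from_user_inatomic() path is not used here. */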
  667.         memcpy(vaddr + shmem_page_offset,
  668.                                                 user_data,
  669.                                                 page_length);
  670.         if (needs_clflush_after)
  671.                 drm_clflush_virt_range(vaddr + shmem_page_offset,
  672.                                        page_length);
  673.         kunmap_atomic(vaddr);
  674.  
  675.         return 0;
  676. }
  677. #if 0
  678.  
  679. /* Only difference to the fast-path function is that this can handle bit17
  680.  * and uses non-atomic copy and kmap functions. */
  681. static int
  682. shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
  683.                   char __user *user_data,
  684.                   bool page_do_bit17_swizzling,
  685.                   bool needs_clflush_before,
  686.                   bool needs_clflush_after)
  687. {
  688.         char *vaddr;
  689.         int ret;
  690.  
  691.         vaddr = kmap(page);
  692.         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
  693.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  694.                                              page_length,
  695.                                              page_do_bit17_swizzling);
  696.         if (page_do_bit17_swizzling)
  697.                 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
  698.                                                 user_data,
  699.                                                 page_length);
  700.         else
  701.                 ret = __copy_from_user(vaddr + shmem_page_offset,
  702.                                        user_data,
  703.                                        page_length);
  704.         if (needs_clflush_after)
  705.                 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
  706.                                              page_length,
  707.                                              page_do_bit17_swizzling);
  708.         kunmap(page);
  709.  
  710.         return ret ? -EFAULT : 0;
  711. }
  712. #endif
  713.  
  714.  
  715. static int
  716. i915_gem_shmem_pwrite(struct drm_device *dev,
  717.                       struct drm_i915_gem_object *obj,
  718.                       struct drm_i915_gem_pwrite *args,
  719.                       struct drm_file *file)
  720. {
  721.         ssize_t remain;
  722.         loff_t offset;
  723.         char __user *user_data;
  724.         int shmem_page_offset, page_length, ret = 0;
  725.         int obj_do_bit17_swizzling, page_do_bit17_swizzling;
  726.         int hit_slowpath = 0;
  727.         int needs_clflush_after = 0;
  728.         int needs_clflush_before = 0;
  729.         struct sg_page_iter sg_iter;
  730.  
  731.         user_data = to_user_ptr(args->data_ptr);
  732.         remain = args->size;
  733.  
  734.         obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
  735.  
  736.         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
  737.                 /* If we're not in the cpu write domain, set ourself into the gtt
  738.                  * write domain and manually flush cachelines (if required). This
  739.                  * optimizes for the case when the gpu will use the data
  740.                  * right away and we therefore have to clflush anyway. */
  741.                 needs_clflush_after = cpu_write_needs_clflush(obj);
  742.                 ret = i915_gem_object_wait_rendering(obj, false);
  743.                 if (ret)
  744.                         return ret;
  745.  
  746.                 i915_gem_object_retire(obj);
  747.         }
  748.         /* Same trick applies to invalidate partially written cachelines read
  749.          * before writing. */
  750.         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
  751.                 needs_clflush_before =
  752.                         !cpu_cache_is_coherent(dev, obj->cache_level);
  753.  
  754.         ret = i915_gem_object_get_pages(obj);
  755.         if (ret)
  756.                 return ret;
  757.  
  758.         i915_gem_object_pin_pages(obj);
  759.  
  760.         offset = args->offset;
  761.         obj->dirty = 1;
  762.  
  763.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
  764.                          offset >> PAGE_SHIFT) {
  765.                 struct page *page = sg_page_iter_page(&sg_iter);
  766.                 int partial_cacheline_write;
  767.  
  768.                 if (remain <= 0)
  769.                         break;
  770.  
  771.                 /* Operation in this page
  772.                  *
  773.                  * shmem_page_offset = offset within page in shmem file
  774.                  * page_length = bytes to copy for this page
  775.                  */
  776.                 shmem_page_offset = offset_in_page(offset);
  777.  
  778.                 page_length = remain;
  779.                 if ((shmem_page_offset + page_length) > PAGE_SIZE)
  780.                         page_length = PAGE_SIZE - shmem_page_offset;
  781.  
  782.                 /* If we don't overwrite a cacheline completely we need to be
  783.                  * careful to have up-to-date data by first clflushing. Don't
  784.                  * overcomplicate things and flush the entire page. */
  785.                 partial_cacheline_write = needs_clflush_before &&
  786.                         ((shmem_page_offset | page_length)
  787.                                 & (x86_clflush_size - 1));
  788.  
  789.                 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
  790.                         (page_to_phys(page) & (1 << 17)) != 0;
  791.  
  792.                 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
  793.                                         user_data, page_do_bit17_swizzling,
  794.                                         partial_cacheline_write,
  795.                                         needs_clflush_after);
  796.                 if (ret == 0)
  797.                         goto next_page;
  798.  
  799.                 hit_slowpath = 1;
  800.                 mutex_unlock(&dev->struct_mutex);
  801.                 dbgprintf("%s need shmem_pwrite_slow\n",__FUNCTION__);
  802.  
  803. //              ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
  804. //                                      user_data, page_do_bit17_swizzling,
  805. //                                      partial_cacheline_write,
  806. //                                      needs_clflush_after);
  807.  
  808.                 mutex_lock(&dev->struct_mutex);
  809.  
  810.                 if (ret)
  811.                         goto out;
  812.  
  813. next_page:
  814.                 remain -= page_length;
  815.                 user_data += page_length;
  816.                 offset += page_length;
  817.         }
  818.  
  819. out:
  820.         i915_gem_object_unpin_pages(obj);
  821.  
  822.         if (hit_slowpath) {
  823.                 /*
  824.                  * Fixup: Flush cpu caches in case we didn't flush the dirty
  825.                  * cachelines in-line while writing and the object moved
  826.                  * out of the cpu write domain while we've dropped the lock.
  827.                  */
  828.                 if (!needs_clflush_after &&
  829.                     obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
  830.                         if (i915_gem_clflush_object(obj, obj->pin_display))
  831.                                 i915_gem_chipset_flush(dev);
  832.                 }
  833.         }
  834.  
  835.         if (needs_clflush_after)
  836.                 i915_gem_chipset_flush(dev);
  837.  
  838.         return ret;
  839. }
  840.  
  841. /**
  842.  * Writes data to the object referenced by handle.
  843.  *
  844.  * On error, the contents of the buffer that were to be modified are undefined.
  845.  */
  846. int
  847. i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
  848.                       struct drm_file *file)
  849. {
  850.         struct drm_i915_gem_pwrite *args = data;
  851.         struct drm_i915_gem_object *obj;
  852.         int ret;
  853.  
  854.         if (args->size == 0)
  855.                 return 0;
  856.  
  857.  
  858.         ret = i915_mutex_lock_interruptible(dev);
  859.         if (ret)
  860.                 return ret;
  861.  
  862.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  863.         if (&obj->base == NULL) {
  864.                 ret = -ENOENT;
  865.                 goto unlock;
  866.         }
  867.  
  868.         /* Bounds check destination. */
  869.         if (args->offset > obj->base.size ||
  870.             args->size > obj->base.size - args->offset) {
  871.                 ret = -EINVAL;
  872.                 goto out;
  873.         }
  874.  
  875.         /* prime objects have no backing filp to GEM pread/pwrite
  876.          * pages from.
  877.          */
  878.         if (!obj->base.filp) {
  879.                 ret = -EINVAL;
  880.                 goto out;
  881.         }
  882.  
  883.         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
  884.  
  885.         ret = -EFAULT;
  886.         /* We can only do the GTT pwrite on untiled buffers, as otherwise
  887.          * it would end up going through the fenced access, and we'll get
  888.          * different detiling behavior between reading and writing.
  889.          * pread/pwrite currently are reading and writing from the CPU
  890.          * perspective, requiring manual detiling by the client.
  891.          */
  892.         if (obj->tiling_mode == I915_TILING_NONE &&
  893.             obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
  894.             cpu_write_needs_clflush(obj)) {
  895.                 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
  896.                 /* Note that the gtt paths might fail with non-page-backed user
  897.                  * pointers (e.g. gtt mappings when moving data between
  898.                  * textures). Fallback to the shmem path in that case. */
  899.         }
  900.  
  901.         if (ret == -EFAULT || ret == -ENOSPC)
  902.                 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
  903.  
  904. out:
  905.         drm_gem_object_unreference(&obj->base);
  906. unlock:
  907.         mutex_unlock(&dev->struct_mutex);
  908.         return ret;
  909. }
  910.  
  911. int
  912. i915_gem_check_wedge(struct i915_gpu_error *error,
  913.                      bool interruptible)
  914. {
  915.         if (i915_reset_in_progress(error)) {
  916.                 /* Non-interruptible callers can't handle -EAGAIN, hence return
  917.                  * -EIO unconditionally for these. */
  918.                 if (!interruptible)
  919.                         return -EIO;
  920.  
  921.                 /* Recovery complete, but the reset failed ... */
  922.                 if (i915_terminally_wedged(error))
  923.                         return -EIO;
  924.  
  925.                 return -EAGAIN;
  926.         }
  927.  
  928.         return 0;
  929. }
  930.  
  931. /*
  932.  * Compare seqno against outstanding lazy request. Emit a request if they are
  933.  * equal.
  934.  */
  935. int
  936. i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
  937. {
  938.         int ret;
  939.  
  940.         BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
  941.  
  942.         ret = 0;
  943.         if (seqno == ring->outstanding_lazy_seqno)
  944.                 ret = i915_add_request(ring, NULL);
  945.  
  946.         return ret;
  947. }
  948.  
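/* Timer callback stub; the wake_up_process() call is disabled in this port. */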
  949. static void fake_irq(unsigned long data)
  950. {
  951. //      wake_up_process((struct task_struct *)data);
  952. }
  953.  
  954. static bool missed_irq(struct drm_i915_private *dev_priv,
  955.                        struct intel_engine_cs *ring)
  956. {
  957.         return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
  958. }
  959.  
  960. static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  961. {
  962.         if (file_priv == NULL)
  963.                 return true;
  964.  
  965.         return !atomic_xchg(&file_priv->rps_wait_boost, true);
  966. }
  967.  
  968. /**
  969.  * __i915_wait_seqno - wait until execution of seqno has finished
  970.  * @ring: the ring expected to report seqno
  971.  * @seqno: the sequence number to wait for
  972.  * @reset_counter: reset sequence associated with the given seqno
  973.  * @interruptible: do an interruptible wait (normally yes)
  974.  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
  975.  *
  976.  * Note: It is of utmost importance that the passed in seqno and reset_counter
  977.  * values have been read by the caller in an smp safe manner. Where read-side
  978.  * locks are involved, it is sufficient to read the reset_counter before
  979.  * unlocking the lock that protects the seqno. For lockless tricks, the
  980.  * reset_counter _must_ be read before, and an appropriate smp_rmb must be
  981.  * inserted.
  982.  *
  983.  * Returns 0 if the seqno was found within the allotted time. Else returns the
  984.  * errno with remaining time filled in timeout argument.
  985.  */
  986. int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
  987.                         unsigned reset_counter,
  988.                         bool interruptible,
  989.                         s64 *timeout,
  990.                         struct drm_i915_file_private *file_priv)
  991. {
  992.         struct drm_device *dev = ring->dev;
  993.         struct drm_i915_private *dev_priv = dev->dev_private;
  994.         const bool irq_test_in_progress =
  995.                 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
  996.         unsigned long timeout_expire;
  997.         s64 before, now;
  998.  
  999.     wait_queue_t __wait;
  1000.         int ret;
  1001.  
  1002.         WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
  1003.  
  1004.         if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
  1005.                 return 0;
  1006.  
  1007.         timeout_expire = timeout ?
  1008.                 jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
  1009.  
  1010.         if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
  1011.                 gen6_rps_boost(dev_priv);
  1012.                 if (file_priv)
  1013.                         mod_delayed_work(dev_priv->wq,
  1014.                                          &file_priv->mm.idle_work,
  1015.                                          msecs_to_jiffies(100));
  1016.         }
  1017.  
  1018.         if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
  1019.                 return -ENODEV;
  1020.  
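    /* Build a wait-queue entry backed by a KolibriOS event object; the loop
     * below blocks on it with WaitEventTimeout() rather than the Linux
     * prepare_to_wait()/schedule() sequence. */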
  1021.     INIT_LIST_HEAD(&__wait.task_list);
  1022.     __wait.evnt = CreateEvent(NULL, MANUAL_DESTROY);
  1023.  
  1024.         /* Record current time in case interrupted by signal, or wedged */
  1025.         trace_i915_gem_request_wait_begin(ring, seqno);
  1026.  
  1027.         for (;;) {
  1028.         unsigned long flags;
  1029.  
  1030.                 /* We need to check whether any gpu reset happened in between
  1031.                  * the caller grabbing the seqno and now ... */
  1032.                 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
  1033.                         /* ... but upgrade the -EAGAIN to an -EIO if the gpu
  1034.                          * is truly gone. */
  1035.                         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
  1036.                         if (ret == 0)
  1037.                                 ret = -EAGAIN;
  1038.                         break;
  1039.                 }
  1040.  
  1041.                 if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
  1042.                         ret = 0;
  1043.                         break;
  1044.                 }
  1045.  
  1046.         if (timeout && time_after_eq(jiffies, timeout_expire)) {
  1047.                         ret = -ETIME;
  1048.                         break;
  1049.                 }
  1050.  
  1051.         spin_lock_irqsave(&ring->irq_queue.lock, flags);
  1052.         if (list_empty(&__wait.task_list))
  1053.             __add_wait_queue(&ring->irq_queue, &__wait);
  1054.         spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
  1055.  
  1056.         WaitEventTimeout(__wait.evnt, 1);
  1057.  
  1058.         if (!list_empty(&__wait.task_list)) {
  1059.             spin_lock_irqsave(&ring->irq_queue.lock, flags);
  1060.             list_del_init(&__wait.task_list);
  1061.             spin_unlock_irqrestore(&ring->irq_queue.lock, flags);
  1062.         }
  1063.         }
  1064.     trace_i915_gem_request_wait_end(ring, seqno);
  1065.  
  1066.     DestroyEvent(__wait.evnt);
  1067.  
  1068.         if (!irq_test_in_progress)
  1069.         ring->irq_put(ring);
  1070.  
  1071. //      finish_wait(&ring->irq_queue, &wait);
  1072.         return ret;
  1073. }
  1074.  
  1075. /**
  1076.  * Waits for a sequence number to be signaled, and cleans up the
  1077.  * request and object lists appropriately for that event.
  1078.  */
  1079. int
  1080. i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
  1081. {
  1082.         struct drm_device *dev = ring->dev;
  1083.         struct drm_i915_private *dev_priv = dev->dev_private;
  1084.         bool interruptible = dev_priv->mm.interruptible;
  1085.         unsigned reset_counter;
  1086.         int ret;
  1087.  
  1088.         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1089.         BUG_ON(seqno == 0);
  1090.  
  1091.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
  1092.         if (ret)
  1093.                 return ret;
  1094.  
  1095.         ret = i915_gem_check_olr(ring, seqno);
  1096.         if (ret)
  1097.                 return ret;
  1098.  
  1099.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  1100.         return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
  1101.                                  NULL, NULL);
  1102. }
  1103.  
  1104. static int
  1105. i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
  1106. {
  1107.         if (!obj->active)
  1108.                 return 0;
  1109.  
  1110.         /* Manually manage the write flush as we may have not yet
  1111.          * retired the buffer.
  1112.          *
  1113.          * Note that the last_write_seqno is always the earlier of
  1114.          * the two (read/write) seqno, so if we have successfully waited,
  1115.          * we know we have passed the last write.
  1116.          */
  1117.         obj->last_write_seqno = 0;
  1118.  
  1119.         return 0;
  1120. }
  1121.  
  1122. /**
  1123.  * Ensures that all rendering to the object has completed and the object is
  1124.  * safe to unbind from the GTT or access from the CPU.
  1125.  */
  1126. static __must_check int
  1127. i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  1128.                                bool readonly)
  1129. {
  1130.         struct intel_engine_cs *ring = obj->ring;
  1131.         u32 seqno;
  1132.         int ret;
  1133.  
  1134.         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
  1135.         if (seqno == 0)
  1136.                 return 0;
  1137.  
  1138.         ret = i915_wait_seqno(ring, seqno);
  1139.         if (ret)
  1140.                 return ret;
  1141.  
  1142.         return i915_gem_object_wait_rendering__tail(obj);
  1143. }
  1144.  
  1145. /* A nonblocking variant of the above wait. This is a highly dangerous routine
  1146.  * as the object state may change during this call.
  1147.  */
  1148. static __must_check int
  1149. i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
  1150.                                             struct drm_i915_file_private *file_priv,
  1151.                                             bool readonly)
  1152. {
  1153.         struct drm_device *dev = obj->base.dev;
  1154.         struct drm_i915_private *dev_priv = dev->dev_private;
  1155.         struct intel_engine_cs *ring = obj->ring;
  1156.         unsigned reset_counter;
  1157.         u32 seqno;
  1158.         int ret;
  1159.  
  1160.         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
  1161.         BUG_ON(!dev_priv->mm.interruptible);
  1162.  
  1163.         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
  1164.         if (seqno == 0)
  1165.                 return 0;
  1166.  
  1167.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
  1168.         if (ret)
  1169.                 return ret;
  1170.  
  1171.         ret = i915_gem_check_olr(ring, seqno);
  1172.         if (ret)
  1173.                 return ret;
  1174.  
  1175.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  1176.         mutex_unlock(&dev->struct_mutex);
  1177.         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
  1178.                                 file_priv);
  1179.         mutex_lock(&dev->struct_mutex);
  1180.         if (ret)
  1181.                 return ret;
  1182.  
  1183.         return i915_gem_object_wait_rendering__tail(obj);
  1184. }
  1185.  
  1186. /**
  1187.  * Called when user space prepares to use an object with the CPU, either
  1188.  * through the mmap ioctl's mapping or a GTT mapping.
  1189.  */
  1190. int
  1191. i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
  1192.                           struct drm_file *file)
  1193. {
  1194.         struct drm_i915_gem_set_domain *args = data;
  1195.         struct drm_i915_gem_object *obj;
  1196.         uint32_t read_domains = args->read_domains;
  1197.         uint32_t write_domain = args->write_domain;
  1198.         int ret;
  1199.  
  1200.         /* Only handle setting domains to types used by the CPU. */
  1201.         if (write_domain & I915_GEM_GPU_DOMAINS)
  1202.                 return -EINVAL;
  1203.  
  1204.         if (read_domains & I915_GEM_GPU_DOMAINS)
  1205.                 return -EINVAL;
  1206.  
  1207.         /* Having something in the write domain implies it's in the read
  1208.          * domain, and only that read domain.  Enforce that in the request.
  1209.          */
  1210.         if (write_domain != 0 && read_domains != write_domain)
  1211.                 return -EINVAL;
  1212.  
  1213.         ret = i915_mutex_lock_interruptible(dev);
  1214.         if (ret)
  1215.                 return ret;
  1216.  
  1217.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1218.         if (&obj->base == NULL) {
  1219.                 ret = -ENOENT;
  1220.                 goto unlock;
  1221.         }
  1222.  
  1223.         /* Try to flush the object off the GPU without holding the lock.
  1224.          * We will repeat the flush holding the lock in the normal manner
  1225.          * to catch cases where we are gazumped.
  1226.          */
  1227.         ret = i915_gem_object_wait_rendering__nonblocking(obj,
  1228.                                                           file->driver_priv,
  1229.                                                           !write_domain);
  1230.         if (ret)
  1231.                 goto unref;
  1232.  
  1233.         if (read_domains & I915_GEM_DOMAIN_GTT) {
  1234.                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
  1235.  
  1236.                 /* Silently promote "you're not bound, there was nothing to do"
  1237.                  * to success, since the client was just asking us to
  1238.                  * make sure everything was done.
  1239.                  */
  1240.                 if (ret == -EINVAL)
  1241.                         ret = 0;
  1242.         } else {
  1243.                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
  1244.         }
  1245.  
  1246. unref:
  1247.         drm_gem_object_unreference(&obj->base);
  1248. unlock:
  1249.         mutex_unlock(&dev->struct_mutex);
  1250.         return ret;
  1251. }
  1252.  
  1253. /**
  1254.  * Called when user space has done writes to this buffer
  1255.  */
  1256. int
  1257. i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
  1258.                          struct drm_file *file)
  1259. {
  1260.         struct drm_i915_gem_sw_finish *args = data;
  1261.         struct drm_i915_gem_object *obj;
  1262.         int ret = 0;
  1263.  
  1264.         ret = i915_mutex_lock_interruptible(dev);
  1265.         if (ret)
  1266.                 return ret;
  1267.  
  1268.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  1269.         if (&obj->base == NULL) {
  1270.                 ret = -ENOENT;
  1271.                 goto unlock;
  1272.         }
  1273.  
  1274.         /* Pinned buffers may be scanout, so flush the cache */
  1275.         if (obj->pin_display)
  1276.                 i915_gem_object_flush_cpu_write_domain(obj, true);
  1277.  
  1278.         drm_gem_object_unreference(&obj->base);
  1279. unlock:
  1280.         mutex_unlock(&dev->struct_mutex);
  1281.         return ret;
  1282. }
  1283.  
  1284. /**
  1285.  * Maps the contents of an object, returning the address it is mapped
  1286.  * into.
  1287.  *
  1288.  * While the mapping holds a reference on the contents of the object, it doesn't
  1289.  * imply a ref on the object itself.
  1290.  *
  1291.  * IMPORTANT:
  1292.  *
  1293.  * DRM driver writers who look at this function as an example for how to do GEM
  1294.  * mmap support, please don't implement mmap support like here. The modern way
  1295.  * to implement DRM mmap support is with an mmap offset ioctl (like
  1296.  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
  1297.  * That way debug tooling like valgrind will understand what's going on, hiding
  1298.  * the mmap call in a driver private ioctl will break that. The i915 driver only
  1299.  * does cpu mmaps this way because we didn't know better.
  1300.  */
  1301. int
  1302. i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  1303.                     struct drm_file *file)
  1304. {
  1305.         struct drm_i915_gem_mmap *args = data;
  1306.         struct drm_gem_object *obj;
  1307.         unsigned long addr;
  1308.  
  1309.         obj = drm_gem_object_lookup(dev, file, args->handle);
  1310.         if (obj == NULL)
  1311.                 return -ENOENT;
  1312.  
  1313.         /* prime objects have no backing filp to GEM mmap
  1314.          * pages from.
  1315.          */
  1316.         if (!obj->filp) {
  1317.                 drm_gem_object_unreference_unlocked(obj);
  1318.                 return -EINVAL;
  1319.         }
  1320.  
  1321.     addr = vm_mmap(obj->filp, 0, args->size,
  1322.               PROT_READ | PROT_WRITE, MAP_SHARED,
  1323.               args->offset);
  1324.         drm_gem_object_unreference_unlocked(obj);
  1325.     if (IS_ERR((void *)addr))
  1326.         return addr;
  1327.  
  1328.         args->addr_ptr = (uint64_t) addr;
  1329.  
  1330.     return 0;
  1331. }
  1332.  
  1333.  
  1334.  
  1335.  
  1336.  
  1337.  
  1338.  
  1339.  
  1340.  
  1341.  
  1342.  
  1343.  
  1344.  
  1345. /**
  1346.  * i915_gem_release_mmap - remove physical page mappings
  1347.  * @obj: obj in question
  1348.  *
  1349.  * Preserve the reservation of the mmapping with the DRM core code, but
  1350.  * relinquish ownership of the pages back to the system.
  1351.  *
  1352.  * It is vital that we remove the page mapping if we have mapped a tiled
  1353.  * object through the GTT and then lose the fence register due to
  1354.  * resource pressure. Similarly if the object has been moved out of the
  1355.  * aperture, then pages mapped into userspace must be revoked. Removing the
  1356.  * mapping will then trigger a page fault on the next user access, allowing
  1357.  * fixup by i915_gem_fault().
  1358.  */
  1359. void
  1360. i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  1361. {
  1362.         if (!obj->fault_mappable)
  1363.                 return;
  1364.  
  1365. //      drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
  1366.         obj->fault_mappable = false;
  1367. }
  1368.  
  1369. uint32_t
  1370. i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  1371. {
  1372.         uint32_t gtt_size;
  1373.  
  1374.         if (INTEL_INFO(dev)->gen >= 4 ||
  1375.             tiling_mode == I915_TILING_NONE)
  1376.                 return size;
  1377.  
  1378.         /* Previous chips need a power-of-two fence region when tiling */
  1379.         if (INTEL_INFO(dev)->gen == 3)
  1380.                 gtt_size = 1024*1024;
  1381.         else
  1382.                 gtt_size = 512*1024;
  1383.  
  1384.         while (gtt_size < size)
  1385.                 gtt_size <<= 1;
  1386.  
  1387.         return gtt_size;
  1388. }
  1389.  
  1390. /**
  1391.  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  1392.  * @obj: object to check
  1393.  *
  1394.  * Return the required GTT alignment for an object, taking into account
  1395.  * potential fence register mapping.
  1396.  */
  1397. uint32_t
  1398. i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
  1399.                            int tiling_mode, bool fenced)
  1400. {
  1401.         /*
  1402.          * Minimum alignment is 4k (GTT page size), but might be greater
  1403.          * if a fence register is needed for the object.
  1404.          */
  1405.         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
  1406.             tiling_mode == I915_TILING_NONE)
  1407.                 return 4096;
  1408.  
  1409.         /*
  1410.          * Previous chips need to be aligned to the size of the smallest
  1411.          * fence register that can contain the object.
  1412.          */
  1413.         return i915_gem_get_gtt_size(dev, size, tiling_mode);
  1414. }
  1415.  
  1416.  
  1417.  
  1418. int
  1419. i915_gem_mmap_gtt(struct drm_file *file,
  1420.           struct drm_device *dev,
  1421.           uint32_t handle,
  1422.           uint64_t *offset)
  1423. {
  1424.     struct drm_i915_private *dev_priv = dev->dev_private;
  1425.     struct drm_i915_gem_object *obj;
  1426.     unsigned long pfn;
  1427.     char *mem, *ptr;
  1428.     int ret;
  1429.  
  1430.     ret = i915_mutex_lock_interruptible(dev);
  1431.     if (ret)
  1432.         return ret;
  1433.  
  1434.     obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
  1435.     if (&obj->base == NULL) {
  1436.         ret = -ENOENT;
  1437.         goto unlock;
  1438.     }
  1439.  
  1440.     if (obj->base.size > dev_priv->gtt.mappable_end) {
  1441.         ret = -E2BIG;
  1442.         goto out;
  1443.     }
  1444.  
  1445.     if (obj->madv != I915_MADV_WILLNEED) {
  1446.                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
  1447.                 ret = -EFAULT;
  1448.         goto out;
  1449.     }
  1450.     /* Now bind it into the GTT if needed */
  1451.         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
  1452.     if (ret)
  1453.         goto out;
  1454.  
  1455.     ret = i915_gem_object_set_to_gtt_domain(obj, 1);
  1456.     if (ret)
  1457.         goto unpin;
  1458.  
  1459.     ret = i915_gem_object_get_fence(obj);
  1460.     if (ret)
  1461.         goto unpin;
  1462.  
  1463.     obj->fault_mappable = true;
  1464.  
  1465.     pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
  1466.  
  1467.     /* Finally, remap it using the new GTT offset */
  1468.  
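            /*
             * KolibriOS port note: rather than returning a fake mmap offset and
             * faulting pages in later (as the upstream driver does), this port
             * allocates a user-space window with UserAlloc() and maps the object's
             * aperture pages into it directly.  The 'pfn' computed above is really
             * a physical address (gtt.mappable_base + GGTT offset), advanced one
             * 4 KiB page per iteration together with the destination pointer.
             */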
  1469.     mem = UserAlloc(obj->base.size);
  1470.     if(unlikely(mem == NULL))
  1471.     {
  1472.         ret = -ENOMEM;
  1473.         goto unpin;
  1474.     }
  1475.  
  1476.     for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
  1477.         MapPage(ptr, pfn, PG_SHARED|PG_UW);
  1478.  
  1479. unpin:
  1480.     i915_gem_object_unpin_pages(obj);
  1481.  
  1482.  
  1483.     *offset = (uint64_t)(unsigned long)mem;
  1484.  
  1485. out:
  1486.     drm_gem_object_unreference(&obj->base);
  1487. unlock:
  1488.     mutex_unlock(&dev->struct_mutex);
  1489.     return ret;
  1490. }
  1491.  
  1492. /**
  1493.  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  1494.  * @dev: DRM device
  1495.  * @data: GTT mapping ioctl data
  1496.  * @file: GEM object info
  1497.  *
  1498.  * Simply returns the fake offset to userspace so it can mmap it.
  1499.  * The mmap call will end up in drm_gem_mmap(), which will set things
  1500.  * up so we can get faults in the handler above.
  1501.  *
  1502.  * The fault handler will take care of binding the object into the GTT
  1503.  * (since it may have been evicted to make room for something), allocating
  1504.  * a fence register, and mapping the appropriate aperture address into
  1505.  * userspace.
  1506.  */
  1507. int
  1508. i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
  1509.             struct drm_file *file)
  1510. {
  1511.     struct drm_i915_gem_mmap_gtt *args = data;
  1512.  
  1513.     return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
  1514. }
  1515.  
  1516. static inline int
  1517. i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
  1518. {
  1519.         return obj->madv == I915_MADV_DONTNEED;
  1520. }
  1521.  
  1522. /* Immediately discard the backing storage */
  1523. static void
  1524. i915_gem_object_truncate(struct drm_i915_gem_object *obj)
  1525. {
  1526. //      i915_gem_object_free_mmap_offset(obj);
  1527.  
  1528.         if (obj->base.filp == NULL)
  1529.                 return;
  1530.  
  1531.         /* Our goal here is to return as much of the memory as
  1532.          * is possible back to the system as we are called from OOM.
  1533.          * To do this we must instruct the shmfs to drop all of its
  1534.          * backing pages, *now*.
  1535.          */
  1536. //      shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
  1537.         obj->madv = __I915_MADV_PURGED;
  1538. }
  1539.  
  1540. /* Try to discard unwanted pages */
  1541. static void
  1542. i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
  1543. {
  1544.         struct address_space *mapping;
  1545.  
  1546.         switch (obj->madv) {
  1547.         case I915_MADV_DONTNEED:
  1548.                 i915_gem_object_truncate(obj);
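                        /* fall through - a freshly truncated object is treated the same as an already purged one */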
  1549.         case __I915_MADV_PURGED:
  1550.                 return;
  1551.         }
  1552.  
  1553.         if (obj->base.filp == NULL)
  1554.                 return;
  1555.  
  1556. }
  1557.  
  1558. static void
  1559. i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
  1560. {
  1561.         struct sg_page_iter sg_iter;
  1562.         int ret;
  1563.  
  1564.         BUG_ON(obj->madv == __I915_MADV_PURGED);
  1565.  
  1566.         ret = i915_gem_object_set_to_cpu_domain(obj, true);
  1567.         if (ret) {
  1568.                 /* In the event of a disaster, abandon all caches and
  1569.                  * hope for the best.
  1570.                  */
  1571.                 WARN_ON(ret != -EIO);
  1572.                 i915_gem_clflush_object(obj, true);
  1573.                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  1574.         }
  1575.  
  1576.         if (obj->madv == I915_MADV_DONTNEED)
  1577.                 obj->dirty = 0;
  1578.  
  1579.         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
  1580.                 struct page *page = sg_page_iter_page(&sg_iter);
  1581.  
  1582.         page_cache_release(page);
  1583.         }
  1584.     obj->dirty = 0;
  1585.  
  1586.         sg_free_table(obj->pages);
  1587.         kfree(obj->pages);
  1588. }
  1589.  
  1590. int
  1591. i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
  1592. {
  1593.         const struct drm_i915_gem_object_ops *ops = obj->ops;
  1594.  
  1595.         if (obj->pages == NULL)
  1596.                 return 0;
  1597.  
  1598.         if (obj->pages_pin_count)
  1599.                 return -EBUSY;
  1600.  
  1601.         BUG_ON(i915_gem_obj_bound_any(obj));
  1602.  
  1603.         /* ->put_pages might need to allocate memory for the bit17 swizzle
  1604.          * array, hence protect them from being reaped by removing them from gtt
  1605.          * lists early. */
  1606.         list_del(&obj->global_list);
  1607.  
  1608.         ops->put_pages(obj);
  1609.         obj->pages = NULL;
  1610.  
  1611.         i915_gem_object_invalidate(obj);
  1612.  
  1613.         return 0;
  1614. }
  1615.  
  1616.  
  1617.  
  1618.  
  1619.  
  1620.  
  1621.  
  1622.  
  1623. static int
  1624. i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
  1625. {
  1626.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1627.     int page_count, i;
  1628.     struct sg_table *st;
  1629.         struct scatterlist *sg;
  1630.         struct sg_page_iter sg_iter;
  1631.         struct page *page;
  1632.         unsigned long last_pfn = 0;     /* suppress gcc warning */
  1633.         gfp_t gfp = GFP_KERNEL; /* reasonable default: the original mapping_gfp_mask() lookup is absent in this port */
  1634.  
  1635.         /* Assert that the object is not currently in any GPU domain. As it
  1636.          * wasn't in the GTT, there shouldn't be any way it could have been in
  1637.          * a GPU cache
  1638.          */
  1639.         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
  1640.         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
  1641.  
  1642.         st = kmalloc(sizeof(*st), GFP_KERNEL);
  1643.         if (st == NULL)
  1644.                 return -ENOMEM;
  1645.  
  1646.         page_count = obj->base.size / PAGE_SIZE;
  1647.         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
  1648.                 kfree(st);
  1649.         FAIL();
  1650.                 return -ENOMEM;
  1651.         }
  1652.  
  1653.         /* Get the list of pages out of our struct file.  They'll be pinned
  1654.          * at this point until we release them.
  1655.          *
  1656.          * Fail silently without starting the shrinker
  1657.          */
  1658.         sg = st->sgl;
  1659.         st->nents = 0;
  1660.         for (i = 0; i < page_count; i++) {
  1661.         page = shmem_read_mapping_page_gfp(obj->base.filp, i, gfp);
  1662.                 if (IS_ERR(page)) {
  1663.             dbgprintf("%s invalid page %p\n", __FUNCTION__, page);
  1664.                         goto err_pages;
  1665.  
  1666.                 }
  1667. #ifdef CONFIG_SWIOTLB
  1668.                 if (swiotlb_nr_tbl()) {
  1669.                         st->nents++;
  1670.                         sg_set_page(sg, page, PAGE_SIZE, 0);
  1671.                         sg = sg_next(sg);
  1672.                         continue;
  1673.                 }
  1674. #endif
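                        /* Coalesce physically contiguous pages into a single scatterlist entry; start a new entry whenever the pfn run breaks. */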
  1675.                 if (!i || page_to_pfn(page) != last_pfn + 1) {
  1676.                         if (i)
  1677.                                 sg = sg_next(sg);
  1678.                         st->nents++;
  1679.                 sg_set_page(sg, page, PAGE_SIZE, 0);
  1680.                 } else {
  1681.                         sg->length += PAGE_SIZE;
  1682.                 }
  1683.                 last_pfn = page_to_pfn(page);
  1684.         }
  1685. #ifdef CONFIG_SWIOTLB
  1686.         if (!swiotlb_nr_tbl())
  1687. #endif
  1688.                 sg_mark_end(sg);
  1689.         obj->pages = st;
  1690.  
  1691.         return 0;
  1692.  
  1693. err_pages:
  1694.         sg_mark_end(sg);
  1695.         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
  1696.                 page_cache_release(sg_page_iter_page(&sg_iter));
  1697.         sg_free_table(st);
  1698.         kfree(st);
  1699.     FAIL();
  1700.         return PTR_ERR(page);
  1701. }
  1702.  
  1703. /* Ensure that the associated pages are gathered from the backing storage
  1704.  * and pinned into our object. i915_gem_object_get_pages() may be called
  1705.  * multiple times before they are released by a single call to
  1706.  * i915_gem_object_put_pages() - once the pages are no longer referenced
  1707.  * either as a result of memory pressure (reaping pages under the shrinker)
  1708.  * or as the object is itself released.
  1709.  */
  1710. int
  1711. i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  1712. {
  1713.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1714.         const struct drm_i915_gem_object_ops *ops = obj->ops;
  1715.         int ret;
  1716.  
  1717.         if (obj->pages)
  1718.                 return 0;
  1719.  
  1720.         if (obj->madv != I915_MADV_WILLNEED) {
  1721.                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
  1722.                 return -EFAULT;
  1723.         }
  1724.  
  1725.         BUG_ON(obj->pages_pin_count);
  1726.  
  1727.         ret = ops->get_pages(obj);
  1728.         if (ret)
  1729.                 return ret;
  1730.  
  1731.         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  1732.     return 0;
  1733. }
  1734.  
  1735. static void
  1736. i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
  1737.                                struct intel_engine_cs *ring)
  1738. {
  1739.         u32 seqno = intel_ring_get_seqno(ring);
  1740.  
  1741.         BUG_ON(ring == NULL);
  1742.         if (obj->ring != ring && obj->last_write_seqno) {
  1743.                 /* Keep the seqno relative to the current ring */
  1744.                 obj->last_write_seqno = seqno;
  1745.         }
  1746.         obj->ring = ring;
  1747.  
  1748.         /* Add a reference if we're newly entering the active list. */
  1749.         if (!obj->active) {
  1750.                 drm_gem_object_reference(&obj->base);
  1751.                 obj->active = 1;
  1752.         }
  1753.  
  1754.         list_move_tail(&obj->ring_list, &ring->active_list);
  1755.  
  1756.         obj->last_read_seqno = seqno;
  1757. }
  1758.  
  1759. void i915_vma_move_to_active(struct i915_vma *vma,
  1760.                              struct intel_engine_cs *ring)
  1761. {
  1762.         list_move_tail(&vma->mm_list, &vma->vm->active_list);
  1763.         return i915_gem_object_move_to_active(vma->obj, ring);
  1764. }
  1765.  
  1766. static void
  1767. i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
  1768. {
  1769.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  1770.         struct i915_address_space *vm;
  1771.         struct i915_vma *vma;
  1772.  
  1773.         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
  1774.         BUG_ON(!obj->active);
  1775.  
  1776.         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
  1777.                 vma = i915_gem_obj_to_vma(obj, vm);
  1778.                 if (vma && !list_empty(&vma->mm_list))
  1779.                         list_move_tail(&vma->mm_list, &vm->inactive_list);
  1780.         }
  1781.  
  1782.         intel_fb_obj_flush(obj, true);
  1783.  
  1784.         list_del_init(&obj->ring_list);
  1785.         obj->ring = NULL;
  1786.  
  1787.         obj->last_read_seqno = 0;
  1788.         obj->last_write_seqno = 0;
  1789.         obj->base.write_domain = 0;
  1790.  
  1791.         obj->last_fenced_seqno = 0;
  1792.  
  1793.         obj->active = 0;
  1794.         drm_gem_object_unreference(&obj->base);
  1795.  
  1796.         WARN_ON(i915_verify_lists(dev));
  1797. }
  1798.  
  1799. static void
  1800. i915_gem_object_retire(struct drm_i915_gem_object *obj)
  1801. {
  1802.         struct intel_engine_cs *ring = obj->ring;
  1803.  
  1804.         if (ring == NULL)
  1805.                 return;
  1806.  
  1807.         if (i915_seqno_passed(ring->get_seqno(ring, true),
  1808.                               obj->last_read_seqno))
  1809.                 i915_gem_object_move_to_inactive(obj);
  1810. }
  1811.  
  1812. static int
  1813. i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  1814. {
  1815.         struct drm_i915_private *dev_priv = dev->dev_private;
  1816.         struct intel_engine_cs *ring;
  1817.         int ret, i, j;
  1818.  
  1819.         /* Carefully retire all requests without writing to the rings */
  1820.         for_each_ring(ring, dev_priv, i) {
  1821.                 ret = intel_ring_idle(ring);
  1822.                 if (ret)
  1823.                         return ret;
  1824.         }
  1825.         i915_gem_retire_requests(dev);
  1826.  
  1827.         /* Finally reset hw state */
  1828.         for_each_ring(ring, dev_priv, i) {
  1829.                 intel_ring_init_seqno(ring, seqno);
  1830.  
  1831.                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
  1832.                         ring->semaphore.sync_seqno[j] = 0;
  1833.         }
  1834.  
  1835.         return 0;
  1836. }
  1837.  
  1838. int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
  1839. {
  1840.         struct drm_i915_private *dev_priv = dev->dev_private;
  1841.         int ret;
  1842.  
  1843.         if (seqno == 0)
  1844.                 return -EINVAL;
  1845.  
  1846.         /* HWS page needs to be set less than what we
  1847.          * will inject to ring
  1848.          */
  1849.         ret = i915_gem_init_seqno(dev, seqno - 1);
  1850.         if (ret)
  1851.                 return ret;
  1852.  
  1853.         /* Carefully set the last_seqno value so that wrap
  1854.          * detection still works
  1855.          */
  1856.         dev_priv->next_seqno = seqno;
  1857.         dev_priv->last_seqno = seqno - 1;
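                /* seqno 0 is reserved for "no seqno" (see i915_gem_get_seqno), so if seqno - 1 lands on 0, step back once more and let last_seqno wrap to 0xffffffff. */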
  1858.         if (dev_priv->last_seqno == 0)
  1859.                 dev_priv->last_seqno--;
  1860.  
  1861.         return 0;
  1862. }
  1863.  
  1864. int
  1865. i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  1866. {
  1867.         struct drm_i915_private *dev_priv = dev->dev_private;
  1868.  
  1869.         /* reserve 0 for non-seqno */
  1870.         if (dev_priv->next_seqno == 0) {
  1871.                 int ret = i915_gem_init_seqno(dev, 0);
  1872.                 if (ret)
  1873.                         return ret;
  1874.  
  1875.                 dev_priv->next_seqno = 1;
  1876.         }
  1877.  
  1878.         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
  1879.         return 0;
  1880. }
  1881.  
  1882. int __i915_add_request(struct intel_engine_cs *ring,
  1883.                  struct drm_file *file,
  1884.                        struct drm_i915_gem_object *obj,
  1885.                  u32 *out_seqno)
  1886. {
  1887.         struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1888.         struct drm_i915_gem_request *request;
  1889.         struct intel_ringbuffer *ringbuf;
  1890.         u32 request_ring_position, request_start;
  1891.         int ret;
  1892.  
  1893.         request = ring->preallocated_lazy_request;
  1894.         if (WARN_ON(request == NULL))
  1895.                 return -ENOMEM;
  1896.  
  1897.         if (i915.enable_execlists) {
  1898.                 struct intel_context *ctx = request->ctx;
  1899.                 ringbuf = ctx->engine[ring->id].ringbuf;
  1900.         } else
  1901.                 ringbuf = ring->buffer;
  1902.  
  1903.         request_start = intel_ring_get_tail(ringbuf);
  1904.         /*
  1905.          * Emit any outstanding flushes - execbuf can fail to emit the flush
  1906.          * after having emitted the batchbuffer command. Hence we need to fix
  1907.          * things up similar to emitting the lazy request. The difference here
  1908.          * is that the flush _must_ happen before the next request, no matter
  1909.          * what.
  1910.          */
  1911.         if (i915.enable_execlists) {
  1912.                 ret = logical_ring_flush_all_caches(ringbuf);
  1913.                 if (ret)
  1914.                         return ret;
  1915.         } else {
  1916.                 ret = intel_ring_flush_all_caches(ring);
  1917.                 if (ret)
  1918.                         return ret;
  1919.         }
  1920.  
  1921.         /* Record the position of the start of the request so that
  1922.          * should we detect the updated seqno part-way through the
  1923.          * GPU processing the request, we never over-estimate the
  1924.          * position of the head.
  1925.          */
  1926.         request_ring_position = intel_ring_get_tail(ringbuf);
  1927.  
  1928.         if (i915.enable_execlists) {
  1929.                 ret = ring->emit_request(ringbuf);
  1930.                 if (ret)
  1931.                         return ret;
  1932.         } else {
  1933.         ret = ring->add_request(ring);
  1934.         if (ret)
  1935.                 return ret;
  1936.         }
  1937.  
  1938.         request->seqno = intel_ring_get_seqno(ring);
  1939.         request->ring = ring;
  1940.         request->head = request_start;
  1941.         request->tail = request_ring_position;
  1942.  
  1943.         /* Whilst this request exists, batch_obj will be on the
  1944.          * active_list, and so will hold the active reference. Only when this
  1945.          * request is retired will the batch_obj be moved onto the
  1946.          * inactive_list and lose its active reference. Hence we do not need
  1947.          * to explicitly hold another reference here.
  1948.          */
  1949.         request->batch_obj = obj;
  1950.  
  1951.         if (!i915.enable_execlists) {
  1952.         /* Hold a reference to the current context so that we can inspect
  1953.          * it later in case a hangcheck error event fires.
  1954.          */
  1955.         request->ctx = ring->last_context;
  1956.         if (request->ctx)
  1957.                 i915_gem_context_reference(request->ctx);
  1958.         }
  1959.  
  1960.         request->emitted_jiffies = jiffies;
  1961.         list_add_tail(&request->list, &ring->request_list);
  1962.         request->file_priv = NULL;
  1963.  
  1964.         if (file) {
  1965.                 struct drm_i915_file_private *file_priv = file->driver_priv;
  1966.  
  1967.                 spin_lock(&file_priv->mm.lock);
  1968.                 request->file_priv = file_priv;
  1969.                 list_add_tail(&request->client_list,
  1970.                               &file_priv->mm.request_list);
  1971.                 spin_unlock(&file_priv->mm.lock);
  1972.         }
  1973.  
  1974.         trace_i915_gem_request_add(ring, request->seqno);
  1975.         ring->outstanding_lazy_seqno = 0;
  1976.         ring->preallocated_lazy_request = NULL;
  1977.  
  1978. //              i915_queue_hangcheck(ring->dev);
  1979.  
  1980.            queue_delayed_work(dev_priv->wq,
  1981.                                            &dev_priv->mm.retire_work,
  1982.                                            round_jiffies_up_relative(HZ));
  1983.            intel_mark_busy(dev_priv->dev);
  1984.  
  1985.         if (out_seqno)
  1986.                 *out_seqno = request->seqno;
  1987.         return 0;
  1988. }
  1989.  
  1990. static inline void
  1991. i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
  1992. {
  1993.         struct drm_i915_file_private *file_priv = request->file_priv;
  1994.  
  1995.         if (!file_priv)
  1996.                 return;
  1997.  
  1998.         spin_lock(&file_priv->mm.lock);
  1999.                 list_del(&request->client_list);
  2000.                 request->file_priv = NULL;
  2001.         spin_unlock(&file_priv->mm.lock);
  2002. }
  2003.  
  2004. static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
  2005.                                    const struct intel_context *ctx)
  2006. {
  2007.         unsigned long elapsed;
  2008.  
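            /* GetTimerTicks() counts KolibriOS system ticks (assumed to be 100 Hz here), so dividing by 100 gives seconds, matching how guilty_ts is recorded in i915_set_reset_status(). */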
  2009.     elapsed = GetTimerTicks()/100 - ctx->hang_stats.guilty_ts;
  2010.  
  2011.         if (ctx->hang_stats.banned)
  2012.                 return true;
  2013.  
  2014.         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
  2015.                 if (!i915_gem_context_is_default(ctx)) {
  2016.                         DRM_DEBUG("context hanging too fast, banning!\n");
  2017.                         return true;
  2018.                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
  2019.                         if (i915_stop_ring_allow_warn(dev_priv))
  2020.                                 DRM_ERROR("gpu hanging too fast, banning!\n");
  2021.                         return true;
  2022.                 }
  2023.         }
  2024.  
  2025.         return false;
  2026. }
  2027.  
  2028. static void i915_set_reset_status(struct drm_i915_private *dev_priv,
  2029.                                   struct intel_context *ctx,
  2030.                                   const bool guilty)
  2031. {
  2032.         struct i915_ctx_hang_stats *hs;
  2033.  
  2034.         if (WARN_ON(!ctx))
  2035.                 return;
  2036.  
  2037.         hs = &ctx->hang_stats;
  2038.  
  2039.         if (guilty) {
  2040.                 hs->banned = i915_context_is_banned(dev_priv, ctx);
  2041.                 hs->batch_active++;
  2042.         hs->guilty_ts = GetTimerTicks()/100;
  2043.         } else {
  2044.                 hs->batch_pending++;
  2045.         }
  2046. }
  2047.  
  2048. static void i915_gem_free_request(struct drm_i915_gem_request *request)
  2049. {
  2050.         struct intel_context *ctx = request->ctx;
  2051.  
  2052.         list_del(&request->list);
  2053.         i915_gem_request_remove_from_client(request);
  2054.  
  2055.         if (ctx) {
  2056.                 if (i915.enable_execlists) {
  2057.                         struct intel_engine_cs *ring = request->ring;
  2058.  
  2059.                         if (ctx != ring->default_context)
  2060.                                 intel_lr_context_unpin(ring, ctx);
  2061.                 }
  2062.                 i915_gem_context_unreference(ctx);
  2063.         }
  2064.         kfree(request);
  2065. }
  2066.  
  2067. struct drm_i915_gem_request *
  2068. i915_gem_find_active_request(struct intel_engine_cs *ring)
  2069. {
  2070.         struct drm_i915_gem_request *request;
  2071.         u32 completed_seqno;
  2072.  
  2073.         completed_seqno = ring->get_seqno(ring, false);
  2074.  
  2075.         list_for_each_entry(request, &ring->request_list, list) {
  2076.                 if (i915_seqno_passed(completed_seqno, request->seqno))
  2077.                         continue;
  2078.  
  2079.                 return request;
  2080.         }
  2081.  
  2082.         return NULL;
  2083. }
  2084.  
  2085. static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
  2086.                                        struct intel_engine_cs *ring)
  2087. {
  2088.         struct drm_i915_gem_request *request;
  2089.         bool ring_hung;
  2090.  
  2091.         request = i915_gem_find_active_request(ring);
  2092.  
  2093.         if (request == NULL)
  2094.                 return;
  2095.  
  2096.         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
  2097.  
  2098.         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
  2099.  
  2100.         list_for_each_entry_continue(request, &ring->request_list, list)
  2101.                 i915_set_reset_status(dev_priv, request->ctx, false);
  2102. }
  2103.  
  2104. static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
  2105.                                         struct intel_engine_cs *ring)
  2106. {
  2107.         while (!list_empty(&ring->active_list)) {
  2108.                 struct drm_i915_gem_object *obj;
  2109.  
  2110.                 obj = list_first_entry(&ring->active_list,
  2111.                                        struct drm_i915_gem_object,
  2112.                                        ring_list);
  2113.  
  2114.                 i915_gem_object_move_to_inactive(obj);
  2115.         }
  2116.  
  2117.         /*
  2118.          * Clear the execlists queue up before freeing the requests, as those
  2119.          * are the ones that keep the context and ringbuffer backing objects
  2120.          * pinned in place.
  2121.          */
  2122.         while (!list_empty(&ring->execlist_queue)) {
  2123.                 struct intel_ctx_submit_request *submit_req;
  2124.  
  2125.                 submit_req = list_first_entry(&ring->execlist_queue,
  2126.                                 struct intel_ctx_submit_request,
  2127.                                 execlist_link);
  2128.                 list_del(&submit_req->execlist_link);
  2129.                 intel_runtime_pm_put(dev_priv);
  2130.                 i915_gem_context_unreference(submit_req->ctx);
  2131.                 kfree(submit_req);
  2132.         }
  2133.  
  2134.         /*
  2135.          * We must free the requests after all the corresponding objects have
  2136.          * been moved off active lists, which is the same order as the normal
  2137.          * retire_requests function does. This is important if objects hold
  2138.          * implicit references on things like e.g. ppgtt address spaces through
  2139.          * the request.
  2140.          */
  2141.         while (!list_empty(&ring->request_list)) {
  2142.                 struct drm_i915_gem_request *request;
  2143.  
  2144.                 request = list_first_entry(&ring->request_list,
  2145.                                            struct drm_i915_gem_request,
  2146.                                            list);
  2147.  
  2148.                 i915_gem_free_request(request);
  2149.         }
  2150.  
  2151.         /* These may not have been flushed before the reset, so do so now */
  2152.         kfree(ring->preallocated_lazy_request);
  2153.         ring->preallocated_lazy_request = NULL;
  2154.         ring->outstanding_lazy_seqno = 0;
  2155. }
  2156.  
  2157. void i915_gem_restore_fences(struct drm_device *dev)
  2158. {
  2159.         struct drm_i915_private *dev_priv = dev->dev_private;
  2160.         int i;
  2161.  
  2162.         for (i = 0; i < dev_priv->num_fence_regs; i++) {
  2163.                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
  2164.  
  2165.                 /*
  2166.                  * Commit delayed tiling changes if we have an object still
  2167.                  * attached to the fence, otherwise just clear the fence.
  2168.                  */
  2169.                 if (reg->obj) {
  2170.                         i915_gem_object_update_fence(reg->obj, reg,
  2171.                                                      reg->obj->tiling_mode);
  2172.                 } else {
  2173.                         i915_gem_write_fence(dev, i, NULL);
  2174.                 }
  2175.         }
  2176. }
  2177.  
  2178. void i915_gem_reset(struct drm_device *dev)
  2179. {
  2180.         struct drm_i915_private *dev_priv = dev->dev_private;
  2181.         struct intel_engine_cs *ring;
  2182.         int i;
  2183.  
  2184.         /*
  2185.          * Before we free the objects from the requests, we need to inspect
  2186.          * them for finding the guilty party. As the requests only borrow
  2187.          * their reference to the objects, the inspection must be done first.
  2188.          */
  2189.         for_each_ring(ring, dev_priv, i)
  2190.                 i915_gem_reset_ring_status(dev_priv, ring);
  2191.  
  2192.         for_each_ring(ring, dev_priv, i)
  2193.                 i915_gem_reset_ring_cleanup(dev_priv, ring);
  2194.  
  2195.         i915_gem_context_reset(dev);
  2196.  
  2197.         i915_gem_restore_fences(dev);
  2198. }
  2199.  
  2200. /**
  2201.  * This function clears the request list as sequence numbers are passed.
  2202.  */
  2203. void
  2204. i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
  2205. {
  2206.         uint32_t seqno;
  2207.  
  2208.         if (list_empty(&ring->request_list))
  2209.                 return;
  2210.  
  2211.         WARN_ON(i915_verify_lists(ring->dev));
  2212.  
  2213.         seqno = ring->get_seqno(ring, true);
  2214.  
  2215.         /* Move any buffers on the active list that are no longer referenced
  2216.          * by the ringbuffer to the flushing/inactive lists as appropriate,
  2217.          * before we free the context associated with the requests.
  2218.          */
  2219.         while (!list_empty(&ring->active_list)) {
  2220.                 struct drm_i915_gem_object *obj;
  2221.  
  2222.                 obj = list_first_entry(&ring->active_list,
  2223.                                       struct drm_i915_gem_object,
  2224.                                       ring_list);
  2225.  
  2226.                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
  2227.                         break;
  2228.  
  2229.                 i915_gem_object_move_to_inactive(obj);
  2230.         }
  2231.  
  2232.  
  2233.         while (!list_empty(&ring->request_list)) {
  2234.                 struct drm_i915_gem_request *request;
  2235.                 struct intel_ringbuffer *ringbuf;
  2236.  
  2237.                 request = list_first_entry(&ring->request_list,
  2238.                                            struct drm_i915_gem_request,
  2239.                                            list);
  2240.  
  2241.                 if (!i915_seqno_passed(seqno, request->seqno))
  2242.                         break;
  2243.  
  2244.                 trace_i915_gem_request_retire(ring, request->seqno);
  2245.  
  2246.                 /* This is one of the few common intersection points
  2247.                  * between legacy ringbuffer submission and execlists:
  2248.                  * we need to tell them apart in order to find the correct
  2249.                  * ringbuffer to which the request belongs to.
  2250.                  */
  2251.                 if (i915.enable_execlists) {
  2252.                         struct intel_context *ctx = request->ctx;
  2253.                         ringbuf = ctx->engine[ring->id].ringbuf;
  2254.                 } else
  2255.                         ringbuf = ring->buffer;
  2256.  
  2257.                 /* We know the GPU must have read the request to have
  2258.                  * sent us the seqno + interrupt, so use the position
  2259.                  * of the tail of the request to update the last known position
  2260.                  * of the GPU head.
  2261.                  */
  2262.                 ringbuf->last_retired_head = request->tail;
  2263.  
  2264.                 i915_gem_free_request(request);
  2265.         }
  2266.  
  2267.         if (unlikely(ring->trace_irq_seqno &&
  2268.                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
  2269.                 ring->irq_put(ring);
  2270.                 ring->trace_irq_seqno = 0;
  2271.         }
  2272.  
  2273.         WARN_ON(i915_verify_lists(ring->dev));
  2274. }
  2275.  
  2276. bool
  2277. i915_gem_retire_requests(struct drm_device *dev)
  2278. {
  2279.         struct drm_i915_private *dev_priv = dev->dev_private;
  2280.         struct intel_engine_cs *ring;
  2281.         bool idle = true;
  2282.         int i;
  2283.  
  2284.         for_each_ring(ring, dev_priv, i) {
  2285.                 i915_gem_retire_requests_ring(ring);
  2286.                 idle &= list_empty(&ring->request_list);
  2287.                 if (i915.enable_execlists) {
  2288.                         unsigned long flags;
  2289.  
  2290.                         spin_lock_irqsave(&ring->execlist_lock, flags);
  2291.                         idle &= list_empty(&ring->execlist_queue);
  2292.                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
  2293.  
  2294.                         intel_execlists_retire_requests(ring);
  2295.                 }
  2296.         }
  2297.  
  2298.         if (idle)
  2299.                 mod_delayed_work(dev_priv->wq,
  2300.                                    &dev_priv->mm.idle_work,
  2301.                                    msecs_to_jiffies(100));
  2302.  
  2303.         return idle;
  2304. }
  2305.  
  2306. static void
  2307. i915_gem_retire_work_handler(struct work_struct *work)
  2308. {
  2309.         struct drm_i915_private *dev_priv =
  2310.                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
  2311.         struct drm_device *dev = dev_priv->dev;
  2312.         bool idle;
  2313.  
  2314.         /* Come back later if the device is busy... */
  2315.         idle = false;
  2316.         if (mutex_trylock(&dev->struct_mutex)) {
  2317.                 idle = i915_gem_retire_requests(dev);
  2318.                 mutex_unlock(&dev->struct_mutex);
  2319.         }
  2320.         if (!idle)
  2321.                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
  2322.                                    round_jiffies_up_relative(HZ));
  2323. }
  2324.  
  2325. static void
  2326. i915_gem_idle_work_handler(struct work_struct *work)
  2327. {
  2328.         struct drm_i915_private *dev_priv =
  2329.                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
  2330.  
  2331.         intel_mark_idle(dev_priv->dev);
  2332. }
  2333.  
  2334. /**
  2335.  * Ensures that an object will eventually get non-busy by flushing any required
  2336.  * write domains, emitting any outstanding lazy request and retiring any
  2337.  * completed requests.
  2338.  */
  2339. static int
  2340. i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
  2341. {
  2342.         int ret;
  2343.  
  2344.         if (obj->active) {
  2345.                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
  2346.                 if (ret)
  2347.                         return ret;
  2348.  
  2349.                 i915_gem_retire_requests_ring(obj->ring);
  2350.         }
  2351.  
  2352.         return 0;
  2353. }
  2354.  
  2355. /**
  2356.  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  2357.  * @DRM_IOCTL_ARGS: standard ioctl arguments
  2358.  *
  2359.  * Returns 0 if successful, else an error is returned with the remaining time in
  2360.  * the timeout parameter.
  2361.  *  -ETIME: object is still busy after timeout
  2362.  *  -ERESTARTSYS: signal interrupted the wait
  2363.  *  -ENOENT: object doesn't exist
  2364.  * Also possible, but rare:
  2365.  *  -EAGAIN: GPU wedged
  2366.  *  -ENOMEM: damn
  2367.  *  -ENODEV: Internal IRQ fail
  2368.  *  -E?: The add request failed
  2369.  *
  2370.  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
  2371.  * non-zero timeout parameter the wait ioctl will wait for the given number of
  2372.  * nanoseconds on an object becoming unbusy. Since the wait itself does so
  2373.  * without holding struct_mutex the object may become re-busied before this
  2374.  * function completes. A similar but shorter * race condition exists in the busy
  2375.  * function completes. A similar but shorter race condition exists in the busy
  2376.  * ioctl.
  2377. int
  2378. i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  2379. {
  2380.         struct drm_i915_private *dev_priv = dev->dev_private;
  2381.         struct drm_i915_gem_wait *args = data;
  2382.         struct drm_i915_gem_object *obj;
  2383.         struct intel_engine_cs *ring = NULL;
  2384.         unsigned reset_counter;
  2385.         u32 seqno = 0;
  2386.         int ret = 0;
  2387.  
  2388.         if (args->flags != 0)
  2389.                 return -EINVAL;
  2390.  
  2391.         ret = i915_mutex_lock_interruptible(dev);
  2392.         if (ret)
  2393.                 return ret;
  2394.  
  2395.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
  2396.         if (&obj->base == NULL) {
  2397.                 mutex_unlock(&dev->struct_mutex);
  2398.                 return -ENOENT;
  2399.         }
  2400.  
  2401.         /* Need to make sure the object gets inactive eventually. */
  2402.         ret = i915_gem_object_flush_active(obj);
  2403.         if (ret)
  2404.                 goto out;
  2405.  
  2406.         if (obj->active) {
  2407.                 seqno = obj->last_read_seqno;
  2408.                 ring = obj->ring;
  2409.         }
  2410.  
  2411.         if (seqno == 0)
  2412.                  goto out;
  2413.  
  2414.         /* Do this after OLR check to make sure we make forward progress polling
  2415.          * on this IOCTL with a timeout <=0 (like busy ioctl)
  2416.          */
  2417.         if (args->timeout_ns <= 0) {
  2418.                 ret = -ETIME;
  2419.                 goto out;
  2420.         }
  2421.  
  2422.         drm_gem_object_unreference(&obj->base);
  2423.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  2424.         mutex_unlock(&dev->struct_mutex);
  2425.  
  2426.         return __i915_wait_seqno(ring, seqno, reset_counter, true,
  2427.                                  &args->timeout_ns, file->driver_priv);
  2428.  
  2429. out:
  2430.         drm_gem_object_unreference(&obj->base);
  2431.         mutex_unlock(&dev->struct_mutex);
  2432.         return ret;
  2433. }
  2434.  
  2435. /**
  2436.  * i915_gem_object_sync - sync an object to a ring.
  2437.  *
  2438.  * @obj: object which may be in use on another ring.
  2439.  * @to: ring we wish to use the object on. May be NULL.
  2440.  *
  2441.  * This code is meant to abstract object synchronization with the GPU.
  2442.  * Calling with NULL implies synchronizing the object with the CPU
  2443.  * rather than a particular GPU ring.
  2444.  *
  2445.  * Returns 0 if successful, else propagates up the lower layer error.
  2446.  */
  2447. int
  2448. i915_gem_object_sync(struct drm_i915_gem_object *obj,
  2449.                      struct intel_engine_cs *to)
  2450. {
  2451.         struct intel_engine_cs *from = obj->ring;
  2452.         u32 seqno;
  2453.         int ret, idx;
  2454.  
  2455.         if (from == NULL || to == from)
  2456.                 return 0;
  2457.  
  2458.         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
  2459.                 return i915_gem_object_wait_rendering(obj, false);
  2460.  
  2461.         idx = intel_ring_sync_index(from, to);
  2462.  
  2463.         seqno = obj->last_read_seqno;
  2464.         /* Optimization: Avoid semaphore sync when we are sure we already
  2465.          * waited for an object with higher seqno */
  2466.         if (seqno <= from->semaphore.sync_seqno[idx])
  2467.                 return 0;
  2468.  
  2469.         ret = i915_gem_check_olr(obj->ring, seqno);
  2470.         if (ret)
  2471.                 return ret;
  2472.  
  2473.         trace_i915_gem_ring_sync_to(from, to, seqno);
  2474.         ret = to->semaphore.sync_to(to, from, seqno);
  2475.         if (!ret)
  2476.                 /* We use last_read_seqno because sync_to()
  2477.                  * might have just caused seqno wrap under
  2478.                  * the radar.
  2479.                  */
  2480.                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
  2481.  
  2482.         return ret;
  2483. }
  2484.  
  2485. static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
  2486. {
  2487.         u32 old_write_domain, old_read_domains;
  2488.  
  2489.         /* Force a pagefault for domain tracking on next user access */
  2490. //      i915_gem_release_mmap(obj);
  2491.  
  2492.         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  2493.                 return;
  2494.  
  2495.         /* Wait for any direct GTT access to complete */
  2496.         mb();
  2497.  
  2498.         old_read_domains = obj->base.read_domains;
  2499.         old_write_domain = obj->base.write_domain;
  2500.  
  2501.         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
  2502.         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
  2503.  
  2504.         trace_i915_gem_object_change_domain(obj,
  2505.                                             old_read_domains,
  2506.                                             old_write_domain);
  2507. }
  2508.  
  2509. int i915_vma_unbind(struct i915_vma *vma)
  2510. {
  2511.         struct drm_i915_gem_object *obj = vma->obj;
  2512.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2513.         int ret;
  2514.  
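            /* KolibriOS port: the scan-out framebuffer object is never unbound. */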
  2515.     if(obj == get_fb_obj())
  2516.         return 0;
  2517.  
  2518.         if (list_empty(&vma->vma_link))
  2519.                 return 0;
  2520.  
  2521.         if (!drm_mm_node_allocated(&vma->node)) {
  2522.                 i915_gem_vma_destroy(vma);
  2523.                 return 0;
  2524.         }
  2525.  
  2526.         if (vma->pin_count)
  2527.                 return -EBUSY;
  2528.  
  2529.         BUG_ON(obj->pages == NULL);
  2530.  
  2531.         ret = i915_gem_object_finish_gpu(obj);
  2532.         if (ret)
  2533.                 return ret;
  2534.         /* Continue on if we fail due to EIO, the GPU is hung so we
  2535.          * should be safe and we need to cleanup or else we might
  2536.          * cause memory corruption through use-after-free.
  2537.          */
  2538.  
  2539.         /* Throw away the active reference before moving to the unbound list */
  2540.         i915_gem_object_retire(obj);
  2541.  
  2542.         if (i915_is_ggtt(vma->vm)) {
  2543.         i915_gem_object_finish_gtt(obj);
  2544.  
  2545.         /* release the fence reg _after_ flushing */
  2546.         ret = i915_gem_object_put_fence(obj);
  2547.         if (ret)
  2548.                 return ret;
  2549.         }
  2550.  
  2551.         trace_i915_vma_unbind(vma);
  2552.  
  2553.         vma->unbind_vma(vma);
  2554.  
  2555.         list_del_init(&vma->mm_list);
  2556.         if (i915_is_ggtt(vma->vm))
  2557.                 obj->map_and_fenceable = false;
  2558.  
  2559.         drm_mm_remove_node(&vma->node);
  2560.         i915_gem_vma_destroy(vma);
  2561.  
  2562.         /* Since the unbound list is global, only move to that list if
  2563.          * no more VMAs exist. */
  2564.         if (list_empty(&obj->vma_list)) {
  2565.                 i915_gem_gtt_finish_object(obj);
  2566.                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
  2567.         }
  2568.  
  2569.         /* And finally now the object is completely decoupled from this vma,
  2570.          * we can drop its hold on the backing storage and allow it to be
  2571.          * reaped by the shrinker.
  2572.          */
  2573.         i915_gem_object_unpin_pages(obj);
  2574.  
  2575.         return 0;
  2576. }
  2577.  
  2578. int i915_gpu_idle(struct drm_device *dev)
  2579. {
  2580.         struct drm_i915_private *dev_priv = dev->dev_private;
  2581.         struct intel_engine_cs *ring;
  2582.         int ret, i;
  2583.  
  2584.         /* Flush everything onto the inactive list. */
  2585.         for_each_ring(ring, dev_priv, i) {
  2586.                 if (!i915.enable_execlists) {
  2587.                 ret = i915_switch_context(ring, ring->default_context);
  2588.                 if (ret)
  2589.                         return ret;
  2590.                 }
  2591.  
  2592.                 ret = intel_ring_idle(ring);
  2593.                 if (ret)
  2594.                         return ret;
  2595.         }
  2596.  
  2597.         return 0;
  2598. }
  2599.  
  2600. static void i965_write_fence_reg(struct drm_device *dev, int reg,
  2601.                                         struct drm_i915_gem_object *obj)
  2602. {
  2603.         struct drm_i915_private *dev_priv = dev->dev_private;
  2604.         int fence_reg;
  2605.         int fence_pitch_shift;
  2606.  
  2607.         if (INTEL_INFO(dev)->gen >= 6) {
  2608.                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
  2609.                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
  2610.         } else {
  2611.                 fence_reg = FENCE_REG_965_0;
  2612.                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
  2613.         }
  2614.  
  2615.         fence_reg += reg * 8;
  2616.  
  2617.         /* To w/a incoherency with non-atomic 64-bit register updates,
  2618.          * we split the 64-bit update into two 32-bit writes. In order
  2619.          * for a partial fence not to be evaluated between writes, we
  2620.          * precede the update with write to turn off the fence register,
  2621.          * and only enable the fence as the last step.
  2622.          *
  2623.          * For extra levels of paranoia, we make sure each step lands
  2624.          * before applying the next step.
  2625.          */
  2626.         I915_WRITE(fence_reg, 0);
  2627.         POSTING_READ(fence_reg);
  2628.  
  2629.         if (obj) {
  2630.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2631.                 uint64_t val;
  2632.  
  2633.                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
  2634.                                  0xfffff000) << 32;
  2635.                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
  2636.                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
  2637.                 if (obj->tiling_mode == I915_TILING_Y)
  2638.                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
  2639.                 val |= I965_FENCE_REG_VALID;
  2640.  
  2641.                 I915_WRITE(fence_reg + 4, val >> 32);
  2642.                 POSTING_READ(fence_reg + 4);
  2643.  
  2644.                 I915_WRITE(fence_reg + 0, val);
  2645.                 POSTING_READ(fence_reg);
  2646.         } else {
  2647.                 I915_WRITE(fence_reg + 4, 0);
  2648.                 POSTING_READ(fence_reg + 4);
  2649.         }
  2650. }
  2651.  
  2652. static void i915_write_fence_reg(struct drm_device *dev, int reg,
  2653.                                  struct drm_i915_gem_object *obj)
  2654. {
  2655.         struct drm_i915_private *dev_priv = dev->dev_private;
  2656.         u32 val;
  2657.  
  2658.         if (obj) {
  2659.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2660.                 int pitch_val;
  2661.                 int tile_width;
  2662.  
  2663.                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
  2664.                      (size & -size) != size ||
  2665.                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2666.                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
  2667.                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
  2668.  
  2669.                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
  2670.                         tile_width = 128;
  2671.                 else
  2672.                         tile_width = 512;
  2673.  
  2674.                 /* Note: pitch better be a power of two tile widths */
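                        /* pitch_val becomes log2(stride / tile_width); e.g. a 2048-byte stride with 512-byte tiles gives ffs(4) - 1 = 2. */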
  2675.                 pitch_val = obj->stride / tile_width;
  2676.                 pitch_val = ffs(pitch_val) - 1;
  2677.  
  2678.                 val = i915_gem_obj_ggtt_offset(obj);
  2679.                 if (obj->tiling_mode == I915_TILING_Y)
  2680.                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2681.                 val |= I915_FENCE_SIZE_BITS(size);
  2682.                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2683.                 val |= I830_FENCE_REG_VALID;
  2684.         } else
  2685.                 val = 0;
  2686.  
  2687.         if (reg < 8)
  2688.                 reg = FENCE_REG_830_0 + reg * 4;
  2689.         else
  2690.                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
  2691.  
  2692.         I915_WRITE(reg, val);
  2693.         POSTING_READ(reg);
  2694. }
  2695.  
  2696. static void i830_write_fence_reg(struct drm_device *dev, int reg,
  2697.                                 struct drm_i915_gem_object *obj)
  2698. {
  2699.         struct drm_i915_private *dev_priv = dev->dev_private;
  2700.         uint32_t val;
  2701.  
  2702.         if (obj) {
  2703.                 u32 size = i915_gem_obj_ggtt_size(obj);
  2704.                 uint32_t pitch_val;
  2705.  
  2706.                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
  2707.                      (size & -size) != size ||
  2708.                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
  2709.                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
  2710.                      i915_gem_obj_ggtt_offset(obj), size);
  2711.  
  2712.                 pitch_val = obj->stride / 128;
  2713.                 pitch_val = ffs(pitch_val) - 1;
  2714.  
  2715.                 val = i915_gem_obj_ggtt_offset(obj);
  2716.                 if (obj->tiling_mode == I915_TILING_Y)
  2717.                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
  2718.                 val |= I830_FENCE_SIZE_BITS(size);
  2719.                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
  2720.                 val |= I830_FENCE_REG_VALID;
  2721.         } else
  2722.                 val = 0;
  2723.  
  2724.         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
  2725.         POSTING_READ(FENCE_REG_830_0 + reg * 4);
  2726. }
  2727.  
  2728. inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
  2729. {
  2730.         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
  2731. }
  2732.  
  2733. static void i915_gem_write_fence(struct drm_device *dev, int reg,
  2734.                                  struct drm_i915_gem_object *obj)
  2735. {
  2736.         struct drm_i915_private *dev_priv = dev->dev_private;
  2737.  
  2738.         /* Ensure that all CPU reads are completed before installing a fence
  2739.          * and all writes before removing the fence.
  2740.          */
  2741.         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
  2742.                 mb();
  2743.  
  2744.         WARN(obj && (!obj->stride || !obj->tiling_mode),
  2745.              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
  2746.              obj->stride, obj->tiling_mode);
  2747.  
  2748.         switch (INTEL_INFO(dev)->gen) {
  2749.         case 9:
  2750.         case 8:
  2751.         case 7:
  2752.         case 6:
  2753.         case 5:
  2754.         case 4: i965_write_fence_reg(dev, reg, obj); break;
  2755.         case 3: i915_write_fence_reg(dev, reg, obj); break;
  2756.         case 2: i830_write_fence_reg(dev, reg, obj); break;
  2757.         default: BUG();
  2758.         }
  2759.  
  2760.         /* And similarly be paranoid that no direct access to this region
  2761.          * is reordered to before the fence is installed.
  2762.          */
  2763.         if (i915_gem_object_needs_mb(obj))
  2764.                 mb();
  2765. }
  2766.  
  2767. static inline int fence_number(struct drm_i915_private *dev_priv,
  2768.                                struct drm_i915_fence_reg *fence)
  2769. {
  2770.         return fence - dev_priv->fence_regs;
  2771. }
  2772.  
  2773. static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
  2774.                                          struct drm_i915_fence_reg *fence,
  2775.                                          bool enable)
  2776. {
  2777.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2778.         int reg = fence_number(dev_priv, fence);
  2779.  
  2780.         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
  2781.  
  2782.         if (enable) {
  2783.                 obj->fence_reg = reg;
  2784.                 fence->obj = obj;
  2785.                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
  2786.         } else {
  2787.                 obj->fence_reg = I915_FENCE_REG_NONE;
  2788.                 fence->obj = NULL;
  2789.                 list_del_init(&fence->lru_list);
  2790.         }
  2791.         obj->fence_dirty = false;
  2792. }
  2793.  
  2794. static int
  2795. i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
  2796. {
  2797.         if (obj->last_fenced_seqno) {
  2798.                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
  2799.                 if (ret)
  2800.                         return ret;
  2801.  
  2802.                 obj->last_fenced_seqno = 0;
  2803.         }
  2804.  
  2805.         return 0;
  2806. }
  2807.  
  2808. int
  2809. i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
  2810. {
  2811.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  2812.         struct drm_i915_fence_reg *fence;
  2813.         int ret;
  2814.  
  2815.         ret = i915_gem_object_wait_fence(obj);
  2816.     if (ret)
  2817.        return ret;
  2818.  
  2819.         if (obj->fence_reg == I915_FENCE_REG_NONE)
  2820.                 return 0;
  2821.  
  2822.         fence = &dev_priv->fence_regs[obj->fence_reg];
  2823.  
  2824.         if (WARN_ON(fence->pin_count))
  2825.                 return -EBUSY;
  2826.  
  2827.         i915_gem_object_fence_lost(obj);
  2828.         i915_gem_object_update_fence(obj, fence, false);
  2829.  
  2830.         return 0;
  2831. }
  2832.  
  2833. static struct drm_i915_fence_reg *
  2834. i915_find_fence_reg(struct drm_device *dev)
  2835. {
  2836.         struct drm_i915_private *dev_priv = dev->dev_private;
  2837.         struct drm_i915_fence_reg *reg, *avail;
  2838.         int i;
  2839.  
  2840.         /* First try to find a free reg */
  2841.         avail = NULL;
  2842.         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
  2843.                 reg = &dev_priv->fence_regs[i];
  2844.                 if (!reg->obj)
  2845.                         return reg;
  2846.  
  2847.                 if (!reg->pin_count)
  2848.                         avail = reg;
  2849.         }
  2850.  
  2851.         if (avail == NULL)
  2852.                 goto deadlock;
  2853.  
  2854.         /* None available, try to steal one or wait for a user to finish */
  2855.         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
  2856.                 if (reg->pin_count)
  2857.                         continue;
  2858.  
  2859.                 return reg;
  2860.         }
  2861.  
  2862. deadlock:
  2863.         /* Wait for completion of pending flips which consume fences */
  2864. //   if (intel_has_pending_fb_unpin(dev))
  2865. //       return ERR_PTR(-EAGAIN);
  2866.  
  2867.         return ERR_PTR(-EDEADLK);
  2868. }
  2869.  
  2870. /**
  2871.  * i915_gem_object_get_fence - set up fencing for an object
  2872.  * @obj: object to map through a fence reg
  2873.  *
  2874.  * When mapping objects through the GTT, userspace wants to be able to write
  2875.  * to them without having to worry about swizzling if the object is tiled.
  2876.  * This function walks the fence regs looking for a free one for @obj,
  2877.  * stealing one if it can't find any.
  2878.  *
  2879.  * It then sets up the reg based on the object's properties: address, pitch
  2880.  * and tiling format.
  2881.  *
  2882.  * For an untiled surface, this removes any existing fence.
  2883.  */
  2884. int
  2885. i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
  2886. {
  2887.         struct drm_device *dev = obj->base.dev;
  2888.         struct drm_i915_private *dev_priv = dev->dev_private;
  2889.         bool enable = obj->tiling_mode != I915_TILING_NONE;
  2890.         struct drm_i915_fence_reg *reg;
  2891.         int ret;
  2892.  
  2893.         /* Have we updated the tiling parameters upon the object and so
  2894.          * will need to serialise the write to the associated fence register?
  2895.          */
  2896.         if (obj->fence_dirty) {
  2897.                 ret = i915_gem_object_wait_fence(obj);
  2898.                 if (ret)
  2899.                         return ret;
  2900.         }
  2901.  
  2902.         /* Just update our place in the LRU if our fence is getting reused. */
  2903.         if (obj->fence_reg != I915_FENCE_REG_NONE) {
  2904.                 reg = &dev_priv->fence_regs[obj->fence_reg];
  2905.                 if (!obj->fence_dirty) {
  2906.                         list_move_tail(&reg->lru_list,
  2907.                                        &dev_priv->mm.fence_list);
  2908.                         return 0;
  2909.                 }
  2910.         } else if (enable) {
  2911.                 if (WARN_ON(!obj->map_and_fenceable))
  2912.                         return -EINVAL;
  2913.  
  2914.                 reg = i915_find_fence_reg(dev);
  2915.                 if (IS_ERR(reg))
  2916.                         return PTR_ERR(reg);
  2917.  
  2918.                 if (reg->obj) {
  2919.                         struct drm_i915_gem_object *old = reg->obj;
  2920.  
  2921.                         ret = i915_gem_object_wait_fence(old);
  2922.                         if (ret)
  2923.                                 return ret;
  2924.  
  2925.                         i915_gem_object_fence_lost(old);
  2926.                 }
  2927.         } else
  2928.                 return 0;
  2929.  
  2930.         i915_gem_object_update_fence(obj, reg, enable);
  2931.  
  2932.         return 0;
  2933. }
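
/*
 * Illustrative sketch only (kept out of the build like the other #if 0
 * blocks in this file): the usual caller pattern for a tiled scanout
 * buffer, combining a mappable GGTT pin with fence setup via the helpers
 * above. The real display pin-and-fence path lives elsewhere in the
 * driver; error handling here is abbreviated.
 */
#if 0
static int example_pin_and_fence(struct drm_i915_gem_object *obj, u32 alignment)
{
        int ret;

        /* Bind into the mappable aperture so a fence can cover the pages. */
        ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
        if (ret)
                return ret;

        /* Find (or steal) a fence register describing the tiling. */
        ret = i915_gem_object_get_fence(obj);
        if (ret) {
                i915_gem_object_ggtt_unpin(obj);
                return ret;
        }

        /* Keep the register from being stolen while it is in use. */
        i915_gem_object_pin_fence(obj);
        return 0;
}
#endif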
  2934.  
  2935. static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
  2936.                                      unsigned long cache_level)
  2937. {
  2938.         struct drm_mm_node *gtt_space = &vma->node;
  2939.         struct drm_mm_node *other;
  2940.  
  2941.         /*
  2942.          * On some machines we have to be careful when putting differing types
  2943.          * of snoopable memory together to avoid the prefetcher crossing memory
  2944.          * domains and dying. During vm initialisation, we decide whether or not
  2945.          * these constraints apply and set the drm_mm.color_adjust
  2946.          * appropriately.
  2947.          */
  2948.         if (vma->vm->mm.color_adjust == NULL)
  2949.                 return true;
  2950.  
  2951.         if (!drm_mm_node_allocated(gtt_space))
  2952.                 return true;
  2953.  
  2954.         if (list_empty(&gtt_space->node_list))
  2955.                 return true;
  2956.  
  2957.         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
  2958.         if (other->allocated && !other->hole_follows && other->color != cache_level)
  2959.                 return false;
  2960.  
  2961.         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
  2962.         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
  2963.                 return false;
  2964.  
  2965.         return true;
  2966. }
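
/*
 * For context, a rough sketch of the kind of callback vm->mm.color_adjust
 * points at when the constraints above apply (the real hook is installed
 * during GTT init; the details here are illustrative, not a copy of it):
 * it trims the candidate hole so nodes of differing cache "colour" never
 * sit directly adjacent, which is what the neighbour checks above rely on.
 */
#if 0
static void example_color_adjust(struct drm_mm_node *node, unsigned long color,
                                 unsigned long *start, unsigned long *end)
{
        struct drm_mm_node *next = list_entry(node->node_list.next,
                                              struct drm_mm_node, node_list);

        /* Leave a guard page after a neighbour of a different colour... */
        if (node->allocated && node->color != color)
                *start += PAGE_SIZE;

        /* ...and before the next allocated node if its colour differs. */
        if (next->allocated && next->color != color)
                *end -= PAGE_SIZE;
}
#endif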
  2967.  
  2968. /**
  2969.  * Finds free space in the GTT aperture and binds the object there.
  2970.  */
  2971. static struct i915_vma *
  2972. i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
  2973.                            struct i915_address_space *vm,
  2974.                             unsigned alignment,
  2975.                            uint64_t flags)
  2976. {
  2977.         struct drm_device *dev = obj->base.dev;
  2978.         struct drm_i915_private *dev_priv = dev->dev_private;
  2979.         u32 size, fence_size, fence_alignment, unfenced_alignment;
  2980.         unsigned long start =
  2981.                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
  2982.         unsigned long end =
  2983.                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
  2984.         struct i915_vma *vma;
  2985.         int ret;
  2986.  
  2987.         fence_size = i915_gem_get_gtt_size(dev,
  2988.                                            obj->base.size,
  2989.                                            obj->tiling_mode);
  2990.         fence_alignment = i915_gem_get_gtt_alignment(dev,
  2991.                                                      obj->base.size,
  2992.                                                      obj->tiling_mode, true);
  2993.         unfenced_alignment =
  2994.                 i915_gem_get_gtt_alignment(dev,
  2995.                                                     obj->base.size,
  2996.                                                     obj->tiling_mode, false);
  2997.  
  2998.         if (alignment == 0)
  2999.                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
  3000.                                                 unfenced_alignment;
  3001.         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
  3002.                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
  3003.                 return ERR_PTR(-EINVAL);
  3004.         }
  3005.  
  3006.         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
  3007.  
  3008.         /* If the object is bigger than the entire aperture, reject it early
  3009.          * before evicting everything in a vain attempt to find space.
  3010.          */
  3011.         if (obj->base.size > end) {
  3012.                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
  3013.                           obj->base.size,
  3014.                           flags & PIN_MAPPABLE ? "mappable" : "total",
  3015.                           end);
  3016.                 return ERR_PTR(-E2BIG);
  3017.         }
  3018.  
  3019.         ret = i915_gem_object_get_pages(obj);
  3020.         if (ret)
  3021.                 return ERR_PTR(ret);
  3022.  
  3023.         i915_gem_object_pin_pages(obj);
  3024.  
  3025.         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
  3026.         if (IS_ERR(vma))
  3027.                 goto err_unpin;
  3028.  
  3029. search_free:
  3030.         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
  3031.                                                   size, alignment,
  3032.                                                   obj->cache_level,
  3033.                                                   start, end,
  3034.                                                   DRM_MM_SEARCH_DEFAULT,
  3035.                                                   DRM_MM_CREATE_DEFAULT);
  3036.         if (ret) {
  3037.  
  3038.                 goto err_free_vma;
  3039.         }
  3040.         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
  3041.                 ret = -EINVAL;
  3042.                 goto err_remove_node;
  3043.         }
  3044.  
  3045.         ret = i915_gem_gtt_prepare_object(obj);
  3046.         if (ret)
  3047.                 goto err_remove_node;
  3048.  
  3049.         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
  3050.         list_add_tail(&vma->mm_list, &vm->inactive_list);
  3051.  
  3052.         trace_i915_vma_bind(vma, flags);
  3053.         vma->bind_vma(vma, obj->cache_level,
  3054.                       flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
  3055.  
  3056.         return vma;
  3057.  
  3058. err_remove_node:
  3059.         drm_mm_remove_node(&vma->node);
  3060. err_free_vma:
  3061.         i915_gem_vma_destroy(vma);
  3062.         vma = ERR_PTR(ret);
  3063. err_unpin:
  3064.         i915_gem_object_unpin_pages(obj);
  3065.         return vma;
  3066. }
  3067.  
  3068. bool
  3069. i915_gem_clflush_object(struct drm_i915_gem_object *obj,
  3070.                         bool force)
  3071. {
  3072.         /* If we don't have a page list set up, then we're not pinned
  3073.          * to GPU, and we can ignore the cache flush because it'll happen
  3074.          * again at bind time.
  3075.          */
  3076.         if (obj->pages == NULL)
  3077.                 return false;
  3078.  
  3079.         /*
  3080.          * Stolen memory is always coherent with the GPU as it is explicitly
  3081.          * marked as wc by the system, or the system is cache-coherent.
  3082.          */
  3083.         if (obj->stolen || obj->phys_handle)
  3084.                 return false;
  3085.  
  3086.         /* If the GPU is snooping the contents of the CPU cache,
  3087.          * we do not need to manually clear the CPU cache lines.  However,
  3088.          * the caches are only snooped when the render cache is
  3089.          * flushed/invalidated.  As we always have to emit invalidations
  3090.          * and flushes when moving into and out of the RENDER domain, correct
  3091.          * snooping behaviour occurs naturally as the result of our domain
  3092.          * tracking.
  3093.          */
  3094.         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
  3095.                 return false;
  3096.  
  3097.         trace_i915_gem_object_clflush(obj);
  3098.         drm_clflush_sg(obj->pages);
  3099.  
  3100.         return true;
  3101. }
  3102.  
  3103. /** Flushes the GTT write domain for the object if it's dirty. */
  3104. static void
  3105. i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
  3106. {
  3107.         uint32_t old_write_domain;
  3108.  
  3109.         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
  3110.                 return;
  3111.  
  3112.         /* No actual flushing is required for the GTT write domain.  Writes
  3113.          * to it immediately go to main memory as far as we know, so there's
  3114.          * no chipset flush.  It also doesn't land in render cache.
  3115.          *
  3116.          * However, we do have to enforce the order so that all writes through
  3117.          * the GTT land before any writes to the device, such as updates to
  3118.          * the GATT itself.
  3119.          */
  3120.         wmb();
  3121.  
  3122.         old_write_domain = obj->base.write_domain;
  3123.         obj->base.write_domain = 0;
  3124.  
  3125.         intel_fb_obj_flush(obj, false);
  3126.  
  3127.         trace_i915_gem_object_change_domain(obj,
  3128.                                             obj->base.read_domains,
  3129.                                             old_write_domain);
  3130. }
  3131.  
  3132. /** Flushes the CPU write domain for the object if it's dirty. */
  3133. static void
  3134. i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
  3135.                                        bool force)
  3136. {
  3137.         uint32_t old_write_domain;
  3138.  
  3139.         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
  3140.                 return;
  3141.  
  3142.         if (i915_gem_clflush_object(obj, force))
  3143.                 i915_gem_chipset_flush(obj->base.dev);
  3144.  
  3145.         old_write_domain = obj->base.write_domain;
  3146.         obj->base.write_domain = 0;
  3147.  
  3148.         intel_fb_obj_flush(obj, false);
  3149.  
  3150.         trace_i915_gem_object_change_domain(obj,
  3151.                                             obj->base.read_domains,
  3152.                                             old_write_domain);
  3153. }
  3154.  
  3155. /**
  3156.  * Moves a single object to the GTT read, and possibly write domain.
  3157.  *
  3158.  * This function returns when the move is complete, including waiting on
  3159.  * flushes to occur.
  3160.  */
  3161. int
  3162. i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  3163. {
  3164.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  3165.         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
  3166.         uint32_t old_write_domain, old_read_domains;
  3167.         int ret;
  3168.  
  3169.         /* Not valid to be called on unbound objects. */
  3170.         if (vma == NULL)
  3171.                 return -EINVAL;
  3172.  
  3173.         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
  3174.                 return 0;
  3175.  
  3176.         ret = i915_gem_object_wait_rendering(obj, !write);
  3177.         if (ret)
  3178.                 return ret;
  3179.  
  3180.         i915_gem_object_retire(obj);
  3181.         i915_gem_object_flush_cpu_write_domain(obj, false);
  3182.  
  3183.         /* Serialise direct access to this object with the barriers for
  3184.          * coherent writes from the GPU, by effectively invalidating the
  3185.          * GTT domain upon first access.
  3186.          */
  3187.         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
  3188.                 mb();
  3189.  
  3190.         old_write_domain = obj->base.write_domain;
  3191.         old_read_domains = obj->base.read_domains;
  3192.  
  3193.         /* It should now be out of any other write domains, and we can update
  3194.          * the domain values for our changes.
  3195.          */
  3196.         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
  3197.         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  3198.         if (write) {
  3199.                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
  3200.                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
  3201.                 obj->dirty = 1;
  3202.         }
  3203.  
  3204.         if (write)
  3205.                 intel_fb_obj_invalidate(obj, NULL);
  3206.  
  3207.         trace_i915_gem_object_change_domain(obj,
  3208.                                             old_read_domains,
  3209.                                             old_write_domain);
  3210.  
  3211.         /* And bump the LRU for this access */
  3212.         if (i915_gem_object_is_inactive(obj))
  3213.                 list_move_tail(&vma->mm_list,
  3214.                                &dev_priv->gtt.base.inactive_list);
  3215.  
  3216.         return 0;
  3217. }
  3218.  
  3219. int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  3220.                                     enum i915_cache_level cache_level)
  3221. {
  3222.         struct drm_device *dev = obj->base.dev;
  3223.         struct i915_vma *vma, *next;
  3224.         int ret;
  3225.  
  3226.         if (obj->cache_level == cache_level)
  3227.                 return 0;
  3228.  
  3229.         if (i915_gem_obj_is_pinned(obj)) {
  3230.                 DRM_DEBUG("can not change the cache level of pinned objects\n");
  3231.                 return -EBUSY;
  3232.         }
  3233.  
  3234.         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
  3235.                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
  3236.                         ret = i915_vma_unbind(vma);
  3237.                         if (ret)
  3238.                                 return ret;
  3239.                 }
  3240.         }
  3241.  
  3242.         if (i915_gem_obj_bound_any(obj)) {
  3243.                 ret = i915_gem_object_finish_gpu(obj);
  3244.                 if (ret)
  3245.                         return ret;
  3246.  
  3247.                 i915_gem_object_finish_gtt(obj);
  3248.  
  3249.                 /* Before SandyBridge, you could not use tiling or fence
  3250.                  * registers with snooped memory, so relinquish any fences
  3251.                  * currently pointing to our region in the aperture.
  3252.                  */
  3253.                 if (INTEL_INFO(dev)->gen < 6) {
  3254.                         ret = i915_gem_object_put_fence(obj);
  3255.                         if (ret)
  3256.                                 return ret;
  3257.                 }
  3258.  
  3259.                 list_for_each_entry(vma, &obj->vma_list, vma_link)
  3260.                         if (drm_mm_node_allocated(&vma->node))
  3261.                                 vma->bind_vma(vma, cache_level,
  3262.                                                 vma->bound & GLOBAL_BIND);
  3263.         }
  3264.  
  3265.         list_for_each_entry(vma, &obj->vma_list, vma_link)
  3266.                 vma->node.color = cache_level;
  3267.         obj->cache_level = cache_level;
  3268.  
  3269.         if (cpu_write_needs_clflush(obj)) {
  3270.                 u32 old_read_domains, old_write_domain;
  3271.  
  3272.                 /* If we're coming from LLC cached, then we haven't
  3273.                  * actually been tracking whether the data is in the
  3274.                  * CPU cache or not, since we only allow one bit set
  3275.                  * in obj->write_domain and have been skipping the clflushes.
  3276.                  * Just set it to the CPU cache for now.
  3277.                  */
  3278.                 i915_gem_object_retire(obj);
  3279.                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
  3280.  
  3281.                 old_read_domains = obj->base.read_domains;
  3282.                 old_write_domain = obj->base.write_domain;
  3283.  
  3284.                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3285.                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3286.  
  3287.                 trace_i915_gem_object_change_domain(obj,
  3288.                                                     old_read_domains,
  3289.                                                     old_write_domain);
  3290.         }
  3291.  
  3292.         return 0;
  3293. }
  3294.  
  3295. int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
  3296.                                struct drm_file *file)
  3297. {
  3298.         struct drm_i915_gem_caching *args = data;
  3299.         struct drm_i915_gem_object *obj;
  3300.         int ret;
  3301.  
  3302.         ret = i915_mutex_lock_interruptible(dev);
  3303.         if (ret)
  3304.                 return ret;
  3305.  
  3306.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3307.         if (&obj->base == NULL) {
  3308.                 ret = -ENOENT;
  3309.                 goto unlock;
  3310.         }
  3311.  
  3312.         switch (obj->cache_level) {
  3313.         case I915_CACHE_LLC:
  3314.         case I915_CACHE_L3_LLC:
  3315.                 args->caching = I915_CACHING_CACHED;
  3316.                 break;
  3317.  
  3318.         case I915_CACHE_WT:
  3319.                 args->caching = I915_CACHING_DISPLAY;
  3320.                 break;
  3321.  
  3322.         default:
  3323.                 args->caching = I915_CACHING_NONE;
  3324.                 break;
  3325.         }
  3326.  
  3327.         drm_gem_object_unreference(&obj->base);
  3328. unlock:
  3329.         mutex_unlock(&dev->struct_mutex);
  3330.         return ret;
  3331. }
  3332.  
  3333. int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
  3334.                                struct drm_file *file)
  3335. {
  3336.         struct drm_i915_gem_caching *args = data;
  3337.         struct drm_i915_gem_object *obj;
  3338.         enum i915_cache_level level;
  3339.         int ret;
  3340.  
  3341.         switch (args->caching) {
  3342.         case I915_CACHING_NONE:
  3343.                 level = I915_CACHE_NONE;
  3344.                 break;
  3345.         case I915_CACHING_CACHED:
  3346.                 level = I915_CACHE_LLC;
  3347.                 break;
  3348.         case I915_CACHING_DISPLAY:
  3349.                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
  3350.                 break;
  3351.         default:
  3352.                 return -EINVAL;
  3353.         }
  3354.  
  3355.         ret = i915_mutex_lock_interruptible(dev);
  3356.         if (ret)
  3357.                 return ret;
  3358.  
  3359.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3360.         if (&obj->base == NULL) {
  3361.                 ret = -ENOENT;
  3362.                 goto unlock;
  3363.         }
  3364.  
  3365.         ret = i915_gem_object_set_cache_level(obj, level);
  3366.  
  3367.         drm_gem_object_unreference(&obj->base);
  3368. unlock:
  3369.         mutex_unlock(&dev->struct_mutex);
  3370.         return ret;
  3371. }
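
/*
 * For reference, what the pair of ioctls above looks like from userspace on
 * a stock Linux/libdrm stack (illustrative only; KolibriOS clients reach
 * this code through the port's own entry points).
 */
#if 0
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int example_make_uncached(int fd, uint32_t handle)
{
        struct drm_i915_gem_caching arg = {
                .handle  = handle,
                .caching = I915_CACHING_NONE,   /* mapped to I915_CACHE_NONE above */
        };

        return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}
#endif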
  3372.  
  3373. static bool is_pin_display(struct drm_i915_gem_object *obj)
  3374. {
  3375.         struct i915_vma *vma;
  3376.  
  3377.         vma = i915_gem_obj_to_ggtt(obj);
  3378.         if (!vma)
  3379.                 return false;
  3380.  
  3381.         /* There are 3 sources that pin objects:
  3382.          *   1. The display engine (scanouts, sprites, cursors);
  3383.          *   2. Reservations for execbuffer;
  3384.          *   3. The user.
  3385.          *
  3386.          * We can ignore reservations as we hold the struct_mutex and
  3387.          * are only called outside of the reservation path.  The user
  3388.          * can only increment pin_count once, and so if after
  3389.          * subtracting the potential reference by the user, any pin_count
  3390.          * remains, it must be due to another use by the display engine.
  3391.          */
  3392.         return vma->pin_count - !!obj->user_pin_count;
  3393. }
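
/*
 * Worked example of the accounting above (illustrative): with one scanout
 * pin and one user pin, vma->pin_count == 2 and obj->user_pin_count == 1,
 * so 2 - !!1 == 1 and the object still counts as pinned for the display;
 * once the scanout pin is dropped it becomes 1 - 1 == 0.
 */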
  3394.  
  3395. /*
  3396.  * Prepare buffer for display plane (scanout, cursors, etc).
  3397.  * Can be called from an uninterruptible phase (modesetting) and allows
  3398.  * any flushes to be pipelined (for pageflips).
  3399.  */
  3400. int
  3401. i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
  3402.                                      u32 alignment,
  3403.                                      struct intel_engine_cs *pipelined)
  3404. {
  3405.         u32 old_read_domains, old_write_domain;
  3406.         bool was_pin_display;
  3407.         int ret;
  3408.  
  3409.         if (pipelined != obj->ring) {
  3410.                 ret = i915_gem_object_sync(obj, pipelined);
  3411.                 if (ret)
  3412.                         return ret;
  3413.         }
  3414.  
  3415.         /* Mark the pin_display early so that we account for the
  3416.          * display coherency whilst setting up the cache domains.
  3417.          */
  3418.         was_pin_display = obj->pin_display;
  3419.         obj->pin_display = true;
  3420.  
  3421.         /* The display engine is not coherent with the LLC cache on gen6.  As
  3422.          * a result, we make sure that the pinning that is about to occur is
  3423.          * done with uncached PTEs. This is lowest common denominator for all
  3424.          * chipsets.
  3425.          *
  3426.          * However for gen6+, we could do better by using the GFDT bit instead
  3427.          * of uncaching, which would allow us to flush all the LLC-cached data
  3428.          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
  3429.          */
  3430.         ret = i915_gem_object_set_cache_level(obj,
  3431.                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
  3432.         if (ret)
  3433.                 goto err_unpin_display;
  3434.  
  3435.         /* As the user may map the buffer once pinned in the display plane
  3436.          * (e.g. libkms for the bootup splash), we have to ensure that we
  3437.          * always use map_and_fenceable for all scanout buffers.
  3438.          */
  3439.         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
  3440.         if (ret)
  3441.                 goto err_unpin_display;
  3442.  
  3443.         i915_gem_object_flush_cpu_write_domain(obj, true);
  3444.  
  3445.         old_write_domain = obj->base.write_domain;
  3446.         old_read_domains = obj->base.read_domains;
  3447.  
  3448.         /* It should now be out of any other write domains, and we can update
  3449.          * the domain values for our changes.
  3450.          */
  3451.         obj->base.write_domain = 0;
  3452.         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
  3453.  
  3454.         trace_i915_gem_object_change_domain(obj,
  3455.                                             old_read_domains,
  3456.                                             old_write_domain);
  3457.  
  3458.         return 0;
  3459.  
  3460. err_unpin_display:
  3461.         WARN_ON(was_pin_display != is_pin_display(obj));
  3462.         obj->pin_display = was_pin_display;
  3463.         return ret;
  3464. }
  3465.  
  3466. void
  3467. i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
  3468. {
  3469.         i915_gem_object_ggtt_unpin(obj);
  3470.         obj->pin_display = is_pin_display(obj);
  3471. }
  3472.  
  3473. int
  3474. i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
  3475. {
  3476.         int ret;
  3477.  
  3478.         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
  3479.                 return 0;
  3480.  
  3481.         ret = i915_gem_object_wait_rendering(obj, false);
  3482.         if (ret)
  3483.                 return ret;
  3484.  
  3485.         /* Ensure that we invalidate the GPU's caches and TLBs. */
  3486.         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
  3487.         return 0;
  3488. }
  3489.  
  3490. /**
  3491.  * Moves a single object to the CPU read, and possibly write domain.
  3492.  *
  3493.  * This function returns when the move is complete, including waiting on
  3494.  * flushes to occur.
  3495.  */
  3496. int
  3497. i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
  3498. {
  3499.         uint32_t old_write_domain, old_read_domains;
  3500.         int ret;
  3501.  
  3502.         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
  3503.                 return 0;
  3504.  
  3505.         ret = i915_gem_object_wait_rendering(obj, !write);
  3506.         if (ret)
  3507.                 return ret;
  3508.  
  3509.         i915_gem_object_retire(obj);
  3510.         i915_gem_object_flush_gtt_write_domain(obj);
  3511.  
  3512.         old_write_domain = obj->base.write_domain;
  3513.         old_read_domains = obj->base.read_domains;
  3514.  
  3515.         /* Flush the CPU cache if it's still invalid. */
  3516.         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
  3517.                 i915_gem_clflush_object(obj, false);
  3518.  
  3519.                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
  3520.         }
  3521.  
  3522.         /* It should now be out of any other write domains, and we can update
  3523.          * the domain values for our changes.
  3524.          */
  3525.         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
  3526.  
  3527.         /* If we're writing through the CPU, then the GPU read domains will
  3528.          * need to be invalidated at next use.
  3529.          */
  3530.         if (write) {
  3531.                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3532.                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3533.         }
  3534.  
  3535.         if (write)
  3536.                 intel_fb_obj_invalidate(obj, NULL);
  3537.  
  3538.         trace_i915_gem_object_change_domain(obj,
  3539.                                             old_read_domains,
  3540.                                             old_write_domain);
  3541.  
  3542.         return 0;
  3543. }
  3544.  
  3545. /* Throttle our rendering by waiting until the ring has completed our requests
  3546.  * emitted over 20 msec ago.
  3547.  *
  3548.  * Note that if we were to use the current jiffies each time around the loop,
  3549.  * we wouldn't escape the function with any frames outstanding if the time to
  3550.  * render a frame was over 20ms.
  3551.  *
  3552.  * This should get us reasonable parallelism between CPU and GPU but also
  3553.  * relatively low latency when blocking on a particular request to finish.
  3554.  */
  3555. static int
  3556. i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
  3557. {
  3558.         struct drm_i915_private *dev_priv = dev->dev_private;
  3559.         struct drm_i915_file_private *file_priv = file->driver_priv;
  3560.         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
  3561.         struct drm_i915_gem_request *request;
  3562.         struct intel_engine_cs *ring = NULL;
  3563.         unsigned reset_counter;
  3564.         u32 seqno = 0;
  3565.         int ret;
  3566.  
  3567.         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
  3568.         if (ret)
  3569.                 return ret;
  3570.  
  3571.         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
  3572.         if (ret)
  3573.                 return ret;
  3574.  
  3575.         spin_lock(&file_priv->mm.lock);
  3576.         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
  3577.                 if (time_after_eq(request->emitted_jiffies, recent_enough))
  3578.                         break;
  3579.  
  3580.                 ring = request->ring;
  3581.                 seqno = request->seqno;
  3582.         }
  3583.         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  3584.         spin_unlock(&file_priv->mm.lock);
  3585.  
  3586.         if (seqno == 0)
  3587.                 return 0;
  3588.  
  3589.         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
  3590.         if (ret == 0)
  3591.                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  3592.  
  3593.         return ret;
  3594. }
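
/*
 * How the throttle is typically consumed on a Linux/libdrm setup
 * (illustrative only; the ioctl takes no argument structure): calling it
 * once per frame keeps a client from queueing much more than ~20ms of
 * rendering ahead of the GPU.
 */
#if 0
#include <xf86drm.h>
#include <drm/i915_drm.h>

static void example_frame_throttle(int fd)
{
        /* Blocks until requests emitted more than ~20ms ago have completed. */
        drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL);
}
#endif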
  3595.  
  3596. static bool
  3597. i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
  3598. {
  3599.         struct drm_i915_gem_object *obj = vma->obj;
  3600.  
  3601.         if (alignment &&
  3602.             vma->node.start & (alignment - 1))
  3603.                 return true;
  3604.  
  3605.         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
  3606.                 return true;
  3607.  
  3608.         if (flags & PIN_OFFSET_BIAS &&
  3609.             vma->node.start < (flags & PIN_OFFSET_MASK))
  3610.                 return true;
  3611.  
  3612.         return false;
  3613. }
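
/*
 * The alignment test above assumes alignment is a power of two, so
 * (node.start & (alignment - 1)) is non-zero exactly when the start is not
 * a multiple of it; e.g. start 0x11000 with alignment 0x10000 leaves
 * 0x1000 behind and the vma is reported as misplaced.
 */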
  3614.  
  3615. int
  3616. i915_gem_object_pin(struct drm_i915_gem_object *obj,
  3617.                     struct i915_address_space *vm,
  3618.                     uint32_t alignment,
  3619.                     uint64_t flags)
  3620. {
  3621.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  3622.         struct i915_vma *vma;
  3623.         unsigned bound;
  3624.         int ret;
  3625.  
  3626.         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
  3627.                 return -ENODEV;
  3628.  
  3629.         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
  3630.                 return -EINVAL;
  3631.  
  3632.         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
  3633.                 return -EINVAL;
  3634.  
  3635.         vma = i915_gem_obj_to_vma(obj, vm);
  3636.         if (vma) {
  3637.                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
  3638.                         return -EBUSY;
  3639.  
  3640.                 if (i915_vma_misplaced(vma, alignment, flags)) {
  3641.                         WARN(vma->pin_count,
  3642.                              "bo is already pinned with incorrect alignment:"
  3643.                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
  3644.                              " obj->map_and_fenceable=%d\n",
  3645.                              i915_gem_obj_offset(obj, vm), alignment,
  3646.                              !!(flags & PIN_MAPPABLE),
  3647.                              obj->map_and_fenceable);
  3648.                         ret = i915_vma_unbind(vma);
  3649.                         if (ret)
  3650.                                 return ret;
  3651.  
  3652.                         vma = NULL;
  3653.                 }
  3654.         }
  3655.  
  3656.         bound = vma ? vma->bound : 0;
  3657.         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
  3658.                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
  3659.                 if (IS_ERR(vma))
  3660.                         return PTR_ERR(vma);
  3661.         }
  3662.  
  3663.         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
  3664.                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
  3665.  
  3666.         if ((bound ^ vma->bound) & GLOBAL_BIND) {
  3667.                 bool mappable, fenceable;
  3668.                 u32 fence_size, fence_alignment;
  3669.  
  3670.                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
  3671.                                                    obj->base.size,
  3672.                                                    obj->tiling_mode);
  3673.                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
  3674.                                                              obj->base.size,
  3675.                                                              obj->tiling_mode,
  3676.                                                              true);
  3677.  
  3678.                 fenceable = (vma->node.size == fence_size &&
  3679.                              (vma->node.start & (fence_alignment - 1)) == 0);
  3680.  
  3681.                 mappable = (vma->node.start + obj->base.size <=
  3682.                             dev_priv->gtt.mappable_end);
  3683.  
  3684.                 obj->map_and_fenceable = mappable && fenceable;
  3685.         }
  3686.  
  3687.         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
  3688.  
  3689.         vma->pin_count++;
  3690.         if (flags & PIN_MAPPABLE)
  3691.                 obj->pin_mappable |= true;
  3692.  
  3693.         return 0;
  3694. }
  3695.  
  3696. void
  3697. i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
  3698. {
  3699.         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
  3700.  
  3701.         BUG_ON(!vma);
  3702.         BUG_ON(vma->pin_count == 0);
  3703.         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
  3704.  
  3705.         if (--vma->pin_count == 0)
  3706.                 obj->pin_mappable = false;
  3707. }
  3708.  
  3709. bool
  3710. i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
  3711. {
  3712.         if (obj->fence_reg != I915_FENCE_REG_NONE) {
  3713.                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  3714.                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
  3715.  
  3716.                 WARN_ON(!ggtt_vma ||
  3717.                         dev_priv->fence_regs[obj->fence_reg].pin_count >
  3718.                         ggtt_vma->pin_count);
  3719.                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
  3720.                 return true;
  3721.         } else
  3722.                 return false;
  3723. }
  3724.  
  3725. void
  3726. i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
  3727. {
  3728.         if (obj->fence_reg != I915_FENCE_REG_NONE) {
  3729.                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  3730.                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
  3731.                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
  3732.         }
  3733. }
  3734.  
  3735. int
  3736. i915_gem_pin_ioctl(struct drm_device *dev, void *data,
  3737.                    struct drm_file *file)
  3738. {
  3739.         struct drm_i915_gem_pin *args = data;
  3740.         struct drm_i915_gem_object *obj;
  3741.         int ret;
  3742.  
  3743.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  3744.                 return -ENODEV;
  3745.  
  3746.         ret = i915_mutex_lock_interruptible(dev);
  3747.         if (ret)
  3748.                 return ret;
  3749.  
  3750.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3751.         if (&obj->base == NULL) {
  3752.                 ret = -ENOENT;
  3753.                 goto unlock;
  3754.         }
  3755.  
  3756.         if (obj->madv != I915_MADV_WILLNEED) {
  3757.                 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
  3758.                 ret = -EFAULT;
  3759.                 goto out;
  3760.         }
  3761.  
  3762.         if (obj->pin_filp != NULL && obj->pin_filp != file) {
  3763.                 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
  3764.                           args->handle);
  3765.                 ret = -EINVAL;
  3766.                 goto out;
  3767.         }
  3768.  
  3769.         if (obj->user_pin_count == ULONG_MAX) {
  3770.                 ret = -EBUSY;
  3771.                 goto out;
  3772.         }
  3773.  
  3774.         if (obj->user_pin_count == 0) {
  3775.                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
  3776.                 if (ret)
  3777.                         goto out;
  3778.         }
  3779.  
  3780.         obj->user_pin_count++;
  3781.         obj->pin_filp = file;
  3782.  
  3783.         args->offset = i915_gem_obj_ggtt_offset(obj);
  3784. out:
  3785.         drm_gem_object_unreference(&obj->base);
  3786. unlock:
  3787.         mutex_unlock(&dev->struct_mutex);
  3788.         return ret;
  3789. }
  3790.  
  3791. int
  3792. i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
  3793.                      struct drm_file *file)
  3794. {
  3795.         struct drm_i915_gem_pin *args = data;
  3796.         struct drm_i915_gem_object *obj;
  3797.         int ret;
  3798.  
  3799.         if (drm_core_check_feature(dev, DRIVER_MODESET))
  3800.                 return -ENODEV;
  3801.  
  3802.         ret = i915_mutex_lock_interruptible(dev);
  3803.         if (ret)
  3804.                 return ret;
  3805.  
  3806.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3807.         if (&obj->base == NULL) {
  3808.                 ret = -ENOENT;
  3809.                 goto unlock;
  3810.         }
  3811.  
  3812.         if (obj->pin_filp != file) {
  3813.                 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
  3814.                           args->handle);
  3815.                 ret = -EINVAL;
  3816.                 goto out;
  3817.         }
  3818.         obj->user_pin_count--;
  3819.         if (obj->user_pin_count == 0) {
  3820.                 obj->pin_filp = NULL;
  3821.                 i915_gem_object_ggtt_unpin(obj);
  3822.         }
  3823.  
  3824. out:
  3825.         drm_gem_object_unreference(&obj->base);
  3826. unlock:
  3827.         mutex_unlock(&dev->struct_mutex);
  3828.         return ret;
  3829. }
  3830.  
  3831. int
  3832. i915_gem_busy_ioctl(struct drm_device *dev, void *data,
  3833.                     struct drm_file *file)
  3834. {
  3835.         struct drm_i915_gem_busy *args = data;
  3836.         struct drm_i915_gem_object *obj;
  3837.         int ret;
  3838.  
  3839.         ret = i915_mutex_lock_interruptible(dev);
  3840.         if (ret)
  3841.                 return ret;
  3842.  
  3843.         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
  3844.         if (&obj->base == NULL) {
  3845.                 ret = -ENOENT;
  3846.                 goto unlock;
  3847.         }
  3848.  
  3849.         /* Count all active objects as busy, even if they are currently not used
  3850.          * by the gpu. Users of this interface expect objects to eventually
  3851.          * become non-busy without any further actions, therefore emit any
  3852.          * necessary flushes here.
  3853.          */
  3854.         ret = i915_gem_object_flush_active(obj);
  3855.  
  3856.         args->busy = obj->active;
  3857.         if (obj->ring) {
  3858.                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
  3859.                 args->busy |= intel_ring_flag(obj->ring) << 16;
  3860.         }
  3861.  
  3862.         drm_gem_object_unreference(&obj->base);
  3863. unlock:
  3864.         mutex_unlock(&dev->struct_mutex);
  3865.         return ret;
  3866. }
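
/*
 * Sketch of how a client decodes args->busy as packed above: the low word
 * reports activity, the upper bits carry intel_ring_flag() of the last ring
 * to use the object. Illustrative libdrm-style code only.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static bool example_bo_is_busy(int fd, uint32_t handle, uint32_t *ring_flag)
{
        struct drm_i915_gem_busy busy = { .handle = handle };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
                return false;

        if (ring_flag)
                *ring_flag = busy.busy >> 16;

        return busy.busy & 0xffff;
}
#endif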
  3867.  
  3868. int
  3869. i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
  3870.                         struct drm_file *file_priv)
  3871. {
  3872.         return i915_gem_ring_throttle(dev, file_priv);
  3873. }
  3874.  
  3875. #if 0
  3876.  
  3877. int
  3878. i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
  3879.                        struct drm_file *file_priv)
  3880. {
  3881.         struct drm_i915_private *dev_priv = dev->dev_private;
  3882.         struct drm_i915_gem_madvise *args = data;
  3883.         struct drm_i915_gem_object *obj;
  3884.         int ret;
  3885.  
  3886.         switch (args->madv) {
  3887.         case I915_MADV_DONTNEED:
  3888.         case I915_MADV_WILLNEED:
  3889.             break;
  3890.         default:
  3891.             return -EINVAL;
  3892.         }
  3893.  
  3894.         ret = i915_mutex_lock_interruptible(dev);
  3895.         if (ret)
  3896.                 return ret;
  3897.  
  3898.         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
  3899.         if (&obj->base == NULL) {
  3900.                 ret = -ENOENT;
  3901.                 goto unlock;
  3902.         }
  3903.  
  3904.         if (i915_gem_obj_is_pinned(obj)) {
  3905.                 ret = -EINVAL;
  3906.                 goto out;
  3907.         }
  3908.  
  3909.         if (obj->pages &&
  3910.             obj->tiling_mode != I915_TILING_NONE &&
  3911.             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
  3912.                 if (obj->madv == I915_MADV_WILLNEED)
  3913.                         i915_gem_object_unpin_pages(obj);
  3914.                 if (args->madv == I915_MADV_WILLNEED)
  3915.                         i915_gem_object_pin_pages(obj);
  3916.         }
  3917.  
  3918.         if (obj->madv != __I915_MADV_PURGED)
  3919.                 obj->madv = args->madv;
  3920.  
  3921.         /* if the object is no longer attached, discard its backing storage */
  3922.         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
  3923.                 i915_gem_object_truncate(obj);
  3924.  
  3925.         args->retained = obj->madv != __I915_MADV_PURGED;
  3926.  
  3927. out:
  3928.         drm_gem_object_unreference(&obj->base);
  3929. unlock:
  3930.         mutex_unlock(&dev->struct_mutex);
  3931.         return ret;
  3932. }
  3933. #endif
  3934.  
  3935. void i915_gem_object_init(struct drm_i915_gem_object *obj,
  3936.                           const struct drm_i915_gem_object_ops *ops)
  3937. {
  3938.         INIT_LIST_HEAD(&obj->global_list);
  3939.         INIT_LIST_HEAD(&obj->ring_list);
  3940.         INIT_LIST_HEAD(&obj->obj_exec_link);
  3941.         INIT_LIST_HEAD(&obj->vma_list);
  3942.  
  3943.         obj->ops = ops;
  3944.  
  3945.         obj->fence_reg = I915_FENCE_REG_NONE;
  3946.         obj->madv = I915_MADV_WILLNEED;
  3947.  
  3948.         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
  3949. }
  3950.  
  3951. static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
  3952.         .get_pages = i915_gem_object_get_pages_gtt,
  3953.         .put_pages = i915_gem_object_put_pages_gtt,
  3954. };
  3955.  
  3956. struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
  3957.                                                   size_t size)
  3958. {
  3959.         struct drm_i915_gem_object *obj;
  3960.         struct address_space *mapping;
  3961.         gfp_t mask;
  3962.  
  3963.         obj = i915_gem_object_alloc(dev);
  3964.         if (obj == NULL)
  3965.                 return NULL;
  3966.  
  3967.         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
  3968.                 i915_gem_object_free(obj);
  3969.                 return NULL;
  3970.         }
  3971.  
  3972.  
  3973.         i915_gem_object_init(obj, &i915_gem_object_ops);
  3974.  
  3975.         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
  3976.         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
  3977.  
  3978.         if (HAS_LLC(dev)) {
  3979.                 /* On some devices, we can have the GPU use the LLC (the CPU
  3980.                  * cache) for about a 10% performance improvement
  3981.                  * compared to uncached.  Graphics requests other than
  3982.                  * display scanout are coherent with the CPU in
  3983.                  * accessing this cache.  This means in this mode we
  3984.                  * don't need to clflush on the CPU side, and on the
  3985.                  * GPU side we only need to flush internal caches to
  3986.                  * get data visible to the CPU.
  3987.                  *
  3988.                  * However, we maintain the display planes as UC, and so
  3989.                  * need to rebind when first used as such.
  3990.                  */
  3991.                 obj->cache_level = I915_CACHE_LLC;
  3992.         } else
  3993.                 obj->cache_level = I915_CACHE_NONE;
  3994.  
  3995.         trace_i915_gem_object_create(obj);
  3996.  
  3997.         return obj;
  3998. }
  3999.  
  4000. void i915_gem_free_object(struct drm_gem_object *gem_obj)
  4001. {
  4002.         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
  4003.         struct drm_device *dev = obj->base.dev;
  4004.         struct drm_i915_private *dev_priv = dev->dev_private;
  4005.         struct i915_vma *vma, *next;
  4006.  
  4007.         intel_runtime_pm_get(dev_priv);
  4008.  
  4009.         trace_i915_gem_object_destroy(obj);
  4010.  
  4011.         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
  4012.                 int ret;
  4013.  
  4014.                 vma->pin_count = 0;
  4015.                 ret = i915_vma_unbind(vma);
  4016.                 if (WARN_ON(ret == -ERESTARTSYS)) {
  4017.                         bool was_interruptible;
  4018. 
  4019.                         was_interruptible = dev_priv->mm.interruptible;
  4020.                         dev_priv->mm.interruptible = false;
  4021. 
  4022.                         WARN_ON(i915_vma_unbind(vma));
  4023. 
  4024.                         dev_priv->mm.interruptible = was_interruptible;
  4025.                 }
  4026.         }
  4027.  
  4028.         /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
  4029.          * before progressing. */
  4030.         if (obj->stolen)
  4031.                 i915_gem_object_unpin_pages(obj);
  4032.  
  4033.         WARN_ON(obj->frontbuffer_bits);
  4034.  
  4035.         if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
  4036.             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
  4037.             obj->tiling_mode != I915_TILING_NONE)
  4038.                 i915_gem_object_unpin_pages(obj);
  4039.  
  4040.         if (WARN_ON(obj->pages_pin_count))
  4041.                 obj->pages_pin_count = 0;
  4042.         i915_gem_object_put_pages(obj);
  4043. //   i915_gem_object_free_mmap_offset(obj);
  4044.  
  4045.         BUG_ON(obj->pages);
  4046.  
  4047.  
  4048.         if (obj->base.filp != NULL)
  4049.         {
  4050. //              printf("filp %p\n", obj->base.filp);
  4051.                 shmem_file_delete(obj->base.filp);
  4052.         }
  4053.  
  4054.         drm_gem_object_release(&obj->base);
  4055.         i915_gem_info_remove_obj(dev_priv, obj->base.size);
  4056.  
  4057.         kfree(obj->bit_17);
  4058.         i915_gem_object_free(obj);
  4059.  
  4060.         intel_runtime_pm_put(dev_priv);
  4061. }
  4062.  
  4063. struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
  4064.                                      struct i915_address_space *vm)
  4065. {
  4066.         struct i915_vma *vma;
  4067.         list_for_each_entry(vma, &obj->vma_list, vma_link)
  4068.                 if (vma->vm == vm)
  4069.                         return vma;
  4070.  
  4071.         return NULL;
  4072. }
  4073.  
  4074. void i915_gem_vma_destroy(struct i915_vma *vma)
  4075. {
  4076.         struct i915_address_space *vm = NULL;
  4077.         WARN_ON(vma->node.allocated);
  4078.  
  4079.         /* Keep the vma as a placeholder in the execbuffer reservation lists */
  4080.         if (!list_empty(&vma->exec_list))
  4081.                 return;
  4082.  
  4083.         vm = vma->vm;
  4084.  
  4085.         if (!i915_is_ggtt(vm))
  4086.                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
  4087.  
  4088.         list_del(&vma->vma_link);
  4089.  
  4090.         kfree(vma);
  4091. }
  4092.  
  4093. #if 0
  4094. int
  4095. i915_gem_suspend(struct drm_device *dev)
  4096. {
  4097.         struct drm_i915_private *dev_priv = dev->dev_private;
  4098.         int ret = 0;
  4099.  
  4100.         mutex_lock(&dev->struct_mutex);
  4101.         ret = i915_gpu_idle(dev);
  4102.         if (ret)
  4103.                 goto err;
  4104.  
  4105.         i915_gem_retire_requests(dev);
  4106.  
  4107.         /* Under UMS, be paranoid and evict. */
  4108.         if (!drm_core_check_feature(dev, DRIVER_MODESET))
  4109.                 i915_gem_evict_everything(dev);
  4110.  
  4111.         i915_gem_stop_ringbuffers(dev);
  4112.         mutex_unlock(&dev->struct_mutex);
  4113.  
  4114.         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
  4115.         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
  4116.         flush_delayed_work(&dev_priv->mm.idle_work);
  4117.  
  4118.         return 0;
  4119.  
  4120. err:
  4121.         mutex_unlock(&dev->struct_mutex);
  4122.         return ret;
  4123. }
  4124. #endif
  4125.  
  4126. int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
  4127. {
  4128.         struct drm_device *dev = ring->dev;
  4129.         struct drm_i915_private *dev_priv = dev->dev_private;
  4130.         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
  4131.         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
  4132.         int i, ret;
  4133.  
  4134.         if (!HAS_L3_DPF(dev) || !remap_info)
  4135.                 return 0;
  4136.  
  4137.         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
  4138.         if (ret)
  4139.                 return ret;
  4140.  
  4141.         /*
  4142.          * Note: We do not worry about the concurrent register cacheline hang
  4143.          * here because no other code should access these registers other than
  4144.          * at initialization time.
  4145.          */
  4146.         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
  4147.                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  4148.                 intel_ring_emit(ring, reg_base + i);
  4149.                 intel_ring_emit(ring, remap_info[i/4]);
  4150.         }
  4151.  
  4152.         intel_ring_advance(ring);
  4153.  
  4154.         return ret;
  4155. }
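
/*
 * Dword accounting for the loop above: each remap register costs three
 * dwords (the MI_LOAD_REGISTER_IMM(1) header, the register offset and the
 * value), and there are GEN7_L3LOG_SIZE / 4 registers per slice, which is
 * why intel_ring_begin() reserves GEN7_L3LOG_SIZE / 4 * 3 dwords.
 */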
  4156.  
  4157. void i915_gem_init_swizzling(struct drm_device *dev)
  4158. {
  4159.         struct drm_i915_private *dev_priv = dev->dev_private;
  4160.  
  4161.         if (INTEL_INFO(dev)->gen < 5 ||
  4162.             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
  4163.                 return;
  4164.  
  4165.         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
  4166.                                  DISP_TILE_SURFACE_SWIZZLING);
  4167.  
  4168.         if (IS_GEN5(dev))
  4169.                 return;
  4170.  
  4171.         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
  4172.         if (IS_GEN6(dev))
  4173.                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
  4174.         else if (IS_GEN7(dev))
  4175.                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
  4176.         else if (IS_GEN8(dev))
  4177.                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
  4178.         else
  4179.                 BUG();
  4180. }
  4181.  
  4182. static bool
  4183. intel_enable_blt(struct drm_device *dev)
  4184. {
  4185.         if (!HAS_BLT(dev))
  4186.                 return false;
  4187.  
  4188.         /* The blitter was dysfunctional on early prototypes */
  4189.         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
  4190.                 DRM_INFO("BLT not supported on this pre-production hardware;"
  4191.                          " graphics performance will be degraded.\n");
  4192.                 return false;
  4193.         }
  4194.  
  4195.         return true;
  4196. }
  4197.  
  4198. static void init_unused_ring(struct drm_device *dev, u32 base)
  4199. {
  4200.         struct drm_i915_private *dev_priv = dev->dev_private;
  4201.  
  4202.         I915_WRITE(RING_CTL(base), 0);
  4203.         I915_WRITE(RING_HEAD(base), 0);
  4204.         I915_WRITE(RING_TAIL(base), 0);
  4205.         I915_WRITE(RING_START(base), 0);
  4206. }
  4207.  
  4208. static void init_unused_rings(struct drm_device *dev)
  4209. {
  4210.         if (IS_I830(dev)) {
  4211.                 init_unused_ring(dev, PRB1_BASE);
  4212.                 init_unused_ring(dev, SRB0_BASE);
  4213.                 init_unused_ring(dev, SRB1_BASE);
  4214.                 init_unused_ring(dev, SRB2_BASE);
  4215.                 init_unused_ring(dev, SRB3_BASE);
  4216.         } else if (IS_GEN2(dev)) {
  4217.                 init_unused_ring(dev, SRB0_BASE);
  4218.                 init_unused_ring(dev, SRB1_BASE);
  4219.         } else if (IS_GEN3(dev)) {
  4220.                 init_unused_ring(dev, PRB1_BASE);
  4221.                 init_unused_ring(dev, PRB2_BASE);
  4222.         }
  4223. }
  4224.  
  4225. int i915_gem_init_rings(struct drm_device *dev)
  4226. {
  4227.         struct drm_i915_private *dev_priv = dev->dev_private;
  4228.         int ret;
  4229.  
  4230.         /*
  4231.          * At least 830 can leave some of the unused rings
  4232.          * "active" (i.e. head != tail) after resume which
  4233.          * will prevent c3 entry. Make sure all unused rings
  4234.          * are totally idle.
  4235.          */
  4236.         init_unused_rings(dev);
  4237.  
  4238.         ret = intel_init_render_ring_buffer(dev);
  4239.         if (ret)
  4240.                 return ret;
  4241.  
   4242.         if (HAS_BSD(dev)) {
  4243.                 ret = intel_init_bsd_ring_buffer(dev);
  4244.                 if (ret)
  4245.                         goto cleanup_render_ring;
  4246.         }
  4247.  
  4248.         if (intel_enable_blt(dev)) {
  4249.                 ret = intel_init_blt_ring_buffer(dev);
  4250.                 if (ret)
  4251.                         goto cleanup_bsd_ring;
  4252.         }
  4253.  
  4254.         if (HAS_VEBOX(dev)) {
  4255.                 ret = intel_init_vebox_ring_buffer(dev);
  4256.                 if (ret)
  4257.                         goto cleanup_blt_ring;
  4258.         }
  4259.  
  4260.         if (HAS_BSD2(dev)) {
  4261.                 ret = intel_init_bsd2_ring_buffer(dev);
  4262.                 if (ret)
  4263.                         goto cleanup_vebox_ring;
  4264.         }
  4265.  
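        /*
         * Note (added for clarity, not in the original file): the seqno is
         * seeded just below the 32-bit wrap point, presumably so that the
         * seqno wraparound handling gets exercised shortly after every init
         * rather than only after billions of requests.
         */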
  4266.         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
  4267.         if (ret)
  4268.                 goto cleanup_bsd2_ring;
  4269.  
  4270.         return 0;
  4271.  
  4272. cleanup_bsd2_ring:
  4273.         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
  4274. cleanup_vebox_ring:
  4275.         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
  4276. cleanup_blt_ring:
  4277.         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
  4278. cleanup_bsd_ring:
  4279.         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
  4280. cleanup_render_ring:
  4281.         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
  4282.  
  4283.         return ret;
  4284. }
  4285.  
  4286. int
  4287. i915_gem_init_hw(struct drm_device *dev)
  4288. {
  4289.         struct drm_i915_private *dev_priv = dev->dev_private;
  4290.         int ret, i;
  4291.  
  4292.         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
  4293.                 return -EIO;
  4294.  
  4295.         if (dev_priv->ellc_size)
  4296.                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  4297.  
  4298.         if (IS_HASWELL(dev))
  4299.                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
  4300.                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
  4301.  
  4302.         if (HAS_PCH_NOP(dev)) {
  4303.                 if (IS_IVYBRIDGE(dev)) {
   4304.                         u32 temp = I915_READ(GEN7_MSG_CTL);
   4305.                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
   4306.                         I915_WRITE(GEN7_MSG_CTL, temp);
  4307.                 } else if (INTEL_INFO(dev)->gen >= 7) {
  4308.                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
  4309.                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
  4310.                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
  4311.                 }
  4312.         }
  4313.  
  4314.         i915_gem_init_swizzling(dev);
  4315.  
  4316.         ret = dev_priv->gt.init_rings(dev);
  4317.         if (ret)
  4318.                 return ret;
  4319.  
  4320.         for (i = 0; i < NUM_L3_SLICES(dev); i++)
  4321.                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
  4322.  
  4323.         /*
   4324.          * XXX: Contexts should only be initialized once. A switch to the
   4325.          * default context, however, is something we'd like to do after
  4326.          * reset or thaw (the latter may not actually be necessary for HW, but
  4327.          * goes with our code better). Context switching requires rings (for
  4328.          * the do_switch), but before enabling PPGTT. So don't move this.
  4329.          */
  4330.         ret = i915_gem_context_enable(dev_priv);
  4331.         if (ret && ret != -EIO) {
  4332.                 DRM_ERROR("Context enable failed %d\n", ret);
  4333.                 i915_gem_cleanup_ringbuffer(dev);
  4334.  
  4335.                 return ret;
  4336.         }
  4337.  
  4338.         ret = i915_ppgtt_init_hw(dev);
  4339.         if (ret && ret != -EIO) {
  4340.                 DRM_ERROR("PPGTT enable failed %d\n", ret);
  4341.                 i915_gem_cleanup_ringbuffer(dev);
  4342.         }
  4343.  
  4344.         return ret;
  4345. }
  4346.  
  4347. int i915_gem_init(struct drm_device *dev)
  4348. {
  4349.         struct drm_i915_private *dev_priv = dev->dev_private;
  4350.         int ret;
  4351.  
  4352.         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
  4353.                         i915.enable_execlists);
  4354.  
  4355.         mutex_lock(&dev->struct_mutex);
  4356.  
  4357.         if (IS_VALLEYVIEW(dev)) {
  4358.                 /* VLVA0 (potential hack), BIOS isn't actually waking us */
  4359.                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
  4360.                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
  4361.                               VLV_GTLC_ALLOWWAKEACK), 10))
  4362.                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
  4363.         }
  4364.  
  4365.         if (!i915.enable_execlists) {
  4366.                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
  4367.                 dev_priv->gt.init_rings = i915_gem_init_rings;
  4368.                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
  4369.                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
  4370.         } else {
  4371.                 dev_priv->gt.do_execbuf = intel_execlists_submission;
  4372.                 dev_priv->gt.init_rings = intel_logical_rings_init;
  4373.                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
  4374.                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
  4375.         }
  4376.  
  4377. //   ret = i915_gem_init_userptr(dev);
  4378. //   if (ret) {
  4379. //       mutex_unlock(&dev->struct_mutex);
  4380. //       return ret;
  4381. //   }
  4382.  
   4383.         i915_gem_init_global_gtt(dev);
  4384.  
  4385.         ret = i915_gem_context_init(dev);
  4386.         if (ret) {
  4387.                 mutex_unlock(&dev->struct_mutex);
  4388.                 return ret;
  4389.         }
  4390.  
  4391.         ret = i915_gem_init_hw(dev);
  4392.         if (ret == -EIO) {
  4393.                 /* Allow ring initialisation to fail by marking the GPU as
   4394.                  * wedged. But we only want to do this when the GPU is angry;
   4395.                  * for any other failure, such as an allocation failure, bail.
  4396.                  */
  4397.                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
  4398.                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
  4399.                 ret = 0;
  4400.         }
  4401.         mutex_unlock(&dev->struct_mutex);
  4402.  
   4403.         return ret;
  4404. }
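
/*
 * Illustrative sketch (not part of the original file): once i915_gem_init()
 * has marked the GPU as wedged above, later callers typically bail out by
 * testing the same bit in the reset counter, roughly:
 *
 *	if (atomic_read(&dev_priv->gpu_error.reset_counter) & I915_WEDGED)
 *		return -EIO;	(GPU unusable, fall back to CPU paths)
 *
 * i915_drv.h wraps this test as i915_terminally_wedged(); it is shown here
 * only to clarify what setting I915_WEDGED means for subsequent GEM calls.
 */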
  4405.  
  4406. void
  4407. i915_gem_cleanup_ringbuffer(struct drm_device *dev)
  4408. {
  4409.         struct drm_i915_private *dev_priv = dev->dev_private;
  4410.         struct intel_engine_cs *ring;
  4411.         int i;
  4412.  
  4413.         for_each_ring(ring, dev_priv, i)
  4414.                 dev_priv->gt.cleanup_ring(ring);
  4415. }
  4416.  
  4417. static void
  4418. init_ring_lists(struct intel_engine_cs *ring)
  4419. {
   4420.         INIT_LIST_HEAD(&ring->active_list);
   4421.         INIT_LIST_HEAD(&ring->request_list);
  4422. }
  4423.  
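/*
 * Note (added for clarity, not in the original file): the GGTT address space
 * is special-cased below because its drm_mm range manager is expected to be
 * set up by the global-GTT init path, so only per-process address spaces get
 * drm_mm_init() from this helper.
 */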
  4424. void i915_init_vm(struct drm_i915_private *dev_priv,
  4425.                          struct i915_address_space *vm)
  4426. {
  4427.         if (!i915_is_ggtt(vm))
  4428.                 drm_mm_init(&vm->mm, vm->start, vm->total);
  4429.         vm->dev = dev_priv->dev;
  4430.         INIT_LIST_HEAD(&vm->active_list);
  4431.         INIT_LIST_HEAD(&vm->inactive_list);
  4432.         INIT_LIST_HEAD(&vm->global_link);
  4433.         list_add_tail(&vm->global_link, &dev_priv->vm_list);
  4434. }
  4435.  
  4436. void
  4437. i915_gem_load(struct drm_device *dev)
  4438. {
  4439.         struct drm_i915_private *dev_priv = dev->dev_private;
   4440.         int i;
  4441.  
  4442.         INIT_LIST_HEAD(&dev_priv->vm_list);
  4443.         i915_init_vm(dev_priv, &dev_priv->gtt.base);
  4444.  
  4445.         INIT_LIST_HEAD(&dev_priv->context_list);
  4446.         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
  4447.         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
   4448.         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   4449.         for (i = 0; i < I915_NUM_RINGS; i++)
   4450.                 init_ring_lists(&dev_priv->ring[i]);
   4451.         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
   4452.                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
  4453.         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  4454.                           i915_gem_retire_work_handler);
  4455.         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
  4456.                           i915_gem_idle_work_handler);
  4457.         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
  4458.  
   4459.         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
   4460.         if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
   4461.                 I915_WRITE(MI_ARB_STATE,
   4462.                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
   4463.         }
  4464.  
   4465.         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
  4466.  
  4467.         /* Old X drivers will take 0-2 for front, back, depth buffers */
  4468.         if (!drm_core_check_feature(dev, DRIVER_MODESET))
  4469.                 dev_priv->fence_reg_start = 3;
  4470.  
  4471.         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
  4472.                 dev_priv->num_fence_regs = 32;
  4473.         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
   4474.                 dev_priv->num_fence_regs = 16;
   4475.         else
   4476.                 dev_priv->num_fence_regs = 8;
  4477.  
   4478.         /* Initialize fence registers to zero */
   4479.         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
   4480.         i915_gem_restore_fences(dev);
   4481.  
   4482.         i915_gem_detect_bit_6_swizzle(dev);
   4483.  
   4484.         dev_priv->mm.interruptible = true;
  4485.  
  4486.         mutex_init(&dev_priv->fb_tracking.lock);
  4487. }
  4488.  
  4489. int i915_gem_open(struct drm_device *dev, struct drm_file *file)
  4490. {
  4491.         struct drm_i915_file_private *file_priv;
  4492.         int ret;
  4493.  
  4494.         DRM_DEBUG_DRIVER("\n");
  4495.  
  4496.         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
  4497.         if (!file_priv)
  4498.                 return -ENOMEM;
  4499.  
  4500.         file->driver_priv = file_priv;
  4501.         file_priv->dev_priv = dev->dev_private;
  4502.         file_priv->file = file;
  4503.  
  4504.         spin_lock_init(&file_priv->mm.lock);
  4505.         INIT_LIST_HEAD(&file_priv->mm.request_list);
  4506. //      INIT_DELAYED_WORK(&file_priv->mm.idle_work,
  4507. //                        i915_gem_file_idle_work_handler);
  4508.  
  4509.         ret = i915_gem_context_open(dev, file);
  4510.         if (ret)
  4511.                 kfree(file_priv);
  4512.  
  4513.         return ret;
  4514. }
  4515.  
  4516. /**
  4517.  * i915_gem_track_fb - update frontbuffer tracking
   4518.  * @old: current GEM buffer for the frontbuffer slots
   4519.  * @new: new GEM buffer for the frontbuffer slots
   4520.  * @frontbuffer_bits: bitmask of frontbuffer slots
  4521.  *
  4522.  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
  4523.  * from @old and setting them in @new. Both @old and @new can be NULL.
  4524.  */
  4525. void i915_gem_track_fb(struct drm_i915_gem_object *old,
  4526.                        struct drm_i915_gem_object *new,
  4527.                        unsigned frontbuffer_bits)
  4528. {
  4529.         if (old) {
  4530.                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
  4531.                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
  4532.                 old->frontbuffer_bits &= ~frontbuffer_bits;
  4533.         }
  4534.  
  4535.         if (new) {
  4536.                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
  4537.                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
  4538.                 new->frontbuffer_bits |= frontbuffer_bits;
  4539.         }
  4540. }
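
/*
 * Usage sketch (assumption, for illustration only): a primary-plane flip from
 * old_fb to new_fb would typically call, under struct_mutex:
 *
 *	i915_gem_track_fb(to_intel_framebuffer(old_fb)->obj,
 *			  to_intel_framebuffer(new_fb)->obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * so that later frontbuffer invalidate/flush notifications are attributed to
 * the new object instead of the old one.
 */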
  4541.  
  4542. static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
  4543. {
  4544.         if (!mutex_is_locked(mutex))
  4545.                 return false;
  4546.  
  4547. #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
  4548.         return mutex->owner == task;
  4549. #else
  4550.         /* Since UP may be pre-empted, we cannot assume that we own the lock */
  4551.         return false;
  4552. #endif
  4553. }
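
/*
 * Illustrative note (not part of the original file): upstream uses this helper
 * in shrinker-style paths to tell "struct_mutex is held by us" apart from
 * "held by somebody else", along the lines of:
 *
 *	if (!mutex_trylock(&dev->struct_mutex)) {
 *		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 *			return false;		(contended by another task)
 *		unlock = false;			(we already own it, don't unlock)
 *	} else {
 *		unlock = true;
 *	}
 */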
  4554.  
  4555. /* All the new VM stuff */
  4556. unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
  4557.                                   struct i915_address_space *vm)
  4558. {
  4559.         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
  4560.         struct i915_vma *vma;
  4561.  
  4562.         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
  4563.  
  4564.         list_for_each_entry(vma, &o->vma_list, vma_link) {
  4565.                 if (vma->vm == vm)
  4566.                         return vma->node.start;
  4567.  
  4568.         }
  4569.         WARN(1, "%s vma for this object not found.\n",
  4570.              i915_is_ggtt(vm) ? "global" : "ppgtt");
  4571.         return -1;
  4572. }
  4573.  
  4574. bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
  4575.                         struct i915_address_space *vm)
  4576. {
  4577.         struct i915_vma *vma;
  4578.  
  4579.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4580.                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
  4581.                         return true;
  4582.  
  4583.         return false;
  4584. }
  4585.  
  4586. bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
  4587. {
  4588.         struct i915_vma *vma;
  4589.  
  4590.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4591.                 if (drm_mm_node_allocated(&vma->node))
  4592.                         return true;
  4593.  
  4594.         return false;
  4595. }
  4596.  
  4597. unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
  4598.                                 struct i915_address_space *vm)
  4599. {
  4600.         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
  4601.         struct i915_vma *vma;
  4602.  
  4603.         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
  4604.  
  4605.         BUG_ON(list_empty(&o->vma_list));
  4606.  
  4607.         list_for_each_entry(vma, &o->vma_list, vma_link)
  4608.                 if (vma->vm == vm)
  4609.                         return vma->node.size;
  4610.  
  4611.         return 0;
  4612. }
  4613.  
  4614.  
  4615.  
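/*
 * Note (added for clarity, not in the original file): this helper relies on
 * the convention that an object's GGTT VMA, when it exists, sits at the head
 * of obj->vma_list; if the first entry belongs to another address space the
 * object has no GGTT binding and NULL is returned. An empty vma_list is not
 * guarded against here; some upstream versions add a WARN_ON(list_empty())
 * check before taking the first entry.
 */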
  4616. struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
  4617. {
  4618.         struct i915_vma *vma;
  4619.  
  4620.         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
  4621.         if (vma->vm != i915_obj_to_ggtt(obj))
  4622.                 return NULL;
  4623.  
  4624.         return vma;
  4625. }
  4626.