Subversion Repositories Kolibri OS

Rev 6084

/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Mika Kuoppala <mika.kuoppala@intel.com>
 *
 */

#include "i915_drv.h"
#include "intel_renderstate.h"

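/*
 * Select the prebuilt "null" render state batch matching the GPU
 * generation, or NULL if no such batch exists for this generation.
 */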
static const struct intel_renderstate_rodata *
render_state_get_rodata(struct drm_device *dev, const int gen)
{
        switch (gen) {
        case 6:
                return &gen6_null_state;
        case 7:
                return &gen7_null_state;
        case 8:
                return &gen8_null_state;
        case 9:
                return &gen9_null_state;
        }

        return NULL;
}

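/*
 * Allocate a single page for the null state batch, pin it into the global
 * GTT and record its offset. Returns 0 with so->rodata left NULL when this
 * generation has no prebuilt state to load.
 */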
static int render_state_init(struct render_state *so, struct drm_device *dev)
{
        int ret;

        so->gen = INTEL_INFO(dev)->gen;
        so->rodata = render_state_get_rodata(dev, so->gen);
        if (so->rodata == NULL)
                return 0;

        if (so->rodata->batch_items * 4 > 4096)
                return -EINVAL;

        so->obj = i915_gem_alloc_object(dev, 4096);
        if (so->obj == NULL)
                return -ENOMEM;

        ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
        if (ret)
                goto free_gem;

        so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
        return 0;

free_gem:
        drm_gem_object_unreference(&so->obj->base);
        return ret;
}

/*
 * Macro to add commands to the auxiliary batch.
 * This macro only checks for page overflow before inserting the commands;
 * that is sufficient because the null state generator builds the final batch
 * in two passes, constructing commands and state separately. At that point
 * the sizes of both are known, and it compacts them by relocating the state
 * right after the commands, taking care of alignment, so we should have
 * sufficient space below them for adding new commands.
 */
#define OUT_BATCH(batch, i, val)                                \
        do {                                                    \
                if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {  \
                        ret = -ENOSPC;                          \
                        goto err_out;                           \
                }                                               \
                (batch)[(i)++] = (val);                         \
        } while (0)

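/*
 * Fill the pinned object: copy the prebuilt batch into its first page,
 * resolve the relocations against the object's GGTT offset and terminate
 * the batch, recording where an auxiliary batch may be appended.
 */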
static int render_state_setup(struct render_state *so)
{
        const struct intel_renderstate_rodata *rodata = so->rodata;
        unsigned int i = 0, reloc_index = 0;
        struct page *page;
        u32 *d;
        int ret;

        ret = i915_gem_object_set_to_cpu_domain(so->obj, true);
        if (ret)
                return ret;

        page = i915_gem_object_get_dirty_page(so->obj, 0);
        d = kmap(page);

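        /*
         * Copy the batch dwords, patching each offset listed in
         * rodata->reloc[] with the batch's GGTT position; on gen8+ a
         * relocation is 64 bits wide and occupies two consecutive dwords.
         */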
        while (i < rodata->batch_items) {
                u32 s = rodata->batch[i];

                if (i * 4 == rodata->reloc[reloc_index]) {
                        u64 r = s + so->ggtt_offset;
                        s = lower_32_bits(r);
                        if (so->gen >= 8) {
                                if (i + 1 >= rodata->batch_items ||
                                    rodata->batch[i + 1] != 0) {
                                        ret = -EINVAL;
                                        goto err_out;
                                }

                                d[i++] = s;
                                s = upper_32_bits(r);
                        }

                        reloc_index++;
                }

                d[i++] = s;
        }

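        /*
         * Pad to a cacheline boundary with MI_NOOPs; the auxiliary batch
         * begins here and initially contains only its terminating
         * MI_BATCH_BUFFER_END.
         */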
        while (i % CACHELINE_DWORDS)
                OUT_BATCH(d, i, MI_NOOP);

        so->aux_batch_offset = i * sizeof(u32);

        OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
        so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;

        /*
         * Since we are sending a length, we need to strictly conform to
         * all requirements. For Gen2 this must be a multiple of 8.
         */
        so->aux_batch_size = ALIGN(so->aux_batch_size, 8);

        kunmap(page);

        ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
        if (ret)
                return ret;

        if (rodata->reloc[reloc_index] != -1) {
                DRM_ERROR("only %u relocs resolved\n", reloc_index);
                return -EINVAL;
        }

        return 0;

err_out:
        kunmap(page);
        return ret;
}

#undef OUT_BATCH

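/*
 * Release the render state object: unpin it from the GGTT and drop the
 * reference taken in render_state_init().
 */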
void i915_gem_render_state_fini(struct render_state *so)
{
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
}

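/*
 * Build the null render state for the render ring. On success the backing
 * object is pinned and must be released with i915_gem_render_state_fini();
 * so->rodata stays NULL if this generation needs no render state.
 */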
int i915_gem_render_state_prepare(struct intel_engine_cs *ring,
                                  struct render_state *so)
{
        int ret;

        if (WARN_ON(ring->id != RCS))
                return -ENOENT;

        ret = render_state_init(so, ring->dev);
        if (ret)
                return ret;

        if (so->rodata == NULL)
                return 0;

        ret = render_state_setup(so);
        if (ret) {
                i915_gem_render_state_fini(so);
                return ret;
        }

        return 0;
}

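/*
 * Emit the null render state for a request: prepare the batch, dispatch it
 * (plus the auxiliary batch, if present) on the render ring and mark the
 * backing object active against the request.
 */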
int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
        struct render_state so;
        int ret;

        ret = i915_gem_render_state_prepare(req->ring, &so);
        if (ret)
                return ret;

        if (so.rodata == NULL)
                return 0;

        ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset,
                                             so.rodata->batch_items * 4,
                                             I915_DISPATCH_SECURE);
        if (ret)
                goto out;

        if (so.aux_batch_size > 8) {
                ret = req->ring->dispatch_execbuffer(req,
                                                     (so.ggtt_offset +
                                                      so.aux_batch_offset),
                                                     so.aux_batch_size,
                                                     I915_DISPATCH_SECURE);
                if (ret)
                        goto out;
        }

        i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);

out:
        i915_gem_render_state_fini(&so);
        return ret;
}