Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2012 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20.  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21.  * SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Chris Wilson <chris@chris-wilson.co.uk>
  25.  *
  26.  */
  27.  
  28. #ifdef HAVE_CONFIG_H
  29. #include "config.h"
  30. #endif
  31.  
  32. #include "sna.h"
  33. #include "sna_render.h"
  34. #include "sna_render_inline.h"
  35. #include "gen4_vertex.h"
  36.  
  37. void gen4_vertex_flush(struct sna *sna)
  38. {
  39.     DBG(("%s[%x] = %d\n", __FUNCTION__,
  40.          4*sna->render.vertex_offset,
  41.          sna->render.vertex_index - sna->render.vertex_start));
  42.  
  43.     assert(sna->render.vertex_offset);
  44.     assert(sna->render.vertex_index > sna->render.vertex_start);
  45.  
  46.     sna->kgem.batch[sna->render.vertex_offset] =
  47.         sna->render.vertex_index - sna->render.vertex_start;
  48.     sna->render.vertex_offset = 0;
  49. }
  50.  
/* Retire the current vertex buffer and start a fresh one.
 *
 * Called when the vertex buffer is exhausted mid-batch.  Any pending
 * relocations against the old vbo are resolved, the old vbo is released,
 * and a new linear buffer is allocated and mapped.  Returns the number of
 * floats available in the new buffer, or 0 if no buffer could be mapped
 * (in which case emission falls back to the static vertex_data array).
 */
int gen4_vertex_finish(struct sna *sna)
{
    struct kgem_bo *bo;
    unsigned int i;
    unsigned hint, size;

    DBG(("%s: used=%d / %d\n", __FUNCTION__,
         sna->render.vertex_used, sna->render.vertex_size));
    /* Must not be called while a primitive is still open. */
    assert(sna->render.vertex_offset == 0);
    assert(sna->render.vertex_used);

        sna_vertex_wait__locked(&sna->render);

    /* Note: we only need dword alignment (currently) */

    bo = sna->render.vbo;
    if (bo) {
        /* Patch every batch slot that referenced the old vbo with a
         * real relocation now that the buffer contents are final. */
        for (i = 0; i < sna->render.nvertex_reloc; i++) {
            DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
                 i, sna->render.vertex_reloc[i]));

            sna->kgem.batch[sna->render.vertex_reloc[i]] =
                kgem_add_reloc(&sna->kgem,
                           sna->render.vertex_reloc[i], bo,
                           I915_GEM_DOMAIN_VERTEX << 16,
                           0);
        }

        assert(!sna->render.active);
        /* Detach all vertex state from the retired buffer. */
        sna->render.nvertex_reloc = 0;
        sna->render.vertex_used = 0;
        sna->render.vertex_index = 0;
        sna->render.vbo = NULL;
        sna->render.vb_id = 0;

        kgem_bo_destroy(&sna->kgem, bo);
    }

    /* NOTE: `bo` is stale after the destroy above; it is used here only
     * as a boolean "we previously had a vbo" flag, never dereferenced. */
    hint = CREATE_GTT_MAP;
    if (bo)
        hint |= CREATE_CACHED | CREATE_NO_THROTTLE;

    /* Try 256KiB first, halving down to 16KiB under memory pressure,
     * then a final unconditional attempt without the extra hints. */
    size = 256*1024;
    assert(!sna->render.active);
    sna->render.vertices = NULL;
    sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
    while (sna->render.vbo == NULL && size > 16*1024) {
        size /= 2;
        sna->render.vbo = kgem_create_linear(&sna->kgem, size, hint);
    }
    if (sna->render.vbo == NULL)
        sna->render.vbo = kgem_create_linear(&sna->kgem,
                             256*1024, CREATE_GTT_MAP);
    if (sna->render.vbo)
        sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
    if (sna->render.vertices == NULL) {
        /* Mapping failed: fall back to the embedded vertex_data array. */
        if (sna->render.vbo) {
            kgem_bo_destroy(&sna->kgem, sna->render.vbo);
            sna->render.vbo = NULL;
        }
        sna->render.vertices = sna->render.vertex_data;
        sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
        return 0;
    }

    if (sna->render.vertex_used) {
        /* Carry over vertices already accumulated in vertex_data so the
         * open batch still sees them in the new buffer. */
        DBG(("%s: copying initial buffer x %d to handle=%d\n",
             __FUNCTION__,
             sna->render.vertex_used,
             sna->render.vbo->handle));
        assert(sizeof(float)*sna->render.vertex_used <=
               __kgem_bo_size(sna->render.vbo));
        memcpy(sna->render.vertices,
               sna->render.vertex_data,
               sizeof(float)*sna->render.vertex_used);
    }

    /* Size in floats; clamp below UINT16_MAX, presumably because vertex
     * indices are held in 16 bits — TODO confirm against index users. */
    size = __kgem_bo_size(sna->render.vbo)/4;
    if (size >= UINT16_MAX)
        size = UINT16_MAX - 1;

    DBG(("%s: create vbo handle=%d, size=%d\n",
         __FUNCTION__, sna->render.vbo->handle, size));

    sna->render.vertex_size = size;
    return sna->render.vertex_size - sna->render.vertex_used;
}
  138.  
/* Finalize vertex state at the end of a batch.
 *
 * Resolves all outstanding vertex-buffer relocations against their final
 * backing store: either the existing vbo (kept for reuse when it still
 * has room), a copy of the static vertex_data placed directly into the
 * batch, or a freshly written linear bo.  No-op if no vertex buffer was
 * bound (vb_id == 0).
 */
void gen4_vertex_close(struct sna *sna)
{
    struct kgem_bo *bo, *free_bo = NULL;
    unsigned int i, delta = 0;

    assert(sna->render.vertex_offset == 0);
    if (!sna->render.vb_id)
        return;

    DBG(("%s: used=%d, vbo active? %d, vb=%x, nreloc=%d\n",
         __FUNCTION__, sna->render.vertex_used, sna->render.vbo ? sna->render.vbo->handle : 0,
         sna->render.vb_id, sna->render.nvertex_reloc));

    assert(!sna->render.active);

    bo = sna->render.vbo;
    if (bo) {
        if (sna->render.vertex_size - sna->render.vertex_used < 64) {
            /* Nearly full: release it and revert to the static array
             * for the next batch; destroy after relocs are resolved. */
            DBG(("%s: discarding vbo (full), handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
            sna->render.vbo = NULL;
            sna->render.vertices = sna->render.vertex_data;
            sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
            free_bo = bo;
        } else if (IS_CPU_MAP(bo->map) && !sna->kgem.has_llc) {
            /* Without LLC a CPU map is not coherent with the GPU, so
             * switch to a GTT mapping before reusing the buffer. */
            DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
            sna->render.vertices =
                kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
            if (sna->render.vertices == NULL) {
                /* GTT map failed: drop the vbo entirely. */
                sna->render.vbo = NULL;
                sna->render.vertices = sna->render.vertex_data;
                sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
                free_bo = bo;
            }

        }
    } else {
        /* Vertices live in the static array; they must be uploaded. */
        if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
            /* Room in the batch itself: append the vertex data there
             * and relocate with an offset (delta) into the batch bo. */
            DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
                 sna->render.vertex_used, sna->kgem.nbatch));
            memcpy(sna->kgem.batch + sna->kgem.nbatch,
                   sna->render.vertex_data,
                   sna->render.vertex_used * 4);
            delta = sna->kgem.nbatch * 4;
            bo = NULL;
            sna->kgem.nbatch += sna->render.vertex_used;
        } else {
            /* Otherwise upload into a throwaway linear bo. */
            bo = kgem_create_linear(&sna->kgem,
                        4*sna->render.vertex_used,
                        CREATE_NO_THROTTLE);
            if (bo && !kgem_bo_write(&sna->kgem, bo,
                         sna->render.vertex_data,
                         4*sna->render.vertex_used)) {
                kgem_bo_destroy(&sna->kgem, bo);
                bo = NULL;
            }
            DBG(("%s: new vbo: %d\n", __FUNCTION__,
                 sna->render.vertex_used));
            free_bo = bo;
        }
    }

    /* Patch every recorded batch slot with its final relocation.
     * bo == NULL here means the data sits inside the batch buffer. */
    assert(sna->render.nvertex_reloc);
    for (i = 0; i < sna->render.nvertex_reloc; i++) {
        DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
             i, sna->render.vertex_reloc[i]));

        sna->kgem.batch[sna->render.vertex_reloc[i]] =
            kgem_add_reloc(&sna->kgem,
                       sna->render.vertex_reloc[i], bo,
                       I915_GEM_DOMAIN_VERTEX << 16,
                       delta);
    }
    sna->render.nvertex_reloc = 0;
    sna->render.vb_id = 0;

    if (sna->render.vbo == NULL) {
        /* Back on the static array: reset the write cursor. */
        assert(!sna->render.active);
        sna->render.vertex_used = 0;
        sna->render.vertex_index = 0;
        assert(sna->render.vertices == sna->render.vertex_data);
        assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
    }

    /* Safe to destroy only now that its relocations are recorded. */
    if (free_bo)
        kgem_bo_destroy(&sna->kgem, free_bo);
}
  225.  
  226. fastcall static void
  227. emit_primitive_identity_source_mask(struct sna *sna,
  228.                                     const struct sna_composite_op *op,
  229.                                     const struct sna_composite_rectangles *r)
  230. {
  231.         union {
  232.                 struct sna_coordinate p;
  233.                 float f;
  234.         } dst;
  235.         float src_x, src_y;
  236.         float msk_x, msk_y;
  237.         float w, h;
  238.         float *v;
  239.  
  240.         src_x = r->src.x + op->src.offset[0];
  241.         src_y = r->src.y + op->src.offset[1];
  242.         msk_x = r->mask.x + op->mask.offset[0];
  243.         msk_y = r->mask.y + op->mask.offset[1];
  244.         w = r->width;
  245.         h = r->height;
  246.  
  247.         assert(op->floats_per_rect == 15);
  248.         assert((sna->render.vertex_used % 5) == 0);
  249.         v = sna->render.vertices + sna->render.vertex_used;
  250.         sna->render.vertex_used += 15;
  251.  
  252.         dst.p.x = r->dst.x + r->width;
  253.         dst.p.y = r->dst.y + r->height;
  254.         v[0] = dst.f;
  255.         v[1] = (src_x + w) * op->src.scale[0];
  256.         v[2] = (src_y + h) * op->src.scale[1];
  257.         v[3] = (msk_x + w) * op->mask.scale[0];
  258.         v[4] = (msk_y + h) * op->mask.scale[1];
  259.  
  260.         dst.p.x = r->dst.x;
  261.         v[5] = dst.f;
  262.         v[6] = src_x * op->src.scale[0];
  263.         v[7] = v[2];
  264.         v[8] = msk_x * op->mask.scale[0];
  265.         v[9] = v[4];
  266.  
  267.         dst.p.y = r->dst.y;
  268.         v[10] = dst.f;
  269.         v[11] = v[6];
  270.         v[12] = src_y * op->src.scale[1];
  271.         v[13] = v[8];
  272.         v[14] = msk_y * op->mask.scale[1];
  273. }
  274.  
  275. unsigned gen4_choose_composite_emitter(struct sna_composite_op *tmp)
  276. {
  277.         unsigned vb;
  278.  
  279.         if (tmp->mask.bo) {
  280.                 if (tmp->mask.transform == NULL) {
  281.                         if (tmp->src.is_solid) {
  282.                                 DBG(("%s: solid, identity mask\n", __FUNCTION__));
  283.                         } else if (tmp->src.is_linear) {
  284.                                 DBG(("%s: linear, identity mask\n", __FUNCTION__));
  285.                         } else if (tmp->src.transform == NULL) {
  286.                                 DBG(("%s: identity source, identity mask\n", __FUNCTION__));
  287.                                 tmp->prim_emit = emit_primitive_identity_source_mask;
  288.                                 tmp->floats_per_vertex = 5;
  289.                                 vb = 2 << 2 | 2;
  290.                         } else if (tmp->src.is_affine) {
  291.                                 DBG(("%s: simple src, identity mask\n", __FUNCTION__));
  292.                         } else {
  293.                                 DBG(("%s: projective source, identity mask\n", __FUNCTION__));
  294.                         }
  295.                 } else {
  296.                         DBG(("%s: general mask: floats-per-vertex=%d, vb=%x\n",
  297.                              __FUNCTION__,tmp->floats_per_vertex, vb));
  298.                 }
  299.         } else {
  300.         }
  301.         tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
  302.  
  303.         return vb;
  304. }
  305.