
  1. /*
  2.  * Copyright 2008 Ben Skeggs
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice shall be included in
  12.  * all copies or substantial portions of the Software.
  13.  *
  14.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20.  * OTHER DEALINGS IN THE SOFTWARE.
  21.  */
  22.  
  23. #include <stdint.h>
  24.  
  25. #include "pipe/p_defines.h"
  26.  
  27. #include "util/u_inlines.h"
  28. #include "util/u_pack_color.h"
  29. #include "util/u_format.h"
  30. #include "util/u_surface.h"
  31.  
  32. #include "os/os_thread.h"
  33.  
  34. #include "nvc0/nvc0_context.h"
  35. #include "nvc0/nvc0_resource.h"
  36.  
  37. #include "nv50/nv50_defs.xml.h"
  38. #include "nv50/nv50_texture.xml.h"
  39.  
  40. /* these are used in nv50_blit.h */
  41. #define NV50_ENG2D_SUPPORTED_FORMATS 0xff9ccfe1cce3ccc9ULL
  42. #define NV50_ENG2D_NOCONVERT_FORMATS 0x009cc02000000000ULL
  43. #define NV50_ENG2D_LUMINANCE_FORMATS 0x001cc02000000000ULL
  44. #define NV50_ENG2D_INTENSITY_FORMATS 0x0080000000000000ULL
  45. #define NV50_ENG2D_OPERATION_FORMATS 0x060001c000638000ULL
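/* Each mask above is a 64-bit bitfield with one bit per hardware surface
 * format id (0xc0..0xff, i.e. bit = id - 0xc0).  nv50_blit.h presumably tests
 * these bits in helpers such as nv50_2d_format_supported() and
 * nv50_2d_dst_format_faithful() (both used below) to classify which formats
 * the 2D engine can read, write or convert.
 */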
  46.  
  47. #define NOUVEAU_DRIVER 0xc0
  48. #include "nv50/nv50_blit.h"
  49.  
  50. static INLINE uint8_t
  51. nvc0_2d_format(enum pipe_format format, boolean dst, boolean dst_src_equal)
  52. {
  53.    uint8_t id = nvc0_format_table[format].rt;
  54.  
  55.    /* The 2D engine treats I8_UNORM like A8_UNORM; use the A8 format for I8 sources. */
  56.    if (!dst && unlikely(format == PIPE_FORMAT_I8_UNORM) && !dst_src_equal)
  57.       return NV50_SURFACE_FORMAT_A8_UNORM;
  58.  
  59.    /* Hardware values for color formats range from 0xc0 to 0xff,
  60.     * but the 2D engine doesn't support all of them.
  61.     */
  62.    if (nv50_2d_format_supported(format))
  63.       return id;
  64.    assert(dst_src_equal);
  65.  
  66.    switch (util_format_get_blocksize(format)) {
  67.    case 1:
  68.       return NV50_SURFACE_FORMAT_R8_UNORM;
  69.    case 2:
  70.       return NV50_SURFACE_FORMAT_R16_UNORM;
  71.    case 4:
  72.       return NV50_SURFACE_FORMAT_BGRA8_UNORM;
  73.    case 8:
  74.       return NV50_SURFACE_FORMAT_RGBA16_UNORM;
  75.    case 16:
  76.       return NV50_SURFACE_FORMAT_RGBA32_FLOAT;
  77.    default:
  78.       assert(0);
  79.       return 0;
  80.    }
  81. }
  82.  
  83. static int
  84. nvc0_2d_texture_set(struct nouveau_pushbuf *push, boolean dst,
  85.                     struct nv50_miptree *mt, unsigned level, unsigned layer,
  86.                     enum pipe_format pformat, boolean dst_src_pformat_equal)
  87. {
  88.    struct nouveau_bo *bo = mt->base.bo;
  89.    uint32_t width, height, depth;
  90.    uint32_t format;
  91.    uint32_t mthd = dst ? NV50_2D_DST_FORMAT : NV50_2D_SRC_FORMAT;
  92.    uint32_t offset = mt->level[level].offset;
  93.  
  94.    format = nvc0_2d_format(pformat, dst, dst_src_pformat_equal);
  95.    if (!format) {
  96.       NOUVEAU_ERR("invalid/unsupported surface format: %s\n",
  97.                   util_format_name(pformat));
  98.       return 1;
  99.    }
  100.  
  101.    width = u_minify(mt->base.base.width0, level) << mt->ms_x;
  102.    height = u_minify(mt->base.base.height0, level) << mt->ms_y;
  103.    depth = u_minify(mt->base.base.depth0, level);
  104.  
  105.    /* layer has to be < depth, and depth > tile depth / 2 */
  106.  
  107.    if (!mt->layout_3d) {
  108.       offset += mt->layer_stride * layer;
  109.       layer = 0;
  110.       depth = 1;
  111.    } else
  112.    if (!dst) {
  113.       offset += nvc0_mt_zslice_offset(mt, level, layer);
  114.       layer = 0;
  115.    }
  116.  
  117.    if (!nouveau_bo_memtype(bo)) {
  118.       BEGIN_NVC0(push, SUBC_2D(mthd), 2);
  119.       PUSH_DATA (push, format);
  120.       PUSH_DATA (push, 1);
  121.       BEGIN_NVC0(push, SUBC_2D(mthd + 0x14), 5);
  122.       PUSH_DATA (push, mt->level[level].pitch);
  123.       PUSH_DATA (push, width);
  124.       PUSH_DATA (push, height);
  125.       PUSH_DATAh(push, bo->offset + offset);
  126.       PUSH_DATA (push, bo->offset + offset);
  127.    } else {
  128.       BEGIN_NVC0(push, SUBC_2D(mthd), 5);
  129.       PUSH_DATA (push, format);
  130.       PUSH_DATA (push, 0);
  131.       PUSH_DATA (push, mt->level[level].tile_mode);
  132.       PUSH_DATA (push, depth);
  133.       PUSH_DATA (push, layer);
  134.       BEGIN_NVC0(push, SUBC_2D(mthd + 0x18), 4);
  135.       PUSH_DATA (push, width);
  136.       PUSH_DATA (push, height);
  137.       PUSH_DATAh(push, bo->offset + offset);
  138.       PUSH_DATA (push, bo->offset + offset);
  139.    }
  140.  
  141. #if 0
  142.    if (dst) {
  143.       BEGIN_NVC0(push, SUBC_2D(NVC0_2D_CLIP_X), 4);
  144.       PUSH_DATA (push, 0);
  145.       PUSH_DATA (push, 0);
  146.       PUSH_DATA (push, width);
  147.       PUSH_DATA (push, height);
  148.    }
  149. #endif
  150.    return 0;
  151. }
  152.  
  153. static int
  154. nvc0_2d_texture_do_copy(struct nouveau_pushbuf *push,
  155.                         struct nv50_miptree *dst, unsigned dst_level,
  156.                         unsigned dx, unsigned dy, unsigned dz,
  157.                         struct nv50_miptree *src, unsigned src_level,
  158.                         unsigned sx, unsigned sy, unsigned sz,
  159.                         unsigned w, unsigned h)
  160. {
  161.    const enum pipe_format dfmt = dst->base.base.format;
  162.    const enum pipe_format sfmt = src->base.base.format;
  163.    int ret;
  164.    boolean eqfmt = dfmt == sfmt;
  165.  
  166.    if (!PUSH_SPACE(push, 2 * 16 + 32))
  167.       return PIPE_ERROR;
  168.  
  169.    ret = nvc0_2d_texture_set(push, TRUE, dst, dst_level, dz, dfmt, eqfmt);
  170.    if (ret)
  171.       return ret;
  172.  
  173.    ret = nvc0_2d_texture_set(push, FALSE, src, src_level, sz, sfmt, eqfmt);
  174.    if (ret)
  175.       return ret;
  176.  
  177.    IMMED_NVC0(push, NVC0_2D(BLIT_CONTROL), 0x00);
  178.    BEGIN_NVC0(push, NVC0_2D(BLIT_DST_X), 4);
  179.    PUSH_DATA (push, dx << dst->ms_x);
  180.    PUSH_DATA (push, dy << dst->ms_y);
  181.    PUSH_DATA (push, w << dst->ms_x);
  182.    PUSH_DATA (push, h << dst->ms_y);
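   /* du/dx and dv/dy below are 32.32 fixed-point deltas; pushing fraction 0
    * and integer 1 selects a 1:1 scale, i.e. an unscaled copy.
    */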
  183.    BEGIN_NVC0(push, NVC0_2D(BLIT_DU_DX_FRACT), 4);
  184.    PUSH_DATA (push, 0);
  185.    PUSH_DATA (push, 1);
  186.    PUSH_DATA (push, 0);
  187.    PUSH_DATA (push, 1);
  188.    BEGIN_NVC0(push, NVC0_2D(BLIT_SRC_X_FRACT), 4);
  189.    PUSH_DATA (push, 0);
  190.    PUSH_DATA (push, sx << src->ms_x);
  191.    PUSH_DATA (push, 0);
  192.    PUSH_DATA (push, sy << src->ms_y);
  193.  
  194.    return 0;
  195. }
  196.  
  197. static void
  198. nvc0_resource_copy_region(struct pipe_context *pipe,
  199.                           struct pipe_resource *dst, unsigned dst_level,
  200.                           unsigned dstx, unsigned dsty, unsigned dstz,
  201.                           struct pipe_resource *src, unsigned src_level,
  202.                           const struct pipe_box *src_box)
  203. {
  204.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  205.    int ret;
  206.    boolean m2mf;
  207.    unsigned dst_layer = dstz, src_layer = src_box->z;
  208.  
  209.    if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
  210.       nouveau_copy_buffer(&nvc0->base,
  211.                           nv04_resource(dst), dstx,
  212.                           nv04_resource(src), src_box->x, src_box->width);
  213.       NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);
  214.       return;
  215.    }
  216.    NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);
  217.  
  218.    /* Sample counts 0 and 1 are equivalent; only 0/1, 2, 4 and 8 are supported. */
  219.    assert((src->nr_samples | 1) == (dst->nr_samples | 1));
  220.  
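   /* Take the M2MF path when no pixel conversion is required: either the
    * formats match, or their block sizes match so a raw copy is still valid.
    * Otherwise fall through to the 2D engine blit below.
    */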
  221.    m2mf = (src->format == dst->format) ||
  222.       (util_format_get_blocksizebits(src->format) ==
  223.        util_format_get_blocksizebits(dst->format));
  224.  
  225.    nv04_resource(dst)->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
  226.  
  227.    if (m2mf) {
  228.       struct nv50_m2mf_rect drect, srect;
  229.       unsigned i;
  230.       unsigned nx = util_format_get_nblocksx(src->format, src_box->width);
  231.       unsigned ny = util_format_get_nblocksy(src->format, src_box->height);
  232.  
  233.       nv50_m2mf_rect_setup(&drect, dst, dst_level, dstx, dsty, dstz);
  234.       nv50_m2mf_rect_setup(&srect, src, src_level,
  235.                            src_box->x, src_box->y, src_box->z);
  236.  
  237.       for (i = 0; i < src_box->depth; ++i) {
  238.          nvc0->m2mf_copy_rect(nvc0, &drect, &srect, nx, ny);
  239.  
  240.          if (nv50_miptree(dst)->layout_3d)
  241.             drect.z++;
  242.          else
  243.             drect.base += nv50_miptree(dst)->layer_stride;
  244.  
  245.          if (nv50_miptree(src)->layout_3d)
  246.             srect.z++;
  247.          else
  248.             srect.base += nv50_miptree(src)->layer_stride;
  249.       }
  250.       return;
  251.    }
  252.  
  253.    assert(nv50_2d_dst_format_faithful(dst->format));
  254.    assert(nv50_2d_src_format_faithful(src->format));
  255.  
  256.    BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(src), RD);
  257.    BCTX_REFN(nvc0->bufctx, 2D, nv04_resource(dst), WR);
  258.    nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
  259.    nouveau_pushbuf_validate(nvc0->base.pushbuf);
  260.  
  261.    for (; dst_layer < dstz + src_box->depth; ++dst_layer, ++src_layer) {
  262.       ret = nvc0_2d_texture_do_copy(nvc0->base.pushbuf,
  263.                                     nv50_miptree(dst), dst_level,
  264.                                     dstx, dsty, dst_layer,
  265.                                     nv50_miptree(src), src_level,
  266.                                     src_box->x, src_box->y, src_layer,
  267.                                     src_box->width, src_box->height);
  268.       if (ret)
  269.          break;
  270.    }
  271.    nouveau_bufctx_reset(nvc0->bufctx, 0);
  272. }
  273.  
  274. static void
  275. nvc0_clear_render_target(struct pipe_context *pipe,
  276.                          struct pipe_surface *dst,
  277.                          const union pipe_color_union *color,
  278.                          unsigned dstx, unsigned dsty,
  279.                          unsigned width, unsigned height)
  280. {
  281.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  282.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  283.    struct nv50_surface *sf = nv50_surface(dst);
  284.    struct nv04_resource *res = nv04_resource(sf->base.texture);
  285.    unsigned z;
  286.  
  287.    if (!PUSH_SPACE(push, 32 + sf->depth))
  288.       return;
  289.  
  290.    PUSH_REFN (push, res->bo, res->domain | NOUVEAU_BO_WR);
  291.  
  292.    BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
  293.    PUSH_DATAf(push, color->f[0]);
  294.    PUSH_DATAf(push, color->f[1]);
  295.    PUSH_DATAf(push, color->f[2]);
  296.    PUSH_DATAf(push, color->f[3]);
  297.  
  298.    BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
  299.    PUSH_DATA (push, ( width << 16) | dstx);
  300.    PUSH_DATA (push, (height << 16) | dsty);
  301.  
  302.    BEGIN_NVC0(push, NVC0_3D(RT_CONTROL), 1);
  303.    PUSH_DATA (push, 1);
  304.    BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(0)), 9);
  305.    PUSH_DATAh(push, res->address + sf->offset);
  306.    PUSH_DATA (push, res->address + sf->offset);
  307.    if (likely(nouveau_bo_memtype(res->bo))) {
  308.       struct nv50_miptree *mt = nv50_miptree(dst->texture);
  309.  
  310.       PUSH_DATA(push, sf->width);
  311.       PUSH_DATA(push, sf->height);
  312.       PUSH_DATA(push, nvc0_format_table[dst->format].rt);
  313.       PUSH_DATA(push, (mt->layout_3d << 16) |
  314.                mt->level[sf->base.u.tex.level].tile_mode);
  315.       PUSH_DATA(push, dst->u.tex.first_layer + sf->depth);
  316.       PUSH_DATA(push, mt->layer_stride >> 2);
  317.       PUSH_DATA(push, dst->u.tex.first_layer);
  318.    } else {
  319.       if (res->base.target == PIPE_BUFFER) {
  320.          PUSH_DATA(push, 262144);
  321.          PUSH_DATA(push, 1);
  322.       } else {
  323.          PUSH_DATA(push, nv50_miptree(&res->base)->level[0].pitch);
  324.          PUSH_DATA(push, sf->height);
  325.       }
  326.       PUSH_DATA(push, nvc0_format_table[sf->base.format].rt);
  327.       PUSH_DATA(push, 1 << 12);
  328.       PUSH_DATA(push, 1);
  329.       PUSH_DATA(push, 0);
  330.       PUSH_DATA(push, 0);
  331.  
  332.       IMMED_NVC0(push, NVC0_3D(ZETA_ENABLE), 0);
  333.  
  334.       /* tiled textures don't have to be fenced, they're not mapped directly */
  335.       nvc0_resource_fence(res, NOUVEAU_BO_WR);
  336.    }
  337.  
  338.    BEGIN_NIC0(push, NVC0_3D(CLEAR_BUFFERS), sf->depth);
  339.    for (z = 0; z < sf->depth; ++z) {
  340.       PUSH_DATA (push, 0x3c |
  341.                  (z << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  342.    }
  343.  
  344.    nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
  345. }
  346.  
  347. static void
  348. nvc0_clear_buffer_cpu(struct pipe_context *pipe,
  349.                       struct pipe_resource *res,
  350.                       unsigned offset, unsigned size,
  351.                       const void *data, int data_size)
  352. {
  353.    struct nv04_resource *buf = nv04_resource(res);
  354.    struct pipe_transfer *pt;
  355.    struct pipe_box box;
  356.    unsigned elements, i;
  357.  
  358.    elements = size / data_size;
  359.  
  360.    u_box_1d(offset, size, &box);
  361.  
  362.    uint8_t *map = buf->vtbl->transfer_map(pipe, res, 0, PIPE_TRANSFER_WRITE,
  363.                                           &box, &pt);
  364.  
  365.    for (i = 0; i < elements; ++i)
  366.       memcpy(&map[i*data_size], data, data_size);
  367.  
  368.    buf->vtbl->transfer_unmap(pipe, pt);
  369. }
  370.  
  371. static void
  372. nvc0_clear_buffer(struct pipe_context *pipe,
  373.                   struct pipe_resource *res,
  374.                   unsigned offset, unsigned size,
  375.                   const void *data, int data_size)
  376. {
  377.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  378.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  379.    struct nv04_resource *buf = nv04_resource(res);
  380.    union pipe_color_union color;
  381.    enum pipe_format dst_fmt;
  382.    unsigned width, height, elements;
  383.  
  384.    assert(res->target == PIPE_BUFFER);
  385.    assert(nouveau_bo_memtype(buf->bo) == 0);
  386.  
  387.    switch (data_size) {
  388.    case 16:
  389.       dst_fmt = PIPE_FORMAT_R32G32B32A32_UINT;
  390.       memcpy(&color.ui, data, 16);
  391.       break;
  392.    case 12:
  393.       /* This doesn't work, RGB32 is not a valid RT format.
  394.        * dst_fmt = PIPE_FORMAT_R32G32B32_UINT;
  395.        * memcpy(&color.ui, data, 12);
  396.        * memset(&color.ui[3], 0, 4);
  397.        */
  398.       break;
  399.    case 8:
  400.       dst_fmt = PIPE_FORMAT_R32G32_UINT;
  401.       memcpy(&color.ui, data, 8);
  402.       memset(&color.ui[2], 0, 8);
  403.       break;
  404.    case 4:
  405.       dst_fmt = PIPE_FORMAT_R32_UINT;
  406.       memcpy(&color.ui, data, 4);
  407.       memset(&color.ui[1], 0, 12);
  408.       break;
  409.    case 2:
  410.       dst_fmt = PIPE_FORMAT_R16_UINT;
  411.       color.ui[0] = util_cpu_to_le32(
  412.             util_le16_to_cpu(*(unsigned short *)data));
  413.       memset(&color.ui[1], 0, 12);
  414.       break;
  415.    case 1:
  416.       dst_fmt = PIPE_FORMAT_R8_UINT;
  417.       color.ui[0] = util_cpu_to_le32(*(unsigned char *)data);
  418.       memset(&color.ui[1], 0, 12);
  419.       break;
  420.    default:
  421.       assert(!"Unsupported element size");
  422.       return;
  423.    }
  424.  
  425.    assert(size % data_size == 0);
  426.  
  427.    if (data_size == 12) {
  428.       /* TODO: Find a way to do this with the GPU! */
  429.       nvc0_clear_buffer_cpu(pipe, res, offset, size, data, data_size);
  430.       return;
  431.    }
  432.  
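   /* Lay the buffer out as a linear "render target" of at most 16384-element
    * rows.  Example: 100000 4-byte elements give height = 7, width = 14285,
    * covering 99995 elements; the remaining 5 are cleared by the second,
    * single-row pass further down.
    */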
  433.    elements = size / data_size;
  434.    height = (elements + 16383) / 16384;
  435.    width = elements / height;
  436.  
  437.    if (!PUSH_SPACE(push, 40))
  438.       return;
  439.  
  440.    PUSH_REFN (push, buf->bo, buf->domain | NOUVEAU_BO_WR);
  441.  
  442.    BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
  443.    PUSH_DATAf(push, color.f[0]);
  444.    PUSH_DATAf(push, color.f[1]);
  445.    PUSH_DATAf(push, color.f[2]);
  446.    PUSH_DATAf(push, color.f[3]);
  447.    BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
  448.    PUSH_DATA (push, width << 16);
  449.    PUSH_DATA (push, height << 16);
  450.  
  451.    IMMED_NVC0(push, NVC0_3D(RT_CONTROL), 1);
  452.  
  453.    BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(0)), 9);
  454.    PUSH_DATAh(push, buf->address + offset);
  455.    PUSH_DATA (push, buf->address + offset);
  456.    PUSH_DATA (push, width * data_size);
  457.    PUSH_DATA (push, height);
  458.    PUSH_DATA (push, nvc0_format_table[dst_fmt].rt);
  459.    PUSH_DATA (push, NVC0_3D_RT_TILE_MODE_LINEAR);
  460.    PUSH_DATA (push, 1);
  461.    PUSH_DATA (push, 0);
  462.    PUSH_DATA (push, 0);
  463.  
  464.    IMMED_NVC0(push, NVC0_3D(ZETA_ENABLE), 0);
  465.  
  466.    IMMED_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 0x3c);
  467.  
  468.    if (width * height != elements) {
  469.       offset += width * height * data_size;
  470.       width = elements - width * height;
  471.       height = 1;
  472.  
  473.       BEGIN_NVC0(push, NVC0_3D(RT_ADDRESS_HIGH(0)), 4);
  474.       PUSH_DATAh(push, buf->address + offset);
  475.       PUSH_DATA (push, buf->address + offset);
  476.       PUSH_DATA (push, width * data_size);
  477.       PUSH_DATA (push, height);
  478.  
  479.       IMMED_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 0x3c);
  480.    }
  481.  
  482.    nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence);
  483.    nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence_wr);
  484.    nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
  485. }
  486.  
  487. static void
  488. nvc0_clear_depth_stencil(struct pipe_context *pipe,
  489.                          struct pipe_surface *dst,
  490.                          unsigned clear_flags,
  491.                          double depth,
  492.                          unsigned stencil,
  493.                          unsigned dstx, unsigned dsty,
  494.                          unsigned width, unsigned height)
  495. {
  496.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  497.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  498.    struct nv50_miptree *mt = nv50_miptree(dst->texture);
  499.    struct nv50_surface *sf = nv50_surface(dst);
  500.    uint32_t mode = 0;
  501.    int unk = mt->base.base.target == PIPE_TEXTURE_2D;
  502.    unsigned z;
  503.  
  504.    if (!PUSH_SPACE(push, 32 + sf->depth))
  505.       return;
  506.  
  507.    PUSH_REFN (push, mt->base.bo, mt->base.domain | NOUVEAU_BO_WR);
  508.  
  509.    if (clear_flags & PIPE_CLEAR_DEPTH) {
  510.       BEGIN_NVC0(push, NVC0_3D(CLEAR_DEPTH), 1);
  511.       PUSH_DATAf(push, depth);
  512.       mode |= NVC0_3D_CLEAR_BUFFERS_Z;
  513.    }
  514.  
  515.    if (clear_flags & PIPE_CLEAR_STENCIL) {
  516.       BEGIN_NVC0(push, NVC0_3D(CLEAR_STENCIL), 1);
  517.       PUSH_DATA (push, stencil & 0xff);
  518.       mode |= NVC0_3D_CLEAR_BUFFERS_S;
  519.    }
  520.  
  521.    BEGIN_NVC0(push, NVC0_3D(SCREEN_SCISSOR_HORIZ), 2);
  522.    PUSH_DATA (push, ( width << 16) | dstx);
  523.    PUSH_DATA (push, (height << 16) | dsty);
  524.  
  525.    BEGIN_NVC0(push, NVC0_3D(ZETA_ADDRESS_HIGH), 5);
  526.    PUSH_DATAh(push, mt->base.address + sf->offset);
  527.    PUSH_DATA (push, mt->base.address + sf->offset);
  528.    PUSH_DATA (push, nvc0_format_table[dst->format].rt);
  529.    PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
  530.    PUSH_DATA (push, mt->layer_stride >> 2);
  531.    BEGIN_NVC0(push, NVC0_3D(ZETA_ENABLE), 1);
  532.    PUSH_DATA (push, 1);
  533.    BEGIN_NVC0(push, NVC0_3D(ZETA_HORIZ), 3);
  534.    PUSH_DATA (push, sf->width);
  535.    PUSH_DATA (push, sf->height);
  536.    PUSH_DATA (push, (unk << 16) | (dst->u.tex.first_layer + sf->depth));
  537.    BEGIN_NVC0(push, NVC0_3D(ZETA_BASE_LAYER), 1);
  538.    PUSH_DATA (push, dst->u.tex.first_layer);
  539.  
  540.    BEGIN_NIC0(push, NVC0_3D(CLEAR_BUFFERS), sf->depth);
  541.    for (z = 0; z < sf->depth; ++z) {
  542.       PUSH_DATA (push, mode |
  543.                  (z << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  544.    }
  545.  
  546.    nvc0->dirty |= NVC0_NEW_FRAMEBUFFER;
  547. }
  548.  
  549. void
  550. nvc0_clear(struct pipe_context *pipe, unsigned buffers,
  551.            const union pipe_color_union *color,
  552.            double depth, unsigned stencil)
  553. {
  554.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  555.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  556.    struct pipe_framebuffer_state *fb = &nvc0->framebuffer;
  557.    unsigned i, j, k;
  558.    uint32_t mode = 0;
  559.  
  560.    /* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
  561.    if (!nvc0_state_validate(nvc0, NVC0_NEW_FRAMEBUFFER, 9 + (fb->nr_cbufs * 2)))
  562.       return;
  563.  
  564.    if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
  565.       BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
  566.       PUSH_DATAf(push, color->f[0]);
  567.       PUSH_DATAf(push, color->f[1]);
  568.       PUSH_DATAf(push, color->f[2]);
  569.       PUSH_DATAf(push, color->f[3]);
  570.       if (buffers & PIPE_CLEAR_COLOR0)
  571.          mode =
  572.             NVC0_3D_CLEAR_BUFFERS_R | NVC0_3D_CLEAR_BUFFERS_G |
  573.             NVC0_3D_CLEAR_BUFFERS_B | NVC0_3D_CLEAR_BUFFERS_A;
  574.    }
  575.  
  576.    if (buffers & PIPE_CLEAR_DEPTH) {
  577.       BEGIN_NVC0(push, NVC0_3D(CLEAR_DEPTH), 1);
  578.       PUSH_DATA (push, fui(depth));
  579.       mode |= NVC0_3D_CLEAR_BUFFERS_Z;
  580.    }
  581.  
  582.    if (buffers & PIPE_CLEAR_STENCIL) {
  583.       BEGIN_NVC0(push, NVC0_3D(CLEAR_STENCIL), 1);
  584.       PUSH_DATA (push, stencil & 0xff);
  585.       mode |= NVC0_3D_CLEAR_BUFFERS_S;
  586.    }
  587.  
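   /* 0x3c is the R|G|B|A group of CLEAR_BUFFERS bits: mode & 0x3c is the
    * color part of the clear, mode & ~0x3c the depth/stencil part.  Splitting
    * them lets the loops below clear only as many layers as each attachment
    * actually has.
    */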
  588.    if (mode) {
  589.       int zs_layers = 0, color0_layers = 0;
  590.       if (fb->cbufs[0] && (mode & 0x3c))
  591.          color0_layers = fb->cbufs[0]->u.tex.last_layer -
  592.             fb->cbufs[0]->u.tex.first_layer + 1;
  593.       if (fb->zsbuf && (mode & ~0x3c))
  594.          zs_layers = fb->zsbuf->u.tex.last_layer -
  595.             fb->zsbuf->u.tex.first_layer + 1;
  596.  
  597.       for (j = 0; j < MIN2(zs_layers, color0_layers); j++) {
  598.          BEGIN_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 1);
  599.          PUSH_DATA(push, mode | (j << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  600.       }
  601.       for (k = j; k < zs_layers; k++) {
  602.          BEGIN_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 1);
  603.          PUSH_DATA(push, (mode & ~0x3c) | (k << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  604.       }
  605.       for (k = j; k < color0_layers; k++) {
  606.          BEGIN_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 1);
  607.          PUSH_DATA(push, (mode & 0x3c) | (k << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  608.       }
  609.    }
  610.  
  611.    for (i = 1; i < fb->nr_cbufs; i++) {
  612.       struct pipe_surface *sf = fb->cbufs[i];
  613.       if (!sf || !(buffers & (PIPE_CLEAR_COLOR0 << i)))
  614.          continue;
  615.       for (j = 0; j <= sf->u.tex.last_layer - sf->u.tex.first_layer; j++) {
  616.          BEGIN_NVC0(push, NVC0_3D(CLEAR_BUFFERS), 1);
  617.          PUSH_DATA (push, (i << 6) | 0x3c |
  618.                     (j << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  619.       }
  620.    }
  621. }
  622.  
  623.  
  624. /* =============================== BLIT CODE ===================================
  625.  */
  626.  
  627. struct nvc0_blitter
  628. {
  629.    struct nvc0_program *fp[NV50_BLIT_MAX_TEXTURE_TYPES][NV50_BLIT_MODES];
  630.    struct nvc0_program vp;
  631.  
  632.    struct nv50_tsc_entry sampler[2]; /* nearest, bilinear */
  633.  
  634.    pipe_mutex mutex;
  635.  
  636.    struct nvc0_screen *screen;
  637. };
  638.  
  639. struct nvc0_blitctx
  640. {
  641.    struct nvc0_context *nvc0;
  642.    struct nvc0_program *fp;
  643.    uint8_t mode;
  644.    uint16_t color_mask;
  645.    uint8_t filter;
  646.    uint8_t render_condition_enable;
  647.    enum pipe_texture_target target;
  648.    struct {
  649.       struct pipe_framebuffer_state fb;
  650.       struct nvc0_rasterizer_stateobj *rast;
  651.       struct nvc0_program *vp;
  652.       struct nvc0_program *tcp;
  653.       struct nvc0_program *tep;
  654.       struct nvc0_program *gp;
  655.       struct nvc0_program *fp;
  656.       unsigned num_textures[5];
  657.       unsigned num_samplers[5];
  658.       struct pipe_sampler_view *texture[2];
  659.       struct nv50_tsc_entry *sampler[2];
  660.       unsigned min_samples;
  661.       uint32_t dirty;
  662.    } saved;
  663.    struct nvc0_rasterizer_stateobj rast;
  664. };
  665.  
  666. static void
  667. nvc0_blitter_make_vp(struct nvc0_blitter *blit)
  668. {
  669.    static const uint32_t code_nvc0[] =
  670.    {
  671.       0xfff11c26, 0x06000080, /* vfetch b64 $r4:$r5 a[0x80] */
  672.       0xfff01c46, 0x06000090, /* vfetch b96 $r0:$r1:$r2 a[0x90] */
  673.       0x13f01c26, 0x0a7e0070, /* export b64 o[0x70] $r4:$r5 */
  674.       0x03f01c46, 0x0a7e0080, /* export b96 o[0x80] $r0:$r1:$r2 */
  675.       0x00001de7, 0x80000000, /* exit */
  676.    };
  677.    static const uint32_t code_nve4[] =
  678.    {
  679.       0x00000007, 0x20000000, /* sched */
  680.       0xfff11c26, 0x06000080, /* vfetch b64 $r4:$r5 a[0x80] */
  681.       0xfff01c46, 0x06000090, /* vfetch b96 $r0:$r1:$r2 a[0x90] */
  682.       0x13f01c26, 0x0a7e0070, /* export b64 o[0x70] $r4:$r5 */
  683.       0x03f01c46, 0x0a7e0080, /* export b96 o[0x80] $r0:$r1:$r2 */
  684.       0x00001de7, 0x80000000, /* exit */
  685.    };
  686.    static const uint32_t code_gk110[] =
  687.    {
  688.       0x00000000, 0x08000000, /* sched */
  689.       0x401ffc12, 0x7ec7fc00, /* ld b64 $r4d a[0x80] 0x0 0x0 */
  690.       0x481ffc02, 0x7ecbfc00, /* ld b96 $r0t a[0x90] 0x0 0x0 */
  691.       0x381ffc12, 0x7f07fc00, /* st b64 a[0x70] $r4d 0x0 0x0 */
  692.       0x401ffc02, 0x7f0bfc00, /* st b96 a[0x80] $r0t 0x0 0x0 */
  693.       0x001c003c, 0x18000000, /* exit */
  694.    };
  695.    static const uint32_t code_gm107[] =
  696.    {
  697.       0xfc0007e0, 0x001f8000, /* sched 0x7e0 0x7e0 0x7e0 */
  698.       0x0807ff04, 0xefd8ff80, /* ld b64 $r4 a[0x80] 0x0 */
  699.       0x0907ff00, 0xefd97f80, /* ld b96 $r0 a[0x90] 0x0 */
  700.       0x0707ff04, 0xeff0ff80, /* st b64 a[0x70] $r4 0x0 */
  701.       0xfc0007e0, 0x00000000, /* sched 0x7e0 0x7e0 0x0 */
  702.       0x0807ff00, 0xeff17f80, /* st b96 a[0x80] $r0 0x0 */
  703.       0x0007000f, 0xe3000000, /* exit */
  704.    };
  705.  
  706.    blit->vp.type = PIPE_SHADER_VERTEX;
  707.    blit->vp.translated = TRUE;
  708.    if (blit->screen->base.class_3d >= GM107_3D_CLASS) {
  709.       blit->vp.code = (uint32_t *)code_gm107; /* const_cast */
  710.       blit->vp.code_size = sizeof(code_gm107);
  711.    } else
  712.    if (blit->screen->base.class_3d >= NVF0_3D_CLASS) {
  713.       blit->vp.code = (uint32_t *)code_gk110; /* const_cast */
  714.       blit->vp.code_size = sizeof(code_gk110);
  715.    } else
  716.    if (blit->screen->base.class_3d >= NVE4_3D_CLASS) {
  717.       blit->vp.code = (uint32_t *)code_nve4; /* const_cast */
  718.       blit->vp.code_size = sizeof(code_nve4);
  719.    } else {
  720.       blit->vp.code = (uint32_t *)code_nvc0; /* const_cast */
  721.       blit->vp.code_size = sizeof(code_nvc0);
  722.    }
  723.    blit->vp.num_gprs = 6;
  724.    blit->vp.vp.edgeflag = PIPE_MAX_ATTRIBS;
  725.  
  726.    blit->vp.hdr[0]  = 0x00020461; /* vertprog magic */
  727.    blit->vp.hdr[4]  = 0x000ff000; /* no outputs read */
  728.    blit->vp.hdr[6]  = 0x00000073; /* a[0x80].xy, a[0x90].xyz */
  729.    blit->vp.hdr[13] = 0x00073000; /* o[0x70].xy, o[0x80].xyz */
  730. }
  731.  
  732. static void
  733. nvc0_blitter_make_sampler(struct nvc0_blitter *blit)
  734. {
  735.    /* clamp to edge, min/max lod = 0, nearest filtering */
  736.  
  737.    blit->sampler[0].id = -1;
  738.  
  739.    blit->sampler[0].tsc[0] = NV50_TSC_0_SRGB_CONVERSION_ALLOWED |
  740.       (NV50_TSC_WRAP_CLAMP_TO_EDGE << NV50_TSC_0_WRAPS__SHIFT) |
  741.       (NV50_TSC_WRAP_CLAMP_TO_EDGE << NV50_TSC_0_WRAPT__SHIFT) |
  742.       (NV50_TSC_WRAP_CLAMP_TO_EDGE << NV50_TSC_0_WRAPR__SHIFT);
  743.    blit->sampler[0].tsc[1] =
  744.       NV50_TSC_1_MAGF_NEAREST | NV50_TSC_1_MINF_NEAREST | NV50_TSC_1_MIPF_NONE;
  745.  
  746.    /* clamp to edge, min/max lod = 0, bilinear filtering */
  747.  
  748.    blit->sampler[1].id = -1;
  749.  
  750.    blit->sampler[1].tsc[0] = blit->sampler[0].tsc[0];
  751.    blit->sampler[1].tsc[1] =
  752.       NV50_TSC_1_MAGF_LINEAR | NV50_TSC_1_MINF_LINEAR | NV50_TSC_1_MIPF_NONE;
  753. }
  754.  
  755. static void
  756. nvc0_blit_select_fp(struct nvc0_blitctx *ctx, const struct pipe_blit_info *info)
  757. {
  758.    struct nvc0_blitter *blitter = ctx->nvc0->screen->blitter;
  759.  
  760.    const enum pipe_texture_target ptarg =
  761.       nv50_blit_reinterpret_pipe_texture_target(info->src.resource->target);
  762.  
  763.    const unsigned targ = nv50_blit_texture_type(ptarg);
  764.    const unsigned mode = ctx->mode;
  765.  
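   /* Fragment programs are built lazily, one per (texture type, blit mode)
    * pair; the check-lock-recheck pattern avoids taking the mutex once the
    * program already exists.
    */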
  766.    if (!blitter->fp[targ][mode]) {
  767.       pipe_mutex_lock(blitter->mutex);
  768.       if (!blitter->fp[targ][mode])
  769.          blitter->fp[targ][mode] =
  770.             nv50_blitter_make_fp(&ctx->nvc0->base.pipe, mode, ptarg);
  771.       pipe_mutex_unlock(blitter->mutex);
  772.    }
  773.    ctx->fp = blitter->fp[targ][mode];
  774. }
  775.  
  776. static void
  777. nvc0_blit_set_dst(struct nvc0_blitctx *ctx,
  778.                   struct pipe_resource *res, unsigned level, unsigned layer,
  779.                   enum pipe_format format)
  780. {
  781.    struct nvc0_context *nvc0 = ctx->nvc0;
  782.    struct pipe_context *pipe = &nvc0->base.pipe;
  783.    struct pipe_surface templ;
  784.  
  785.    if (util_format_is_depth_or_stencil(format))
  786.       templ.format = nv50_blit_zeta_to_colour_format(format);
  787.    else
  788.       templ.format = format;
  789.  
  790.    templ.u.tex.level = level;
  791.    templ.u.tex.first_layer = templ.u.tex.last_layer = layer;
  792.  
  793.    if (layer == -1) {
  794.       templ.u.tex.first_layer = 0;
  795.       templ.u.tex.last_layer =
  796.          (res->target == PIPE_TEXTURE_3D ? res->depth0 : res->array_size) - 1;
  797.    }
  798.  
  799.    nvc0->framebuffer.cbufs[0] = nvc0_miptree_surface_new(pipe, res, &templ);
  800.    nvc0->framebuffer.nr_cbufs = 1;
  801.    nvc0->framebuffer.zsbuf = NULL;
  802.    nvc0->framebuffer.width = nvc0->framebuffer.cbufs[0]->width;
  803.    nvc0->framebuffer.height = nvc0->framebuffer.cbufs[0]->height;
  804. }
  805.  
  806. static void
  807. nvc0_blit_set_src(struct nvc0_blitctx *ctx,
  808.                   struct pipe_resource *res, unsigned level, unsigned layer,
  809.                   enum pipe_format format, const uint8_t filter)
  810. {
  811.    struct nvc0_context *nvc0 = ctx->nvc0;
  812.    struct pipe_context *pipe = &nvc0->base.pipe;
  813.    struct pipe_sampler_view templ;
  814.    uint32_t flags;
  815.    unsigned s;
  816.    enum pipe_texture_target target;
  817.  
  818.    target = nv50_blit_reinterpret_pipe_texture_target(res->target);
  819.  
  820.    templ.format = format;
  821.    templ.u.tex.first_layer = templ.u.tex.last_layer = layer;
  822.    templ.u.tex.first_level = templ.u.tex.last_level = level;
  823.    templ.swizzle_r = PIPE_SWIZZLE_RED;
  824.    templ.swizzle_g = PIPE_SWIZZLE_GREEN;
  825.    templ.swizzle_b = PIPE_SWIZZLE_BLUE;
  826.    templ.swizzle_a = PIPE_SWIZZLE_ALPHA;
  827.  
  828.    if (layer == -1) {
  829.       templ.u.tex.first_layer = 0;
  830.       templ.u.tex.last_layer =
  831.          (res->target == PIPE_TEXTURE_3D ? res->depth0 : res->array_size) - 1;
  832.    }
  833.  
  834.    flags = res->last_level ? 0 : NV50_TEXVIEW_SCALED_COORDS;
  835.    flags |= NV50_TEXVIEW_ACCESS_RESOLVE;
  836.    if (filter && res->nr_samples == 8)
  837.       flags |= NV50_TEXVIEW_FILTER_MSAA8;
  838.  
  839.    nvc0->textures[4][0] = nvc0_create_texture_view(
  840.       pipe, res, &templ, flags, target);
  841.    nvc0->textures[4][1] = NULL;
  842.  
  843.    for (s = 0; s <= 3; ++s)
  844.       nvc0->num_textures[s] = 0;
  845.    nvc0->num_textures[4] = 1;
  846.  
  847.    templ.format = nv50_zs_to_s_format(format);
  848.    if (templ.format != format) {
  849.       nvc0->textures[4][1] = nvc0_create_texture_view(
  850.          pipe, res, &templ, flags, target);
  851.       nvc0->num_textures[4] = 2;
  852.    }
  853. }
  854.  
  855. static void
  856. nvc0_blitctx_prepare_state(struct nvc0_blitctx *blit)
  857. {
  858.    struct nouveau_pushbuf *push = blit->nvc0->base.pushbuf;
  859.  
  860.    /* TODO: maybe make this a MACRO (if we need more logic) ? */
  861.  
  862.    if (blit->nvc0->cond_query && !blit->render_condition_enable)
  863.       IMMED_NVC0(push, NVC0_3D(COND_MODE), NVC0_3D_COND_MODE_ALWAYS);
  864.  
  865.    /* blend state */
  866.    BEGIN_NVC0(push, NVC0_3D(COLOR_MASK(0)), 1);
  867.    PUSH_DATA (push, blit->color_mask);
  868.    IMMED_NVC0(push, NVC0_3D(BLEND_ENABLE(0)), 0);
  869.    IMMED_NVC0(push, NVC0_3D(LOGIC_OP_ENABLE), 0);
  870.  
  871.    /* rasterizer state */
  872.    IMMED_NVC0(push, NVC0_3D(FRAG_COLOR_CLAMP_EN), 0);
  873.    IMMED_NVC0(push, NVC0_3D(MULTISAMPLE_ENABLE), 0);
  874.    BEGIN_NVC0(push, NVC0_3D(MSAA_MASK(0)), 4);
  875.    PUSH_DATA (push, 0xffff);
  876.    PUSH_DATA (push, 0xffff);
  877.    PUSH_DATA (push, 0xffff);
  878.    PUSH_DATA (push, 0xffff);
  879.    BEGIN_NVC0(push, NVC0_3D(MACRO_POLYGON_MODE_FRONT), 1);
  880.    PUSH_DATA (push, NVC0_3D_MACRO_POLYGON_MODE_FRONT_FILL);
  881.    BEGIN_NVC0(push, NVC0_3D(MACRO_POLYGON_MODE_BACK), 1);
  882.    PUSH_DATA (push, NVC0_3D_MACRO_POLYGON_MODE_BACK_FILL);
  883.    IMMED_NVC0(push, NVC0_3D(POLYGON_SMOOTH_ENABLE), 0);
  884.    IMMED_NVC0(push, NVC0_3D(POLYGON_OFFSET_FILL_ENABLE), 0);
  885.    IMMED_NVC0(push, NVC0_3D(POLYGON_STIPPLE_ENABLE), 0);
  886.    IMMED_NVC0(push, NVC0_3D(CULL_FACE_ENABLE), 0);
  887.  
  888.    /* zsa state */
  889.    IMMED_NVC0(push, NVC0_3D(DEPTH_TEST_ENABLE), 0);
  890.    IMMED_NVC0(push, NVC0_3D(STENCIL_ENABLE), 0);
  891.    IMMED_NVC0(push, NVC0_3D(ALPHA_TEST_ENABLE), 0);
  892.  
  893.    /* disable transform feedback */
  894.    IMMED_NVC0(push, NVC0_3D(TFB_ENABLE), 0);
  895. }
  896.  
  897. static void
  898. nvc0_blitctx_pre_blit(struct nvc0_blitctx *ctx)
  899. {
  900.    struct nvc0_context *nvc0 = ctx->nvc0;
  901.    struct nvc0_blitter *blitter = nvc0->screen->blitter;
  902.    int s;
  903.  
  904.    ctx->saved.fb.width = nvc0->framebuffer.width;
  905.    ctx->saved.fb.height = nvc0->framebuffer.height;
  906.    ctx->saved.fb.nr_cbufs = nvc0->framebuffer.nr_cbufs;
  907.    ctx->saved.fb.cbufs[0] = nvc0->framebuffer.cbufs[0];
  908.    ctx->saved.fb.zsbuf = nvc0->framebuffer.zsbuf;
  909.  
  910.    ctx->saved.rast = nvc0->rast;
  911.  
  912.    ctx->saved.vp = nvc0->vertprog;
  913.    ctx->saved.tcp = nvc0->tctlprog;
  914.    ctx->saved.tep = nvc0->tevlprog;
  915.    ctx->saved.gp = nvc0->gmtyprog;
  916.    ctx->saved.fp = nvc0->fragprog;
  917.  
  918.    ctx->saved.min_samples = nvc0->min_samples;
  919.  
  920.    nvc0->rast = &ctx->rast;
  921.  
  922.    nvc0->vertprog = &blitter->vp;
  923.    nvc0->tctlprog = NULL;
  924.    nvc0->tevlprog = NULL;
  925.    nvc0->gmtyprog = NULL;
  926.    nvc0->fragprog = ctx->fp;
  927.  
  928.    for (s = 0; s <= 4; ++s) {
  929.       ctx->saved.num_textures[s] = nvc0->num_textures[s];
  930.       ctx->saved.num_samplers[s] = nvc0->num_samplers[s];
  931.       nvc0->textures_dirty[s] = (1 << nvc0->num_textures[s]) - 1;
  932.       nvc0->samplers_dirty[s] = (1 << nvc0->num_samplers[s]) - 1;
  933.    }
  934.    ctx->saved.texture[0] = nvc0->textures[4][0];
  935.    ctx->saved.texture[1] = nvc0->textures[4][1];
  936.    ctx->saved.sampler[0] = nvc0->samplers[4][0];
  937.    ctx->saved.sampler[1] = nvc0->samplers[4][1];
  938.  
  939.    nvc0->samplers[4][0] = &blitter->sampler[ctx->filter];
  940.    nvc0->samplers[4][1] = &blitter->sampler[ctx->filter];
  941.  
  942.    for (s = 0; s <= 3; ++s)
  943.       nvc0->num_samplers[s] = 0;
  944.    nvc0->num_samplers[4] = 2;
  945.  
  946.    nvc0->min_samples = 1;
  947.  
  948.    ctx->saved.dirty = nvc0->dirty;
  949.  
  950.    nvc0->textures_dirty[4] |= 3;
  951.    nvc0->samplers_dirty[4] |= 3;
  952.  
  953.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);
  954.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(4, 0));
  955.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(4, 1));
  956.  
  957.    nvc0->dirty = NVC0_NEW_FRAMEBUFFER | NVC0_NEW_MIN_SAMPLES |
  958.       NVC0_NEW_VERTPROG | NVC0_NEW_FRAGPROG |
  959.       NVC0_NEW_TCTLPROG | NVC0_NEW_TEVLPROG | NVC0_NEW_GMTYPROG |
  960.       NVC0_NEW_TEXTURES | NVC0_NEW_SAMPLERS;
  961. }
  962.  
  963. static void
  964. nvc0_blitctx_post_blit(struct nvc0_blitctx *blit)
  965. {
  966.    struct nvc0_context *nvc0 = blit->nvc0;
  967.    int s;
  968.  
  969.    pipe_surface_reference(&nvc0->framebuffer.cbufs[0], NULL);
  970.  
  971.    nvc0->framebuffer.width = blit->saved.fb.width;
  972.    nvc0->framebuffer.height = blit->saved.fb.height;
  973.    nvc0->framebuffer.nr_cbufs = blit->saved.fb.nr_cbufs;
  974.    nvc0->framebuffer.cbufs[0] = blit->saved.fb.cbufs[0];
  975.    nvc0->framebuffer.zsbuf = blit->saved.fb.zsbuf;
  976.  
  977.    nvc0->rast = blit->saved.rast;
  978.  
  979.    nvc0->vertprog = blit->saved.vp;
  980.    nvc0->tctlprog = blit->saved.tcp;
  981.    nvc0->tevlprog = blit->saved.tep;
  982.    nvc0->gmtyprog = blit->saved.gp;
  983.    nvc0->fragprog = blit->saved.fp;
  984.  
  985.    nvc0->min_samples = blit->saved.min_samples;
  986.  
  987.    pipe_sampler_view_reference(&nvc0->textures[4][0], NULL);
  988.    pipe_sampler_view_reference(&nvc0->textures[4][1], NULL);
  989.  
  990.    for (s = 0; s <= 4; ++s) {
  991.       nvc0->num_textures[s] = blit->saved.num_textures[s];
  992.       nvc0->num_samplers[s] = blit->saved.num_samplers[s];
  993.       nvc0->textures_dirty[s] = (1 << nvc0->num_textures[s]) - 1;
  994.       nvc0->samplers_dirty[s] = (1 << nvc0->num_samplers[s]) - 1;
  995.    }
  996.    nvc0->textures[4][0] = blit->saved.texture[0];
  997.    nvc0->textures[4][1] = blit->saved.texture[1];
  998.    nvc0->samplers[4][0] = blit->saved.sampler[0];
  999.    nvc0->samplers[4][1] = blit->saved.sampler[1];
  1000.  
  1001.    nvc0->textures_dirty[4] |= 3;
  1002.    nvc0->samplers_dirty[4] |= 3;
  1003.  
  1004.    if (nvc0->cond_query && !blit->render_condition_enable)
  1005.       nvc0->base.pipe.render_condition(&nvc0->base.pipe, nvc0->cond_query,
  1006.                                        nvc0->cond_cond, nvc0->cond_mode);
  1007.  
  1008.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_FB);
  1009.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(4, 0));
  1010.    nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_TEX(4, 1));
  1011.  
  1012.    nvc0->dirty = blit->saved.dirty |
  1013.       (NVC0_NEW_FRAMEBUFFER | NVC0_NEW_SCISSOR | NVC0_NEW_SAMPLE_MASK |
  1014.        NVC0_NEW_RASTERIZER | NVC0_NEW_ZSA | NVC0_NEW_BLEND |
  1015.        NVC0_NEW_VIEWPORT |
  1016.        NVC0_NEW_TEXTURES | NVC0_NEW_SAMPLERS |
  1017.        NVC0_NEW_VERTPROG | NVC0_NEW_FRAGPROG |
  1018.        NVC0_NEW_TCTLPROG | NVC0_NEW_TEVLPROG | NVC0_NEW_GMTYPROG |
  1019.        NVC0_NEW_TFB_TARGETS | NVC0_NEW_VERTEX | NVC0_NEW_ARRAYS);
  1020.    nvc0->scissors_dirty |= 1;
  1021.    nvc0->viewports_dirty |= 1;
  1022.  
  1023.    nvc0->base.pipe.set_min_samples(&nvc0->base.pipe, blit->saved.min_samples);
  1024. }
  1025.  
  1026. static void
  1027. nvc0_blit_3d(struct nvc0_context *nvc0, const struct pipe_blit_info *info)
  1028. {
  1029.    struct nvc0_blitctx *blit = nvc0->blit;
  1030.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  1031.    struct pipe_resource *src = info->src.resource;
  1032.    struct pipe_resource *dst = info->dst.resource;
  1033.    struct nouveau_bo *vtxbuf_bo;
  1034.    uint32_t stride, length, *vbuf;
  1035.    uint64_t vtxbuf;
  1036.    int32_t minx, maxx, miny, maxy;
  1037.    int32_t i, n;
  1038.    float x0, x1, y0, y1, z;
  1039.    float dz;
  1040.    float x_range, y_range;
  1041.  
  1042.    blit->mode = nv50_blit_select_mode(info);
  1043.    blit->color_mask = nv50_blit_derive_color_mask(info);
  1044.    blit->filter = nv50_blit_get_filter(info);
  1045.    blit->render_condition_enable = info->render_condition_enable;
  1046.  
  1047.    nvc0_blit_select_fp(blit, info);
  1048.    nvc0_blitctx_pre_blit(blit);
  1049.  
  1050.    nvc0_blit_set_dst(blit, dst, info->dst.level, -1, info->dst.format);
  1051.    nvc0_blit_set_src(blit, src, info->src.level, -1, info->src.format,
  1052.                      blit->filter);
  1053.  
  1054.    nvc0_blitctx_prepare_state(blit);
  1055.  
  1056.    nvc0_state_validate(nvc0, ~0, 48);
  1057.  
  1058.    x_range = (float)info->src.box.width / (float)info->dst.box.width;
  1059.    y_range = (float)info->src.box.height / (float)info->dst.box.height;
  1060.  
  1061.    x0 = (float)info->src.box.x - x_range * (float)info->dst.box.x;
  1062.    y0 = (float)info->src.box.y - y_range * (float)info->dst.box.y;
  1063.  
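   /* The blit triangle emitted below spans 0..16384 in screen space, so the
    * texture coordinates at its far corners are the source coordinates that
    * correspond to a 16384-pixel-wide/high destination; the scissor then
    * restricts rendering to the actual destination box.
    */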
  1064.    x1 = x0 + 16384.0f * x_range;
  1065.    y1 = y0 + 16384.0f * y_range;
  1066.  
  1067.    x0 *= (float)(1 << nv50_miptree(src)->ms_x);
  1068.    x1 *= (float)(1 << nv50_miptree(src)->ms_x);
  1069.    y0 *= (float)(1 << nv50_miptree(src)->ms_y);
  1070.    y1 *= (float)(1 << nv50_miptree(src)->ms_y);
  1071.  
  1072.    dz = (float)info->src.box.depth / (float)info->dst.box.depth;
  1073.    z = (float)info->src.box.z;
  1074.    if (nv50_miptree(src)->layout_3d)
  1075.       z += 0.5f * dz;
  1076.  
  1077.    if (src->last_level > 0) {
  1078.       /* If there are mip maps, GPU always assumes normalized coordinates. */
  1079.       const unsigned l = info->src.level;
  1080.       const float fh = u_minify(src->width0 << nv50_miptree(src)->ms_x, l);
  1081.       const float fv = u_minify(src->height0 << nv50_miptree(src)->ms_y, l);
  1082.       x0 /= fh;
  1083.       x1 /= fh;
  1084.       y0 /= fv;
  1085.       y1 /= fv;
  1086.       if (nv50_miptree(src)->layout_3d) {
  1087.          z /= u_minify(src->depth0, l);
  1088.          dz /= u_minify(src->depth0, l);
  1089.       }
  1090.    }
  1091.  
  1092.    IMMED_NVC0(push, NVC0_3D(VIEWPORT_TRANSFORM_EN), 0);
  1093.    IMMED_NVC0(push, NVC0_3D(VIEW_VOLUME_CLIP_CTRL), 0x2 |
  1094.               NVC0_3D_VIEW_VOLUME_CLIP_CTRL_DEPTH_RANGE_0_1);
  1095.    BEGIN_NVC0(push, NVC0_3D(VIEWPORT_HORIZ(0)), 2);
  1096.    PUSH_DATA (push, nvc0->framebuffer.width << 16);
  1097.    PUSH_DATA (push, nvc0->framebuffer.height << 16);
  1098.  
  1099.    /* Draw a large triangle in screen coordinates covering the whole
  1100.     * render target, with scissors defining the destination region.
  1101.     * The vertex is supplied with non-normalized texture coordinates
  1102.     * arranged in a way to yield the desired offset and scale.
  1103.     */
  1104.  
  1105.    minx = info->dst.box.x;
  1106.    maxx = info->dst.box.x + info->dst.box.width;
  1107.    miny = info->dst.box.y;
  1108.    maxy = info->dst.box.y + info->dst.box.height;
  1109.    if (info->scissor_enable) {
  1110.       minx = MAX2(minx, info->scissor.minx);
  1111.       maxx = MIN2(maxx, info->scissor.maxx);
  1112.       miny = MAX2(miny, info->scissor.miny);
  1113.       maxy = MIN2(maxy, info->scissor.maxy);
  1114.    }
  1115.    BEGIN_NVC0(push, NVC0_3D(SCISSOR_HORIZ(0)), 2);
  1116.    PUSH_DATA (push, (maxx << 16) | minx);
  1117.    PUSH_DATA (push, (maxy << 16) | miny);
  1118.  
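   /* Each vertex is five floats: a 2D screen-space position followed by a
    * (u, v, depth/layer) texture coordinate, with three vertices per
    * destination slice.
    */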
  1119.    stride = (3 + 2) * 4;
  1120.    length = stride * 3 * info->dst.box.depth;
  1121.  
  1122.    vbuf = nouveau_scratch_get(&nvc0->base, length, &vtxbuf, &vtxbuf_bo);
  1123.    if (!vbuf) {
  1124.       assert(vbuf);
  1125.       return;
  1126.    }
  1127.  
  1128.    BCTX_REFN_bo(nvc0->bufctx_3d, VTX_TMP, NOUVEAU_BO_GART | NOUVEAU_BO_RD, vtxbuf_bo);
  1129.    nouveau_pushbuf_validate(push);
  1130.  
  1131.    BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_FETCH(0)), 4);
  1132.    PUSH_DATA (push, NVC0_3D_VERTEX_ARRAY_FETCH_ENABLE | stride <<
  1133.                     NVC0_3D_VERTEX_ARRAY_FETCH_STRIDE__SHIFT);
  1134.    PUSH_DATAh(push, vtxbuf);
  1135.    PUSH_DATA (push, vtxbuf);
  1136.    PUSH_DATA (push, 0);
  1137.    BEGIN_NVC0(push, NVC0_3D(VERTEX_ARRAY_LIMIT_HIGH(0)), 2);
  1138.    PUSH_DATAh(push, vtxbuf + length - 1);
  1139.    PUSH_DATA (push, vtxbuf + length - 1);
  1140.  
  1141.    n = MAX2(2, nvc0->state.num_vtxelts);
  1142.  
  1143.    BEGIN_NVC0(push, NVC0_3D(VERTEX_ATTRIB_FORMAT(0)), n);
  1144.    PUSH_DATA (push, NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
  1145.                     NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32_32 | 0x00 <<
  1146.                     NVC0_3D_VERTEX_ATTRIB_FORMAT_OFFSET__SHIFT);
  1147.    PUSH_DATA (push, NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
  1148.                     NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32_32_32 | 0x08 <<
  1149.                     NVC0_3D_VERTEX_ATTRIB_FORMAT_OFFSET__SHIFT);
  1150.    for (i = 2; i < n; i++) {
  1151.       PUSH_DATA(push, NVC0_3D_VERTEX_ATTRIB_FORMAT_TYPE_FLOAT |
  1152.                       NVC0_3D_VERTEX_ATTRIB_FORMAT_SIZE_32 |
  1153.                       NVC0_3D_VERTEX_ATTRIB_FORMAT_CONST);
  1154.    }
  1155.    if (nvc0->state.instance_elts) {
  1156.       nvc0->state.instance_elts = 0;
  1157.       BEGIN_NVC0(push, NVC0_3D(MACRO_VERTEX_ARRAY_PER_INSTANCE), 2);
  1158.       PUSH_DATA (push, n);
  1159.       PUSH_DATA (push, 0);
  1160.    }
  1161.    nvc0->state.num_vtxelts = 2;
  1162.  
  1163.    for (i = 0; i < info->dst.box.depth; ++i, z += dz) {
  1164.       if (info->dst.box.z + i) {
  1165.          BEGIN_NVC0(push, NVC0_3D(LAYER), 1);
  1166.          PUSH_DATA (push, info->dst.box.z + i);
  1167.       }
  1168.  
  1169.       *(vbuf++) = fui(0.0f);
  1170.       *(vbuf++) = fui(0.0f);
  1171.       *(vbuf++) = fui(x0);
  1172.       *(vbuf++) = fui(y0);
  1173.       *(vbuf++) = fui(z);
  1174.  
  1175.       *(vbuf++) = fui(16384 << nv50_miptree(dst)->ms_x);
  1176.       *(vbuf++) = fui(0.0f);
  1177.       *(vbuf++) = fui(x1);
  1178.       *(vbuf++) = fui(y0);
  1179.       *(vbuf++) = fui(z);
  1180.  
  1181.       *(vbuf++) = fui(0.0f);
  1182.       *(vbuf++) = fui(16384 << nv50_miptree(dst)->ms_y);
  1183.       *(vbuf++) = fui(x0);
  1184.       *(vbuf++) = fui(y1);
  1185.       *(vbuf++) = fui(z);
  1186.  
  1187.       IMMED_NVC0(push, NVC0_3D(VERTEX_BEGIN_GL),
  1188.                        NVC0_3D_VERTEX_BEGIN_GL_PRIMITIVE_TRIANGLES);
  1189.       BEGIN_NVC0(push, NVC0_3D(VERTEX_BUFFER_FIRST), 2);
  1190.       PUSH_DATA (push, i * 3);
  1191.       PUSH_DATA (push, 3);
  1192.       IMMED_NVC0(push, NVC0_3D(VERTEX_END_GL), 0);
  1193.    }
  1194.    if (info->dst.box.z + info->dst.box.depth - 1)
  1195.       IMMED_NVC0(push, NVC0_3D(LAYER), 0);
  1196.  
  1197.    nvc0_blitctx_post_blit(blit);
  1198.  
  1199.    /* restore viewport transform */
  1200.    IMMED_NVC0(push, NVC0_3D(VIEWPORT_TRANSFORM_EN), 1);
  1201. }
  1202.  
  1203. static void
  1204. nvc0_blit_eng2d(struct nvc0_context *nvc0, const struct pipe_blit_info *info)
  1205. {
  1206.    struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  1207.    struct nv50_miptree *dst = nv50_miptree(info->dst.resource);
  1208.    struct nv50_miptree *src = nv50_miptree(info->src.resource);
  1209.    const int32_t srcx_adj = info->src.box.width < 0 ? -1 : 0;
  1210.    const int32_t srcy_adj = info->src.box.height < 0 ? -1 : 0;
  1211.    const int dz = info->dst.box.z;
  1212.    const int sz = info->src.box.z;
  1213.    uint32_t dstw, dsth;
  1214.    int32_t dstx, dsty;
  1215.    int64_t srcx, srcy;
  1216.    int64_t du_dx, dv_dy;
  1217.    int i;
  1218.    uint32_t mode;
  1219.    uint32_t mask = nv50_blit_eng2d_get_mask(info);
  1220.    boolean b;
  1221.  
  1222.    mode = nv50_blit_get_filter(info) ?
  1223.       NV50_2D_BLIT_CONTROL_FILTER_BILINEAR :
  1224.       NV50_2D_BLIT_CONTROL_FILTER_POINT_SAMPLE;
  1225.    mode |= (src->base.base.nr_samples > dst->base.base.nr_samples) ?
  1226.       NV50_2D_BLIT_CONTROL_ORIGIN_CORNER : NV50_2D_BLIT_CONTROL_ORIGIN_CENTER;
  1227.  
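   /* Source step per destination pixel in 32.32 fixed point; negative box
    * dimensions (flipped blits) simply produce negative steps.
    */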
  1228.    du_dx = ((int64_t)info->src.box.width << 32) / info->dst.box.width;
  1229.    dv_dy = ((int64_t)info->src.box.height << 32) / info->dst.box.height;
  1230.  
  1231.    b = info->dst.format == info->src.format;
  1232.    nvc0_2d_texture_set(push, 1, dst, info->dst.level, dz, info->dst.format, b);
  1233.    nvc0_2d_texture_set(push, 0, src, info->src.level, sz, info->src.format, b);
  1234.  
  1235.    if (info->scissor_enable) {
  1236.       BEGIN_NVC0(push, NVC0_2D(CLIP_X), 5);
  1237.       PUSH_DATA (push, info->scissor.minx << dst->ms_x);
  1238.       PUSH_DATA (push, info->scissor.miny << dst->ms_y);
  1239.       PUSH_DATA (push, (info->scissor.maxx - info->scissor.minx) << dst->ms_x);
  1240.       PUSH_DATA (push, (info->scissor.maxy - info->scissor.miny) << dst->ms_y);
  1241.       PUSH_DATA (push, 1); /* enable */
  1242.    }
  1243.  
  1244.    if (nvc0->cond_query && info->render_condition_enable)
  1245.       IMMED_NVC0(push, NVC0_2D(COND_MODE), nvc0->cond_condmode);
  1246.  
  1247.    if (mask != 0xffffffff) {
  1248.       IMMED_NVC0(push, NVC0_2D(ROP), 0xca); /* DPSDxax */
  1249.       IMMED_NVC0(push, NVC0_2D(PATTERN_COLOR_FORMAT),
  1250.                        NV50_2D_PATTERN_COLOR_FORMAT_A8R8G8B8);
  1251.       BEGIN_NVC0(push, NVC0_2D(PATTERN_BITMAP_COLOR(0)), 4);
  1252.       PUSH_DATA (push, 0x00000000);
  1253.       PUSH_DATA (push, mask);
  1254.       PUSH_DATA (push, 0xffffffff);
  1255.       PUSH_DATA (push, 0xffffffff);
  1256.       IMMED_NVC0(push, NVC0_2D(OPERATION), NV50_2D_OPERATION_ROP);
  1257.    } else
  1258.    if (info->src.format != info->dst.format) {
  1259.       if (info->src.format == PIPE_FORMAT_R8_UNORM ||
  1260.           info->src.format == PIPE_FORMAT_R8_SNORM ||
  1261.           info->src.format == PIPE_FORMAT_R16_UNORM ||
  1262.           info->src.format == PIPE_FORMAT_R16_SNORM ||
  1263.           info->src.format == PIPE_FORMAT_R16_FLOAT ||
  1264.           info->src.format == PIPE_FORMAT_R32_FLOAT) {
  1265.          mask = 0xffff0000; /* also makes condition for OPERATION reset true */
  1266.          BEGIN_NVC0(push, NVC0_2D(BETA4), 2);
  1267.          PUSH_DATA (push, mask);
  1268.          PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY_PREMULT);
  1269.       } else
  1270.       if (info->src.format == PIPE_FORMAT_A8_UNORM) {
  1271.          mask = 0xff000000;
  1272.          BEGIN_NVC0(push, NVC0_2D(BETA4), 2);
  1273.          PUSH_DATA (push, mask);
  1274.          PUSH_DATA (push, NV50_2D_OPERATION_SRCCOPY_PREMULT);
  1275.       }
  1276.    }
  1277.  
  1278.    if (src->ms_x > dst->ms_x || src->ms_y > dst->ms_y) {
  1279.       /* ms_x is always >= ms_y */
  1280.       du_dx <<= src->ms_x - dst->ms_x;
  1281.       dv_dy <<= src->ms_y - dst->ms_y;
  1282.    } else {
  1283.       du_dx >>= dst->ms_x - src->ms_x;
  1284.       dv_dy >>= dst->ms_y - src->ms_y;
  1285.    }
  1286.  
  1287.    srcx = (int64_t)(info->src.box.x + srcx_adj) << (src->ms_x + 32);
  1288.    srcy = (int64_t)(info->src.box.y + srcy_adj) << (src->ms_y + 32);
  1289.  
  1290.    if (src->base.base.nr_samples > dst->base.base.nr_samples) {
  1291.       /* center src coordinates for proper MS resolve filtering */
  1292.       srcx += (int64_t)1 << (src->ms_x + 31);
  1293.       srcy += (int64_t)1 << (src->ms_y + 31);
  1294.    }
  1295.  
  1296.    dstx = info->dst.box.x << dst->ms_x;
  1297.    dsty = info->dst.box.y << dst->ms_y;
  1298.  
  1299.    dstw = info->dst.box.width << dst->ms_x;
  1300.    dsth = info->dst.box.height << dst->ms_y;
  1301.  
  1302.    if (dstx < 0) {
  1303.       dstw += dstx;
  1304.       srcx -= du_dx * dstx;
  1305.       dstx = 0;
  1306.    }
  1307.    if (dsty < 0) {
  1308.       dsth += dsty;
  1309.       srcy -= dv_dy * dsty;
  1310.       dsty = 0;
  1311.    }
  1312.  
  1313.    IMMED_NVC0(push, NVC0_2D(BLIT_CONTROL), mode);
  1314.    BEGIN_NVC0(push, NVC0_2D(BLIT_DST_X), 4);
  1315.    PUSH_DATA (push, dstx);
  1316.    PUSH_DATA (push, dsty);
  1317.    PUSH_DATA (push, dstw);
  1318.    PUSH_DATA (push, dsth);
  1319.    BEGIN_NVC0(push, NVC0_2D(BLIT_DU_DX_FRACT), 4);
  1320.    PUSH_DATA (push, du_dx);
  1321.    PUSH_DATA (push, du_dx >> 32);
  1322.    PUSH_DATA (push, dv_dy);
  1323.    PUSH_DATA (push, dv_dy >> 32);
  1324.  
  1325.    BCTX_REFN(nvc0->bufctx, 2D, &dst->base, WR);
  1326.    BCTX_REFN(nvc0->bufctx, 2D, &src->base, RD);
  1327.    nouveau_pushbuf_bufctx(nvc0->base.pushbuf, nvc0->bufctx);
  1328.    if (nouveau_pushbuf_validate(nvc0->base.pushbuf))
  1329.       return;
  1330.  
  1331.    for (i = 0; i < info->dst.box.depth; ++i) {
  1332.       if (i > 0) {
  1333.          /* no scaling in z-direction possible for eng2d blits */
  1334.          if (dst->layout_3d) {
  1335.             BEGIN_NVC0(push, NVC0_2D(DST_LAYER), 1);
  1336.             PUSH_DATA (push, info->dst.box.z + i);
  1337.          } else {
  1338.             const unsigned z = info->dst.box.z + i;
  1339.             BEGIN_NVC0(push, NVC0_2D(DST_ADDRESS_HIGH), 2);
  1340.             PUSH_DATAh(push, dst->base.address + z * dst->layer_stride);
  1341.             PUSH_DATA (push, dst->base.address + z * dst->layer_stride);
  1342.          }
  1343.          if (src->layout_3d) {
  1344.             /* not possible because of depth tiling */
  1345.             assert(0);
  1346.          } else {
  1347.             const unsigned z = info->src.box.z + i;
  1348.             BEGIN_NVC0(push, NVC0_2D(SRC_ADDRESS_HIGH), 2);
  1349.             PUSH_DATAh(push, src->base.address + z * src->layer_stride);
  1350.             PUSH_DATA (push, src->base.address + z * src->layer_stride);
  1351.          }
  1352.          BEGIN_NVC0(push, NVC0_2D(BLIT_SRC_Y_INT), 1); /* trigger */
  1353.          PUSH_DATA (push, srcy >> 32);
  1354.       } else {
  1355.          BEGIN_NVC0(push, NVC0_2D(BLIT_SRC_X_FRACT), 4);
  1356.          PUSH_DATA (push, srcx);
  1357.          PUSH_DATA (push, srcx >> 32);
  1358.          PUSH_DATA (push, srcy);
  1359.          PUSH_DATA (push, srcy >> 32);
  1360.       }
  1361.    }
  1362.    nvc0_resource_validate(&dst->base, NOUVEAU_BO_WR);
  1363.    nvc0_resource_validate(&src->base, NOUVEAU_BO_RD);
  1364.  
  1365.    nouveau_bufctx_reset(nvc0->bufctx, NVC0_BIND_2D);
  1366.  
  1367.    if (info->scissor_enable)
  1368.       IMMED_NVC0(push, NVC0_2D(CLIP_ENABLE), 0);
  1369.    if (mask != 0xffffffff)
  1370.       IMMED_NVC0(push, NVC0_2D(OPERATION), NV50_2D_OPERATION_SRCCOPY);
  1371.    if (nvc0->cond_query && info->render_condition_enable)
  1372.       IMMED_NVC0(push, NVC0_2D(COND_MODE), NV50_2D_COND_MODE_ALWAYS);
  1373. }
  1374.  
  1375. static void
  1376. nvc0_blit(struct pipe_context *pipe, const struct pipe_blit_info *info)
  1377. {
  1378.    struct nvc0_context *nvc0 = nvc0_context(pipe);
  1379.    boolean eng3d = FALSE;
  1380.  
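   /* The 2D engine only handles 1:1 (possibly mirrored) copies between
    * compatible formats; scaling, partial channel masks, Z32F depth formats
    * and filtered blits are routed to the 3D path instead.
    */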
  1381.    if (util_format_is_depth_or_stencil(info->dst.resource->format)) {
  1382.       if (!(info->mask & PIPE_MASK_ZS))
  1383.          return;
  1384.       if (info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT ||
  1385.           info->dst.resource->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT)
  1386.          eng3d = TRUE;
  1387.       if (info->filter != PIPE_TEX_FILTER_NEAREST)
  1388.          eng3d = TRUE;
  1389.    } else {
  1390.       if (!(info->mask & PIPE_MASK_RGBA))
  1391.          return;
  1392.       if (info->mask != PIPE_MASK_RGBA)
  1393.          eng3d = TRUE;
  1394.    }
  1395.  
  1396.    if (nv50_miptree(info->src.resource)->layout_3d) {
  1397.       eng3d = TRUE;
  1398.    } else
  1399.    if (info->src.box.depth != info->dst.box.depth) {
  1400.       eng3d = TRUE;
  1401.       debug_printf("blit: cannot filter array or cube textures in z direction\n");
  1402.    }
  1403.  
  1404.    if (!eng3d && info->dst.format != info->src.format) {
  1405.       if (!nv50_2d_dst_format_faithful(info->dst.format)) {
  1406.          eng3d = TRUE;
  1407.       } else
  1408.       if (!nv50_2d_src_format_faithful(info->src.format)) {
  1409.          if (!util_format_is_luminance(info->src.format)) {
  1410.             if (!nv50_2d_dst_format_ops_supported(info->dst.format))
  1411.                eng3d = TRUE;
  1412.             else
  1413.             if (util_format_is_intensity(info->src.format))
  1414.                eng3d = info->src.format != PIPE_FORMAT_I8_UNORM;
  1415.             else
  1416.             if (util_format_is_alpha(info->src.format))
  1417.                eng3d = info->src.format != PIPE_FORMAT_A8_UNORM;
  1418.             else
  1419.                eng3d = !nv50_2d_format_supported(info->src.format);
  1420.          }
  1421.       } else
  1422.       if (util_format_is_luminance_alpha(info->src.format))
  1423.          eng3d = TRUE;
  1424.    }
  1425.  
  1426.    if (info->src.resource->nr_samples == 8 &&
  1427.        info->dst.resource->nr_samples <= 1)
  1428.       eng3d = TRUE;
  1429. #if 0
  1430.    /* FIXME: can't make this work with eng2d anymore, at least not on nv50 */
  1431.    if (info->src.resource->nr_samples > 1 ||
  1432.        info->dst.resource->nr_samples > 1)
  1433.       eng3d = TRUE;
  1434. #endif
  1435.    /* FIXME: find correct src coordinates adjustments */
  1436.    if ((info->src.box.width !=  info->dst.box.width &&
  1437.         info->src.box.width != -info->dst.box.width) ||
  1438.        (info->src.box.height !=  info->dst.box.height &&
  1439.         info->src.box.height != -info->dst.box.height))
  1440.       eng3d = TRUE;
  1441.  
  1442.    if (!eng3d)
  1443.       nvc0_blit_eng2d(nvc0, info);
  1444.    else
  1445.       nvc0_blit_3d(nvc0, info);
  1446.  
  1447.    NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_blit_count, 1);
  1448. }
  1449.  
  1450. static void
  1451. nvc0_flush_resource(struct pipe_context *ctx,
  1452.                     struct pipe_resource *resource)
  1453. {
  1454. }
  1455.  
  1456. boolean
  1457. nvc0_blitter_create(struct nvc0_screen *screen)
  1458. {
  1459.    screen->blitter = CALLOC_STRUCT(nvc0_blitter);
  1460.    if (!screen->blitter) {
  1461.       NOUVEAU_ERR("failed to allocate blitter struct\n");
  1462.       return FALSE;
  1463.    }
  1464.    screen->blitter->screen = screen;
  1465.  
  1466.    pipe_mutex_init(screen->blitter->mutex);
  1467.  
  1468.    nvc0_blitter_make_vp(screen->blitter);
  1469.    nvc0_blitter_make_sampler(screen->blitter);
  1470.  
  1471.    return TRUE;
  1472. }
  1473.  
  1474. void
  1475. nvc0_blitter_destroy(struct nvc0_screen *screen)
  1476. {
  1477.    struct nvc0_blitter *blitter = screen->blitter;
  1478.    unsigned i, m;
  1479.  
  1480.    for (i = 0; i < NV50_BLIT_MAX_TEXTURE_TYPES; ++i) {
  1481.       for (m = 0; m < NV50_BLIT_MODES; ++m) {
  1482.          struct nvc0_program *prog = blitter->fp[i][m];
  1483.          if (prog) {
  1484.             nvc0_program_destroy(NULL, prog);
  1485.             FREE((void *)prog->pipe.tokens);
  1486.             FREE(prog);
  1487.          }
  1488.       }
  1489.    }
  1490.  
  1491.    FREE(blitter);
  1492. }
  1493.  
  1494. boolean
  1495. nvc0_blitctx_create(struct nvc0_context *nvc0)
  1496. {
  1497.    nvc0->blit = CALLOC_STRUCT(nvc0_blitctx);
  1498.    if (!nvc0->blit) {
  1499.       NOUVEAU_ERR("failed to allocate blit context\n");
  1500.       return FALSE;
  1501.    }
  1502.  
  1503.    nvc0->blit->nvc0 = nvc0;
  1504.  
  1505.    nvc0->blit->rast.pipe.half_pixel_center = 1;
  1506.  
  1507.    return TRUE;
  1508. }
  1509.  
  1510. void
  1511. nvc0_blitctx_destroy(struct nvc0_context *nvc0)
  1512. {
  1513.    FREE(nvc0->blit);
  1514. }
  1515.  
  1516. void
  1517. nvc0_init_surface_functions(struct nvc0_context *nvc0)
  1518. {
  1519.    struct pipe_context *pipe = &nvc0->base.pipe;
  1520.  
  1521.    pipe->resource_copy_region = nvc0_resource_copy_region;
  1522.    pipe->blit = nvc0_blit;
  1523.    pipe->flush_resource = nvc0_flush_resource;
  1524.    pipe->clear_render_target = nvc0_clear_render_target;
  1525.    pipe->clear_depth_stencil = nvc0_clear_depth_stencil;
  1526.    pipe->clear_buffer = nvc0_clear_buffer;
  1527. }
  1528.