Subversion Repositories Kolibri OS

Rev

Go to most recent revision | Blame | Last modification | View Log | RSS feed

  1. /*
  2.  * Mesa 3-D graphics library
  3.  *
  4.  * Copyright (C) 2012-2014 LunarG, Inc.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included
  14.  * in all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  21.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  22.  * DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors:
  25.  *    Chia-I Wu <olv@lunarg.com>
  26.  */
  27.  
  28. #include <string.h>
  29. #include <errno.h>
  30. #ifndef ETIME
  31. #define ETIME ETIMEDOUT
  32. #endif
  33.  
  34. #include <xf86drm.h>
  35. #include <i915_drm.h>
  36. #include <intel_bufmgr.h>
  37.  
  38. #include "os/os_thread.h"
  39. #include "state_tracker/drm_driver.h"
  40. #include "pipe/p_state.h"
  41. #include "util/u_inlines.h"
  42. #include "util/u_memory.h"
  43. #include "util/u_debug.h"
  44. #include "ilo/core/intel_winsys.h"
  45. #include "intel_drm_public.h"
  46.  
/* Per-device winsys state: one instance per opened DRM fd. */
struct intel_winsys {
   int fd;                        /* DRM device file descriptor */
   drm_intel_bufmgr *bufmgr;      /* libdrm GEM buffer manager for this fd */
   struct intel_winsys_info info; /* capabilities filled in by probe_winsys() */

   /* these are protected by the mutex */
   pipe_mutex mutex;
   drm_intel_context *first_gem_ctx;   /* preallocated by probe_winsys(); handed
                                          out by intel_winsys_create_context() */
   struct drm_intel_decode *decode;    /* lazily created batch decoder */
};
  57.  
  58. static drm_intel_context *
  59. gem_ctx(const struct intel_context *ctx)
  60. {
  61.    return (drm_intel_context *) ctx;
  62. }
  63.  
  64. static drm_intel_bo *
  65. gem_bo(const struct intel_bo *bo)
  66. {
  67.    return (drm_intel_bo *) bo;
  68. }
  69.  
  70. static bool
  71. get_param(struct intel_winsys *winsys, int param, int *value)
  72. {
  73.    struct drm_i915_getparam gp;
  74.    int err;
  75.  
  76.    *value = 0;
  77.  
  78.    memset(&gp, 0, sizeof(gp));
  79.    gp.param = param;
  80.    gp.value = value;
  81.  
  82.    err = drmCommandWriteRead(winsys->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
  83.    if (err) {
  84.       *value = 0;
  85.       return false;
  86.    }
  87.  
  88.    return true;
  89. }
  90.  
  91. static bool
  92. test_address_swizzling(struct intel_winsys *winsys)
  93. {
  94.    drm_intel_bo *bo;
  95.    uint32_t tiling = I915_TILING_X, swizzle;
  96.    unsigned long pitch;
  97.  
  98.    bo = drm_intel_bo_alloc_tiled(winsys->bufmgr,
  99.          "address swizzling test", 64, 64, 4, &tiling, &pitch, 0);
  100.    if (bo) {
  101.       drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
  102.       drm_intel_bo_unreference(bo);
  103.    }
  104.    else {
  105.       swizzle = I915_BIT_6_SWIZZLE_NONE;
  106.    }
  107.  
  108.    return (swizzle != I915_BIT_6_SWIZZLE_NONE);
  109. }
  110.  
  111. static bool
  112. test_reg_read(struct intel_winsys *winsys, uint32_t reg)
  113. {
  114.    uint64_t dummy;
  115.  
  116.    return !drm_intel_reg_read(winsys->bufmgr, reg, &dummy);
  117. }
  118.  
/*
 * Probe kernel and device capabilities and fill winsys->info.  Returns
 * false when a hard requirement is missing.  As a side effect, the first
 * GEM context is preallocated into winsys->first_gem_ctx.
 */
static bool
probe_winsys(struct intel_winsys *winsys)
{
   struct intel_winsys_info *info = &winsys->info;
   int val;

   /*
    * When we need the Nth vertex from a user vertex buffer, and the vertex is
    * uploaded to, say, the beginning of a bo, we want the first vertex in the
    * bo to be fetched.  One way to do this is to set the base address of the
    * vertex buffer to
    *
    *   bo->offset64 + (vb->buffer_offset - vb->stride * N).
    *
    * The second term may be negative, and we need kernel support to do that.
    *
    * This check is taken from the classic driver.  u_vbuf_upload_buffers()
    * guarantees the term is never negative, but it is good to require a
    * recent kernel.
    */
   get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
   if (!val) {
      debug_error("kernel 2.6.39 required");
      return false;
   }

   info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);

   /* total/mappable GTT aperture sizes; failure here is fatal */
   if (drm_intel_get_aperture_sizes(winsys->fd,
         &info->aperture_mappable, &info->aperture_total)) {
      debug_error("failed to query aperture sizes");
      return false;
   }

   get_param(winsys, I915_PARAM_HAS_LLC, &val);
   info->has_llc = val;
   info->has_address_swizzling = test_address_swizzling(winsys);

   /* create a context both as a capability test and for later reuse by
    * intel_winsys_create_context() */
   winsys->first_gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);
   info->has_logical_context = (winsys->first_gem_ctx != NULL);

   get_param(winsys, I915_PARAM_HAS_ALIASING_PPGTT, &val);
   info->has_ppgtt = val;

   /* test TIMESTAMP read (register 0x2358) */
   info->has_timestamp = test_reg_read(winsys, 0x2358);

   get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
   info->has_gen7_sol_reset = val;

   return true;
}
  171.  
  172. struct intel_winsys *
  173. intel_winsys_create_for_fd(int fd)
  174. {
  175.    /* so that we can have enough (up to 4094) relocs per bo */
  176.    const int batch_size = sizeof(uint32_t) * 8192;
  177.    struct intel_winsys *winsys;
  178.  
  179.    winsys = CALLOC_STRUCT(intel_winsys);
  180.    if (!winsys)
  181.       return NULL;
  182.  
  183.    winsys->fd = fd;
  184.  
  185.    winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, batch_size);
  186.    if (!winsys->bufmgr) {
  187.       debug_error("failed to create GEM buffer manager");
  188.       FREE(winsys);
  189.       return NULL;
  190.    }
  191.  
  192.    pipe_mutex_init(winsys->mutex);
  193.  
  194.    if (!probe_winsys(winsys)) {
  195.       pipe_mutex_destroy(winsys->mutex);
  196.       drm_intel_bufmgr_destroy(winsys->bufmgr);
  197.       FREE(winsys);
  198.       return NULL;
  199.    }
  200.  
  201.    /*
  202.     * No need to implicitly set up a fence register for each non-linear reloc
  203.     * entry.  INTEL_RELOC_FENCE will be set on reloc entries that need them.
  204.     */
  205.    drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);
  206.  
  207.    drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);
  208.  
  209.    return winsys;
  210. }
  211.  
/*
 * Destroy a winsys and everything it owns.  The teardown order matters:
 * decoder and the unclaimed first context go before the buffer manager
 * they were created from.  The caller still owns the fd.
 */
void
intel_winsys_destroy(struct intel_winsys *winsys)
{
   if (winsys->decode)
      drm_intel_decode_context_free(winsys->decode);

   /* destroy the preallocated context if no one ever claimed it */
   if (winsys->first_gem_ctx)
      drm_intel_gem_context_destroy(winsys->first_gem_ctx);

   pipe_mutex_destroy(winsys->mutex);
   drm_intel_bufmgr_destroy(winsys->bufmgr);
   FREE(winsys);
}
  225.  
  226. const struct intel_winsys_info *
  227. intel_winsys_get_info(const struct intel_winsys *winsys)
  228. {
  229.    return &winsys->info;
  230. }
  231.  
  232. struct intel_context *
  233. intel_winsys_create_context(struct intel_winsys *winsys)
  234. {
  235.    drm_intel_context *gem_ctx;
  236.  
  237.    /* try the preallocated context first */
  238.    pipe_mutex_lock(winsys->mutex);
  239.    gem_ctx = winsys->first_gem_ctx;
  240.    winsys->first_gem_ctx = NULL;
  241.    pipe_mutex_unlock(winsys->mutex);
  242.  
  243.    if (!gem_ctx)
  244.       gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);
  245.  
  246.    return (struct intel_context *) gem_ctx;
  247. }
  248.  
  249. void
  250. intel_winsys_destroy_context(struct intel_winsys *winsys,
  251.                              struct intel_context *ctx)
  252. {
  253.    drm_intel_gem_context_destroy(gem_ctx(ctx));
  254. }
  255.  
  256. int
  257. intel_winsys_read_reg(struct intel_winsys *winsys,
  258.                       uint32_t reg, uint64_t *val)
  259. {
  260.    return drm_intel_reg_read(winsys->bufmgr, reg, val);
  261. }
  262.  
  263. int
  264. intel_winsys_get_reset_stats(struct intel_winsys *winsys,
  265.                              struct intel_context *ctx,
  266.                              uint32_t *active_lost,
  267.                              uint32_t *pending_lost)
  268. {
  269.    uint32_t reset_count;
  270.  
  271.    return drm_intel_get_reset_stats(gem_ctx(ctx),
  272.          &reset_count, active_lost, pending_lost);
  273. }
  274.  
  275. struct intel_bo *
  276. intel_winsys_alloc_bo(struct intel_winsys *winsys,
  277.                       const char *name,
  278.                       unsigned long size,
  279.                       bool cpu_init)
  280. {
  281.    const unsigned int alignment = 4096; /* always page-aligned */
  282.    drm_intel_bo *bo;
  283.  
  284.    if (cpu_init) {
  285.       bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
  286.    } else {
  287.       bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
  288.             name, size, alignment);
  289.    }
  290.  
  291.    return (struct intel_bo *) bo;
  292. }
  293.  
  294. struct intel_bo *
  295. intel_winsys_import_userptr(struct intel_winsys *winsys,
  296.                             const char *name,
  297.                             void *userptr,
  298.                             unsigned long size,
  299.                             unsigned long flags)
  300. {
  301.    return NULL;
  302. }
  303.  
  304. struct intel_bo *
  305. intel_winsys_import_handle(struct intel_winsys *winsys,
  306.                            const char *name,
  307.                            const struct winsys_handle *handle,
  308.                            unsigned long height,
  309.                            enum intel_tiling_mode *tiling,
  310.                            unsigned long *pitch)
  311. {
  312.    uint32_t real_tiling, swizzle;
  313.    drm_intel_bo *bo;
  314.    int err;
  315.  
  316.    switch (handle->type) {
  317.    case DRM_API_HANDLE_TYPE_SHARED:
  318.       {
  319.          const uint32_t gem_name = handle->handle;
  320.          bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr,
  321.                name, gem_name);
  322.       }
  323.       break;
  324.    case DRM_API_HANDLE_TYPE_FD:
  325.       {
  326.          const int fd = (int) handle->handle;
  327.          bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr,
  328.                fd, height * handle->stride);
  329.       }
  330.       break;
  331.    default:
  332.       bo = NULL;
  333.       break;
  334.    }
  335.  
  336.    if (!bo)
  337.       return NULL;
  338.  
  339.    err = drm_intel_bo_get_tiling(bo, &real_tiling, &swizzle);
  340.    if (err) {
  341.       drm_intel_bo_unreference(bo);
  342.       return NULL;
  343.    }
  344.  
  345.    *tiling = real_tiling;
  346.    *pitch = handle->stride;
  347.  
  348.    return (struct intel_bo *) bo;
  349. }
  350.  
  351. int
  352. intel_winsys_export_handle(struct intel_winsys *winsys,
  353.                            struct intel_bo *bo,
  354.                            enum intel_tiling_mode tiling,
  355.                            unsigned long pitch,
  356.                            unsigned long height,
  357.                            struct winsys_handle *handle)
  358. {
  359.    int err = 0;
  360.  
  361.    switch (handle->type) {
  362.    case DRM_API_HANDLE_TYPE_SHARED:
  363.       {
  364.          uint32_t name;
  365.  
  366.          err = drm_intel_bo_flink(gem_bo(bo), &name);
  367.          if (!err)
  368.             handle->handle = name;
  369.       }
  370.       break;
  371.    case DRM_API_HANDLE_TYPE_KMS:
  372.       handle->handle = gem_bo(bo)->handle;
  373.       break;
  374.    case DRM_API_HANDLE_TYPE_FD:
  375.       {
  376.          int fd;
  377.  
  378.          err = drm_intel_bo_gem_export_to_prime(gem_bo(bo), &fd);
  379.          if (!err)
  380.             handle->handle = fd;
  381.       }
  382.       break;
  383.    default:
  384.       err = -EINVAL;
  385.       break;
  386.    }
  387.  
  388.    if (err)
  389.       return err;
  390.  
  391.    handle->stride = pitch;
  392.  
  393.    return 0;
  394. }
  395.  
  396. bool
  397. intel_winsys_can_submit_bo(struct intel_winsys *winsys,
  398.                            struct intel_bo **bo_array,
  399.                            int count)
  400. {
  401.    return !drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
  402.                                                  count);
  403. }
  404.  
  405. int
  406. intel_winsys_submit_bo(struct intel_winsys *winsys,
  407.                        enum intel_ring_type ring,
  408.                        struct intel_bo *bo, int used,
  409.                        struct intel_context *ctx,
  410.                        unsigned long flags)
  411. {
  412.    const unsigned long exec_flags = (unsigned long) ring | flags;
  413.  
  414.    /* logical contexts are only available for the render ring */
  415.    if (ring != INTEL_RING_RENDER)
  416.       ctx = NULL;
  417.  
  418.    if (ctx) {
  419.       return drm_intel_gem_bo_context_exec(gem_bo(bo),
  420.             (drm_intel_context *) ctx, used, exec_flags);
  421.    }
  422.    else {
  423.       return drm_intel_bo_mrb_exec(gem_bo(bo),
  424.             used, NULL, 0, 0, exec_flags);
  425.    }
  426. }
  427.  
/*
 * Disassemble the first \p used bytes of batch bo \p bo to stderr for
 * debugging.  The decoder is created lazily on first use and cached in
 * the winsys (under the mutex).
 */
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   /* read-only mapping is enough for decoding */
   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   /* the cached decoder is shared state; serialize access */
   pipe_mutex_lock(winsys->mutex);

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() uses stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);

   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}
  466.  
  467. struct intel_bo *
  468. intel_bo_ref(struct intel_bo *bo)
  469. {
  470.    if (bo)
  471.       drm_intel_bo_reference(gem_bo(bo));
  472.  
  473.    return bo;
  474. }
  475.  
  476. void
  477. intel_bo_unref(struct intel_bo *bo)
  478. {
  479.    if (bo)
  480.       drm_intel_bo_unreference(gem_bo(bo));
  481. }
  482.  
  483. int
  484. intel_bo_set_tiling(struct intel_bo *bo,
  485.                     enum intel_tiling_mode tiling,
  486.                     unsigned long pitch)
  487. {
  488.    uint32_t real_tiling = tiling;
  489.    int err;
  490.  
  491.    switch (tiling) {
  492.    case INTEL_TILING_X:
  493.       if (pitch % 512)
  494.          return -1;
  495.       break;
  496.    case INTEL_TILING_Y:
  497.       if (pitch % 128)
  498.          return -1;
  499.       break;
  500.    default:
  501.       break;
  502.    }
  503.  
  504.    err = drm_intel_bo_set_tiling(gem_bo(bo), &real_tiling, pitch);
  505.    if (err || real_tiling != tiling) {
  506.       assert(!"tiling mismatch");
  507.       return -1;
  508.    }
  509.  
  510.    return 0;
  511. }
  512.  
  513. void *
  514. intel_bo_map(struct intel_bo *bo, bool write_enable)
  515. {
  516.    int err;
  517.  
  518.    err = drm_intel_bo_map(gem_bo(bo), write_enable);
  519.    if (err) {
  520.       debug_error("failed to map bo");
  521.       return NULL;
  522.    }
  523.  
  524.    return gem_bo(bo)->virtual;
  525. }
  526.  
  527. void *
  528. intel_bo_map_async(struct intel_bo *bo)
  529. {
  530.    return NULL;
  531. }
  532.  
  533. void *
  534. intel_bo_map_gtt(struct intel_bo *bo)
  535. {
  536.    int err;
  537.  
  538.    err = drm_intel_gem_bo_map_gtt(gem_bo(bo));
  539.    if (err) {
  540.       debug_error("failed to map bo");
  541.       return NULL;
  542.    }
  543.  
  544.    return gem_bo(bo)->virtual;
  545. }
  546.  
  547. void *
  548. intel_bo_map_gtt_async(struct intel_bo *bo)
  549. {
  550.    int err;
  551.  
  552.    err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
  553.    if (err) {
  554.       debug_error("failed to map bo");
  555.       return NULL;
  556.    }
  557.  
  558.    return gem_bo(bo)->virtual;
  559. }
  560.  
  561. void
  562. intel_bo_unmap(struct intel_bo *bo)
  563. {
  564.    int err;
  565.  
  566.    err = drm_intel_bo_unmap(gem_bo(bo));
  567.    assert(!err);
  568. }
  569.  
  570. int
  571. intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
  572.                 unsigned long size, const void *data)
  573. {
  574.    return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
  575. }
  576.  
  577. int
  578. intel_bo_pread(struct intel_bo *bo, unsigned long offset,
  579.                unsigned long size, void *data)
  580. {
  581.    return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
  582. }
  583.  
/*
 * Record a relocation in \p bo at \p offset pointing at \p target_bo +
 * \p target_offset.  INTEL_RELOC_* \p flags are translated to GEM
 * read/write domains.  The target's presumed GPU address is written to
 * \p *presumed_offset regardless of the reloc call's result.
 */
int
intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
                   struct intel_bo *target_bo, uint32_t target_offset,
                   uint32_t flags, uint64_t *presumed_offset)
{
   uint32_t read_domains, write_domain;
   int err;

   if (flags & INTEL_RELOC_WRITE) {
      /*
       * Because of the translation to domains, INTEL_RELOC_GGTT should only
       * be set on GEN6 when the bo is written by MI_* or PIPE_CONTROL.  The
       * kernel will translate it back to INTEL_RELOC_GGTT.
       */
      write_domain = (flags & INTEL_RELOC_GGTT) ?
         I915_GEM_DOMAIN_INSTRUCTION : I915_GEM_DOMAIN_RENDER;
      read_domains = write_domain;
   } else {
      /* read-only: any of the GPU read domains may access the target */
      write_domain = 0;
      read_domains = I915_GEM_DOMAIN_RENDER |
                     I915_GEM_DOMAIN_SAMPLER |
                     I915_GEM_DOMAIN_INSTRUCTION |
                     I915_GEM_DOMAIN_VERTEX;
   }

   /* INTEL_RELOC_FENCE additionally requests a fence register for the
    * target (needed for tiled access by some engines) */
   if (flags & INTEL_RELOC_FENCE) {
      err = drm_intel_bo_emit_reloc_fence(gem_bo(bo), offset,
            gem_bo(target_bo), target_offset,
            read_domains, write_domain);
   } else {
      err = drm_intel_bo_emit_reloc(gem_bo(bo), offset,
            gem_bo(target_bo), target_offset,
            read_domains, write_domain);
   }

   *presumed_offset = gem_bo(target_bo)->offset64 + target_offset;

   return err;
}
  623.  
  624. int
  625. intel_bo_get_reloc_count(struct intel_bo *bo)
  626. {
  627.    return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
  628. }
  629.  
  630. void
  631. intel_bo_truncate_relocs(struct intel_bo *bo, int start)
  632. {
  633.    drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
  634. }
  635.  
  636. bool
  637. intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
  638. {
  639.    return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
  640. }
  641.  
  642. int
  643. intel_bo_wait(struct intel_bo *bo, int64_t timeout)
  644. {
  645.    int err;
  646.  
  647.    if (timeout >= 0) {
  648.       err = drm_intel_gem_bo_wait(gem_bo(bo), timeout);
  649.    } else {
  650.       drm_intel_bo_wait_rendering(gem_bo(bo));
  651.       err = 0;
  652.    }
  653.  
  654.    /* consider the bo idle on errors */
  655.    if (err && err != -ETIME)
  656.       err = 0;
  657.  
  658.    return err;
  659. }
  660.