/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "sna.h"
#include "sna_reg.h"

#define DBG_NO_HW 0
#define DBG_NO_TILING 1
#define DBG_NO_CACHE 0
#define DBG_NO_CACHE_LEVEL 0
#define DBG_NO_CPU 0
#define DBG_NO_USERPTR 0
#define DBG_NO_LLC 0
#define DBG_NO_SEMAPHORES 0
#define DBG_NO_MADV 0
#define DBG_NO_UPLOAD_CACHE 0
#define DBG_NO_UPLOAD_ACTIVE 0
#define DBG_NO_MAP_UPLOAD 0
#define DBG_NO_RELAXED_FENCING 0
#define DBG_NO_SECURE_BATCHES 0
#define DBG_NO_PINNED_BATCHES 0
#define DBG_NO_FAST_RELOC 0
#define DBG_NO_HANDLE_LUT 0
#define DBG_DUMP 0

#define MAX_GTT_VMA_CACHE 512
#define MAX_CPU_VMA_CACHE INT16_MAX
#define MAP_PRESERVE_TIME 10

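/* The low two bits of a bo->map pointer are used as a tag describing how
 * the mapping was made: bit 0 marks a CPU mapping and bits 0|1 a userptr
 * mapping (so 01 = CPU map, 11 = user map). This relies on mapped
 * addresses being at least 4-byte aligned; MAP() masks the tag off to
 * recover the real pointer. */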
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)

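/* Requests are tagged the same way: the ring a request executes on is
 * kept in the low bits of the request pointer. */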
#define MAKE_REQUEST(rq, ring) ((struct kgem_request *)((uintptr_t)(rq) | (ring)))

#define LOCAL_I915_PARAM_HAS_BLT                11
#define LOCAL_I915_PARAM_HAS_RELAXED_FENCING    12
#define LOCAL_I915_PARAM_HAS_RELAXED_DELTA      15
#define LOCAL_I915_PARAM_HAS_SEMAPHORES         20
#define LOCAL_I915_PARAM_HAS_SECURE_BATCHES     23
#define LOCAL_I915_PARAM_HAS_PINNED_BATCHES     24
#define LOCAL_I915_PARAM_HAS_NO_RELOC           25
#define LOCAL_I915_PARAM_HAS_HANDLE_LUT         26

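/* On KolibriOS the i915 GETPARAM query is not issued via drmIoctl();
 * instead the drm_i915_getparam_t structure is passed to the display
 * driver through the native service interface, using call_service()
 * with the SRV_GET_PARAM code and the drm fd as the service handle. */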
static int gem_param(struct kgem *kgem, int name)
{
    ioctl_t  io;

    drm_i915_getparam_t gp;
    int v = -1; /* No param uses the sign bit, reserve it for errors */

    VG_CLEAR(gp);
    gp.param = name;
    gp.value = &v;

    io.handle   = kgem->fd;
    io.io_code  = SRV_GET_PARAM;
    io.input    = &gp;
    io.inp_size = sizeof(gp);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        return -1;

    VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
    return v;
}

static bool test_has_no_reloc(struct kgem *kgem)
{
        if (DBG_NO_FAST_RELOC)
                return false;

        return gem_param(kgem, LOCAL_I915_PARAM_HAS_NO_RELOC) > 0;
}

static bool test_has_handle_lut(struct kgem *kgem)
{
        if (DBG_NO_HANDLE_LUT)
                return false;

        return gem_param(kgem, LOCAL_I915_PARAM_HAS_HANDLE_LUT) > 0;
}

static bool test_has_semaphores_enabled(struct kgem *kgem)
{
        bool detected = false;
        int ret;

        if (DBG_NO_SEMAPHORES)
                return false;

        ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
        if (ret != -1)
                return ret > 0;

        return detected;
}

static bool test_has_relaxed_fencing(struct kgem *kgem)
{
        if (kgem->gen < 040) {
                if (DBG_NO_RELAXED_FENCING)
                        return false;

                return gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_FENCING) > 0;
        } else
                return true;
}

static bool test_has_llc(struct kgem *kgem)
{
        int has_llc = -1;

        if (DBG_NO_LLC)
                return false;

#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
        has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
#endif
        if (has_llc == -1) {
                DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
                has_llc = kgem->gen >= 060;
        }

        return has_llc;
}

static bool test_has_cacheing(struct kgem *kgem)
{
        uint32_t handle;
        bool ret = false;

        if (DBG_NO_CACHE_LEVEL)
                return false;

        /* Incoherent blt and sampler hangs the GPU */
        if (kgem->gen == 040)
                return false;

        /* The set-cache-level probe is disabled in this port, so report
         * no support for now. */
//      handle = gem_create(kgem->fd, 1);
//      if (handle == 0)
//              return false;

//      ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
//      gem_close(kgem->fd, handle);
        return ret;
}

static bool test_has_userptr(struct kgem *kgem)
{
#if defined(USE_USERPTR)
        uint32_t handle;
        void *ptr;

        if (DBG_NO_USERPTR)
                return false;

        /* Incoherent blt and sampler hangs the GPU */
        if (kgem->gen == 040)
                return false;

        ptr = malloc(PAGE_SIZE);
        if (ptr == NULL)
                return false;

        handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
        gem_close(kgem->fd, handle);
        free(ptr);

        return handle != 0;
#else
        return false;
#endif
}

static bool test_has_secure_batches(struct kgem *kgem)
{
        if (DBG_NO_SECURE_BATCHES)
                return false;

        return gem_param(kgem, LOCAL_I915_PARAM_HAS_SECURE_BATCHES) > 0;
}

static bool test_has_pinned_batches(struct kgem *kgem)
{
        if (DBG_NO_PINNED_BATCHES)
                return false;

        return gem_param(kgem, LOCAL_I915_PARAM_HAS_PINNED_BATCHES) > 0;
}

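/* Probe the kernel for the features we care about and initialise the
 * bo cache lists. Note that the bulk of the upstream initialisation
 * (hardware checks, batch sizing, aperture budgeting, fence counts)
 * is still compiled out below under #if 0 while the port is being
 * brought up. */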
void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
{
    struct drm_i915_gem_get_aperture aperture;
    size_t totalram;
    unsigned half_gpu_max;
    unsigned int i, j;

    DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));

    memset(kgem, 0, sizeof(*kgem));

    kgem->fd = fd;
    kgem->gen = gen;

    list_init(&kgem->requests[0]);
    list_init(&kgem->requests[1]);
    list_init(&kgem->batch_buffers);
    list_init(&kgem->active_buffers);
    list_init(&kgem->flushing);
    list_init(&kgem->large);
    list_init(&kgem->large_inactive);
    list_init(&kgem->snoop);
    list_init(&kgem->scanout);
    for (i = 0; i < ARRAY_SIZE(kgem->pinned_batches); i++)
        list_init(&kgem->pinned_batches[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
        list_init(&kgem->inactive[i]);
    for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++)
            list_init(&kgem->active[i][j]);
    }
    for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) {
        for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++)
            list_init(&kgem->vma[i].inactive[j]);
    }

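    /* The per-type VMA cache counters start out negative so that up to
     * MAX_GTT_VMA_CACHE/MAX_CPU_VMA_CACHE mappings can be cached before
     * the count turns positive and eviction of inactive maps kicks in. */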
    kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE;
    kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE;

    kgem->has_blt = gem_param(kgem, LOCAL_I915_PARAM_HAS_BLT) > 0;
    DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
         kgem->has_blt));

    kgem->has_relaxed_delta =
        gem_param(kgem, LOCAL_I915_PARAM_HAS_RELAXED_DELTA) > 0;
    DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
         kgem->has_relaxed_delta));

    kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
    DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
         kgem->has_relaxed_fencing));

    kgem->has_llc = test_has_llc(kgem);
    DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
         kgem->has_llc));

    kgem->has_cacheing = test_has_cacheing(kgem);
    DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
         kgem->has_cacheing));

    kgem->has_userptr = test_has_userptr(kgem);
    DBG(("%s: has userptr? %d\n", __FUNCTION__,
         kgem->has_userptr));

    kgem->has_no_reloc = test_has_no_reloc(kgem);
    DBG(("%s: has no-reloc? %d\n", __FUNCTION__,
         kgem->has_no_reloc));

    kgem->has_handle_lut = test_has_handle_lut(kgem);
    DBG(("%s: has handle-lut? %d\n", __FUNCTION__,
         kgem->has_handle_lut));

    kgem->has_semaphores = false;
    if (kgem->has_blt && test_has_semaphores_enabled(kgem))
        kgem->has_semaphores = true;
    DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
         kgem->has_semaphores));

    kgem->can_blt_cpu = gen >= 030;
    DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
         kgem->can_blt_cpu));

    kgem->has_secure_batches = test_has_secure_batches(kgem);
    DBG(("%s: can use privileged batchbuffers? %d\n", __FUNCTION__,
         kgem->has_secure_batches));

    kgem->has_pinned_batches = test_has_pinned_batches(kgem);
    DBG(("%s: can use pinned batchbuffers (to avoid CS w/a)? %d\n", __FUNCTION__,
         kgem->has_pinned_batches));

#if 0

    if (!is_hw_supported(kgem, dev)) {
        xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
               "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
        kgem->wedged = 1;
    } else if (__kgem_throttle(kgem)) {
        xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
               "Detected a hung GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    kgem->batch_size = ARRAY_SIZE(kgem->batch);
    if (gen == 020 && !kgem->has_pinned_batches)
        /* Limited to what we can pin */
        kgem->batch_size = 4*1024;
    if (gen == 022)
        /* 865g cannot handle a batch spanning multiple pages */
        kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
    if ((gen >> 3) == 7)
        kgem->batch_size = 16*1024;
    if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
        kgem->batch_size = 4*1024;

    if (!kgem_init_pinned_batches(kgem) && gen == 020) {
        xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
               "Unable to reserve memory for GPU, disabling acceleration.\n");
        kgem->wedged = 1;
    }

    DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
         kgem->batch_size));

    kgem->min_alignment = 4;
    if (gen < 040)
        kgem->min_alignment = 64;

    kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
    DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
         kgem->half_cpu_cache_pages));

    kgem->next_request = __kgem_request_alloc(kgem);

    DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
         !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
         kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

    VG_CLEAR(aperture);
    aperture.aper_size = 0;
    (void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
    if (aperture.aper_size == 0)
        aperture.aper_size = 64*1024*1024;

    DBG(("%s: aperture size %lld, available now %lld\n",
         __FUNCTION__,
         (long long)aperture.aper_size,
         (long long)aperture.aper_available_size));

    kgem->aperture_total = aperture.aper_size;
    kgem->aperture_high = aperture.aper_size * 3/4;
    kgem->aperture_low = aperture.aper_size * 1/3;
    if (gen < 033) {
        /* Severe alignment penalties */
        kgem->aperture_high /= 2;
        kgem->aperture_low /= 2;
    }
    DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
         kgem->aperture_low, kgem->aperture_low / (1024*1024),
         kgem->aperture_high, kgem->aperture_high / (1024*1024)));

    kgem->aperture_mappable = agp_aperture_size(dev, gen);
    if (kgem->aperture_mappable == 0 ||
        kgem->aperture_mappable > aperture.aper_size)
        kgem->aperture_mappable = aperture.aper_size;
    DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
         kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));

    kgem->buffer_size = 64 * 1024;
    while (kgem->buffer_size < kgem->aperture_mappable >> 10)
        kgem->buffer_size *= 2;
    if (kgem->buffer_size >> 12 > kgem->half_cpu_cache_pages)
        kgem->buffer_size = kgem->half_cpu_cache_pages << 12;
    DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
         kgem->buffer_size, kgem->buffer_size / 1024));

    kgem->max_object_size = 3 * (kgem->aperture_high >> 12) << 10;
    kgem->max_gpu_size = kgem->max_object_size;
    if (!kgem->has_llc)
        kgem->max_gpu_size = MAX_CACHE_SIZE;

    totalram = total_ram_size();
    if (totalram == 0) {
        DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
             __FUNCTION__));
        totalram = kgem->aperture_total;
    }
    DBG(("%s: total ram=%ld\n", __FUNCTION__, (long)totalram));
    if (kgem->max_object_size > totalram / 2)
        kgem->max_object_size = totalram / 2;
    if (kgem->max_gpu_size > totalram / 4)
        kgem->max_gpu_size = totalram / 4;

    kgem->max_cpu_size = kgem->max_object_size;

    half_gpu_max = kgem->max_gpu_size / 2;
    kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
    if (kgem->max_copy_tile_size > half_gpu_max)
        kgem->max_copy_tile_size = half_gpu_max;

    if (kgem->has_llc)
        kgem->max_upload_tile_size = kgem->max_copy_tile_size;
    else
        kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
    if (kgem->max_upload_tile_size > half_gpu_max)
        kgem->max_upload_tile_size = half_gpu_max;

    kgem->large_object_size = MAX_CACHE_SIZE;
    if (kgem->large_object_size > kgem->max_gpu_size)
        kgem->large_object_size = kgem->max_gpu_size;

    if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
        if (kgem->large_object_size > kgem->max_cpu_size)
            kgem->large_object_size = kgem->max_cpu_size;
    } else
        kgem->max_cpu_size = 0;
    if (DBG_NO_CPU)
        kgem->max_cpu_size = 0;

    DBG(("%s: maximum object size=%d\n",
         __FUNCTION__, kgem->max_object_size));
    DBG(("%s: large object threshold=%d\n",
         __FUNCTION__, kgem->large_object_size));
    DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
         __FUNCTION__,
         kgem->max_gpu_size, kgem->max_cpu_size,
         kgem->max_upload_tile_size, kgem->max_copy_tile_size));

    /* Convert the aperture thresholds to pages */
    kgem->aperture_low /= PAGE_SIZE;
    kgem->aperture_high /= PAGE_SIZE;

    kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
    if ((int)kgem->fence_max < 0)
        kgem->fence_max = 5; /* minimum safe value for all hw */
    DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max));

    kgem->batch_flags_base = 0;
    if (kgem->has_no_reloc)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_NO_RELOC;
    if (kgem->has_handle_lut)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_HANDLE_LUT;
    if (kgem->has_pinned_batches)
        kgem->batch_flags_base |= LOCAL_I915_EXEC_IS_PINNED;

#endif

}

void kgem_clear_dirty(struct kgem *kgem)
{
        struct list * const buffers = &kgem->next_request->buffers;
        struct kgem_bo *bo;

        /* Dirty bos are kept at the head of the request list, so the
         * scan can stop at the first clean bo. */
        list_for_each_entry(bo, buffers, request) {
                if (!bo->dirty)
                        break;

                bo->dirty = false;
        }
}
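/* Each bo carries a small format->offset cache (a singly-linked list
 * rooted in the bo itself) remembering where state for a given surface
 * format was last emitted, so it can be reused instead of re-created.
 * An offset of zero terminates the list. */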
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
{
        struct kgem_bo_binding *b;

        for (b = &bo->binding; b && b->offset; b = b->next)
                if (format == b->format)
                        return b->offset;

        return 0;
}

void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
{
        struct kgem_bo_binding *b;

        /* Reuse the first free slot and truncate any stale entries
         * that follow it. */
        for (b = &bo->binding; b; b = b->next) {
                if (b->offset)
                        continue;

                b->offset = offset;
                b->format = format;

                if (b->next)
                        b->next->offset = 0;

                return;
        }

        b = malloc(sizeof(*b));
        if (b) {
                b->next = bo->binding.next;
                b->format = format;
                b->offset = offset;
                bo->binding.next = b;
        }
}

/* The functions below are stubs for this port: relocation handling,
 * request reset, batch submission and bo management are not yet
 * implemented. */
uint32_t kgem_add_reloc(struct kgem *kgem,
                        uint32_t pos,
                        struct kgem_bo *bo,
                        uint32_t read_write_domain,
                        uint32_t delta)
{
    return 0;
}

void kgem_reset(struct kgem *kgem)
{
}

void _kgem_submit(struct kgem *kgem)
{
}

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
{
        struct kgem_bo *bo = NULL;

        return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
}