
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define AGP_USER_TYPES          (1 << 16)
#define AGP_USER_MEMORY         (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY  (AGP_USER_TYPES + 1)
/* Referenced by cache_level_to_agp_type() below; value assumed to match
 * the definition in Linux's intel-agp.h. */
#define AGP_USER_CACHED_MEMORY_LLC_MLC  (AGP_USER_TYPES + 2)

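/* Translate an i915 cache level into the AGP memory type that is used
 * when writing the object's GTT PTEs. */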
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
                                            enum i915_cache_level cache_level)
{
        switch (cache_level) {
        case I915_CACHE_LLC_MLC:
                if (INTEL_INFO(dev)->gen >= 6)
                        return AGP_USER_CACHED_MEMORY_LLC_MLC;
                /* Older chipsets do not have this extra level of CPU
                 * caching, so fall through and request the PTE simply
                 * as cached.
                 */
        case I915_CACHE_LLC:
                return AGP_USER_CACHED_MEMORY;
        default:
        case I915_CACHE_NONE:
                return AGP_USER_MEMORY;
        }
}

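/* On chipsets that require idle maps, force the GPU to idle
 * (non-interruptibly) before the GTT is modified.  Returns the previous
 * value of mm.interruptible so the caller can restore it with
 * undo_idling(). */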
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

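/* Counterpart to do_idling(): restore the interruptible flag that was
 * saved before the GTT update. */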
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->mm.gtt->do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

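/* Restore the GTT mappings of every bound object by clearing the range
 * and rebinding each object (used after resume in the upstream Linux
 * driver); compiled out in this port. */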
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_rebind_object(obj, obj->cache_level);
        }

        intel_gtt_chipset_flush();
}
#endif

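/* Bind an object into the GTT by writing PTEs for each of its pages,
 * using the AGP type derived from the object's cache level.  The
 * scatter/gather (DMAR) path is disabled in this port, so the pages are
 * always inserted directly. */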
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
        int ret;

//   if (dev_priv->mm.gtt->needs_dmar) {
//       ret = intel_gtt_map_memory(obj->pages,
//                      obj->base.size >> PAGE_SHIFT,
//                      &obj->sg_list,
//                      &obj->num_sg);
//       if (ret != 0)
//           return ret;

//       intel_gtt_insert_sg_entries(obj->sg_list,
//                       obj->num_sg,
//                       obj->gtt_space->start >> PAGE_SHIFT,
//                       agp_type);
//   } else
                intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
                                       obj->base.size >> PAGE_SHIFT,
                                       obj->pages,
                                       agp_type);

        return 0;
}

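/* Rewrite an already-bound object's PTEs for a new cache level, e.g.
 * after the object's caching mode has changed.  Mirrors the non-DMAR
 * path of i915_gem_gtt_bind_object(). */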
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
                                enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

//   if (dev_priv->mm.gtt->needs_dmar) {
//       BUG_ON(!obj->sg_list);

//       intel_gtt_insert_sg_entries(obj->sg_list,
//                       obj->num_sg,
//                       obj->gtt_space->start >> PAGE_SHIFT,
//                       agp_type);
//   } else
                intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
                                       obj->base.size >> PAGE_SHIFT,
                                       obj->pages,
                                       agp_type);
}

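/* Remove an object's PTEs from the GTT.  On chipsets that need idle
 * maps, the GPU is forced to idle around the update via do_idling() and
 * undo_idling(). */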
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);

        if (obj->sg_list) {
//       intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
                obj->sg_list = NULL;
        }

        undo_idling(dev_priv, interruptible);
}