/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */
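
/*
 * Usage sketch (illustrative only, not called from this file): a consumer
 * can prefer a stolen-backed object and fall back to an ordinary
 * shmem-backed one when stolen space is exhausted. The helper below is
 * hypothetical; i915_gem_object_create_stolen() is defined later in this
 * file.
 */
#if 0
static struct drm_i915_gem_object *
example_alloc_obj(struct drm_device *dev, u32 size)
{
        struct drm_i915_gem_object *obj;

        /* Try the BIOS-reserved (stolen) range first... */
        obj = i915_gem_object_create_stolen(dev, size);
        if (obj)
                return obj;

        /* ...then fall back to regular system memory. */
        return i915_gem_alloc_object(dev, size);
}
#endif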

static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR. On gen2, the layout is again slightly
         * different with the Graphics Segment immediately following Top of
         * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
         * reported by 865g, so we just use the top of memory as determined
         * by the e820 probe.
         *
         * XXX However gen2 requires an unavailable symbol.
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
                /* Two identical candidate ranges: one is trimmed to end
                 * below the GTT, the other to start above it, and the
                 * larger remainder wins.
                 */
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                };
                u64 gtt_start, gtt_end;

                gtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
                        gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
                                (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        gtt_start &= PGTBL_ADDRESS_LO_MASK;
                gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

                if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
                        stolen[0].end = gtt_start;
                if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
                        stolen[1].start = gtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
                }

                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long) gtt_start,
                                      (unsigned long long) gtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32) dev_priv->gtt.stolen_size - 1);
                }
        }

#if 0
        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * base + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOSes wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
                                            dev_priv->gtt.stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)dev_priv->gtt.stolen_size);
                        base = 0;
                }
        }
#endif
        return base;
}
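
/*
 * Worked example for the gen3+ path above (value illustrative): if the
 * dword at PCI config offset 0x5c reads 0x7fa00001, masking off the low
 * 20 bits with ~((1<<20) - 1) leaves the 1MiB-aligned physical base
 * 0x7fa00000; the low bits are not part of the address.
 */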

static int find_compression_threshold(struct drm_device *dev,
                                      struct drm_mm_node *node,
                                      int size,
                                      int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int compression_threshold = 1;
        int ret;

        /* HACK: This code depends on what we will do in *_enable_fbc. If that
         * code changes, this code needs to change as well.
         *
         * The enable_fbc code will attempt to use one of our 2 compression
         * thresholds, therefore, in that case, we only have 1 resort.
         */

        /* Try to over-allocate to reduce reallocations and fragmentation. */
        ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
                                 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret == 0)
                return compression_threshold;

again:
        /* HW's ability to limit the CFB is 1:4 */
        if (compression_threshold > 4 ||
            (fb_cpp == 2 && compression_threshold == 2))
                return 0;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, node,
                                 size >>= 1, 4096,
                                 DRM_MM_SEARCH_DEFAULT);
        if (ret && INTEL_INFO(dev)->gen <= 4) {
                return 0;
        } else if (ret) {
                compression_threshold <<= 1;
                goto again;
        } else {
                return compression_threshold;
        }
}
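
/*
 * Worked example (sizes illustrative): for size = 8M the helper first
 * tries an over-allocation of 16M at threshold 1; on failure it retries
 * 8M at threshold 1, then 4M at threshold 2, then 2M at threshold 4.
 * A threshold of 8 would exceed the HW's 1:4 limit (1:2 for 16bpp
 * framebuffers), so the search gives up and returns 0. On gen4 and
 * earlier, the first failed shrunken attempt ends the search instead.
 */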

static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int ret;

        ret = find_compression_threshold(dev, &dev_priv->fbc.compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
        else if (ret > 1) {
                DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
        }

        dev_priv->fbc.threshold = ret;

        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
                        goto err_fb;

                ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
                                         4096, 4096, DRM_MM_SEARCH_DEFAULT);
                if (ret)
                        goto err_fb;

                dev_priv->fbc.compressed_llb = compressed_llb;

                I915_WRITE(FBC_CFB_BASE,
                           dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }

        dev_priv->fbc.size = size / dev_priv->fbc.threshold;

        DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
                      size);

        return 0;

err_fb:
        kfree(compressed_llb);
        drm_mm_remove_node(&dev_priv->fbc.compressed_fb);
err_llb:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
}
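
/*
 * Worked example for the bookkeeping above (numbers illustrative): if
 * find_compression_threshold() settled on 2 for an 8MiB request, the
 * node actually reserved in stolen is 4MiB, and fbc.size is recorded as
 * 8MiB / 2 = 4MiB, matching the 1:2 compression limit that the
 * enable_fbc code will program.
 */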

int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return -ENODEV;

        if (size < dev_priv->fbc.size)
                return 0;

        /* Release any current block */
        i915_gem_stolen_cleanup_compression(dev);

        return i915_setup_compression(dev, size, fb_cpp);
}

void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (dev_priv->fbc.size == 0)
                return;

        drm_mm_remove_node(&dev_priv->fbc.compressed_fb);

        if (dev_priv->fbc.compressed_llb) {
                drm_mm_remove_node(dev_priv->fbc.compressed_llb);
                kfree(dev_priv->fbc.compressed_llb);
        }

        dev_priv->fbc.size = 0;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return;

        i915_gem_stolen_cleanup_compression(dev);
        drm_mm_takedown(&dev_priv->mm.stolen);
}

int i915_gem_init_stolen(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
        int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
        if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
                DRM_INFO("DMAR active, disabling use of stolen memory\n");
                return 0;
        }
#endif

        if (dev_priv->gtt.stolen_size == 0)
                return 0;

        dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
        if (dev_priv->mm.stolen_base == 0)
                return 0;

        DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
                      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

        if (INTEL_INFO(dev)->gen >= 8) {
                tmp = I915_READ(GEN7_BIOS_RESERVED);
                tmp >>= GEN8_BIOS_RESERVED_SHIFT;
                tmp &= GEN8_BIOS_RESERVED_MASK;
                bios_reserved = (1024*1024) << tmp;
        } else if (IS_GEN7(dev)) {
                tmp = I915_READ(GEN7_BIOS_RESERVED);
                bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
                        256*1024 : 1024*1024;
        }

        if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
                return 0;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
                    bios_reserved);

        return 0;
}
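
/*
 * Worked example for the gen8+ decode above (field value illustrative):
 * a GEN7_BIOS_RESERVED field of 2 yields (1024*1024) << 2 = 4MiB kept
 * back for the BIOS, so the drm_mm allocator only manages
 * stolen_size - 4MiB of the stolen range.
 */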

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
                             u32 offset, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct sg_table *st;
        struct scatterlist *sg;

        DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
        BUG_ON(offset > dev_priv->gtt.stolen_size - size);

        /* We hide that we have no struct page backing our stolen object
         * by wrapping the contiguous physical allocation with a fake
         * dma mapping in a single scatterlist.
         */

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return NULL;

        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
                return NULL;
        }

        sg = st->sgl;
        sg->offset = 0;
        sg->length = size;

        sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
        sg_dma_len(sg) = size;

        return st;
}
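
/*
 * Sketch (illustrative only): because a stolen object is backed by one
 * contiguous physical range, a consumer holding such an object can read
 * its DMA address straight from the single-entry table built above.
 * "obj" stands in for any stolen-backed drm_i915_gem_object.
 */
#if 0
        dma_addr_t addr = sg_dma_address(obj->pages->sgl);
        u32 len = sg_dma_len(obj->pages->sgl);  /* == object size */
#endif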

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
        /* Should only be called during free */
        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
        if (obj->stolen) {
                drm_mm_remove_node(obj->stolen);
                kfree(obj->stolen);
                obj->stolen = NULL;
        }
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
                               struct drm_mm_node *stolen)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base, stolen->size);
        i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

        obj->pages = i915_pages_create_for_stolen(dev,
                                                  stolen->start, stolen->size);
        if (obj->pages == NULL)
                goto cleanup;

        obj->has_dma_mapping = true;
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;

        obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
        obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

        return obj;

cleanup:
        i915_gem_object_free(obj);
        return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
        if (size == 0)
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
                                 4096, DRM_MM_SEARCH_DEFAULT);
        if (ret) {
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj)
                return obj;

        drm_mm_remove_node(stolen);
        kfree(stolen);
        return NULL;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                      stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        BUG_ON(stolen_offset & 4095);
        BUG_ON(size & 4095);

        if (WARN_ON(size == 0))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        stolen->start = stolen_offset;
        stolen->size = size;
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                drm_mm_remove_node(stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err_vma;
                }
        }

        vma->bound |= GLOBAL_BIND;

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err_vma:
        i915_gem_vma_destroy(vma);
err_out:
        drm_mm_remove_node(stolen);
        kfree(stolen);
        drm_gem_object_unreference(&obj->base);
        return NULL;
}
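
/*
 * Usage sketch (parameters illustrative): KMS code that discovers the
 * BIOS-programmed framebuffer can wrap it in a GEM object pinned at the
 * same GTT offset, so scanout continues unchanged across driver
 * takeover. The plane_config fields below are assumptions about the
 * caller's context, not definitions from this file.
 */
#if 0
        obj = i915_gem_object_create_stolen_for_preallocated(dev,
                                                             plane_config->base,
                                                             plane_config->base,
                                                             plane_config->size);
#endif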