Subversion Repositories Kolibri OS

Rev

Rev 6084 | Rev 6283 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright © 2008-2012 Intel Corporation
  3.  *
  4.  * Permission is hereby granted, free of charge, to any person obtaining a
  5.  * copy of this software and associated documentation files (the "Software"),
  6.  * to deal in the Software without restriction, including without limitation
  7.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8.  * and/or sell copies of the Software, and to permit persons to whom the
  9.  * Software is furnished to do so, subject to the following conditions:
  10.  *
  11.  * The above copyright notice and this permission notice (including the next
  12.  * paragraph) shall be included in all copies or substantial portions of the
  13.  * Software.
  14.  *
  15.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18.  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19.  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20.  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21.  * IN THE SOFTWARE.
  22.  *
  23.  * Authors:
  24.  *    Eric Anholt <eric@anholt.net>
  25.  *    Chris Wilson <chris@chris-wilson.co.uk>
  26.  *
  27.  */
  28.  
  29. #include <drm/drmP.h>
  30. #include <drm/i915_drm.h>
  31. #include "i915_drv.h"
  32.  
  33. #define KB(x) ((x) * 1024)
  34. #define MB(x) (KB(x) * 1024)
  35.  
  36. /*
  37.  * The BIOS typically reserves some of the system's memory for the exclusive
  38.  * use of the integrated graphics. This memory is no longer available for
  39.  * use by the OS and so the user finds that his system has less memory
  40.  * available than he put in. We refer to this memory as stolen.
  41.  *
  42.  * The BIOS will allocate its framebuffer from the stolen memory. Our
  43.  * goal is try to reuse that object for our own fbcon which must always
  44.  * be available for panics. Anything else we can reuse the stolen memory
  45.  * for is a boon.
  46.  */
  47.  
  48. int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
  49.                                          struct drm_mm_node *node, u64 size,
  50.                                          unsigned alignment, u64 start, u64 end)
  51. {
  52.         int ret;
  53.  
  54.         if (!drm_mm_initialized(&dev_priv->mm.stolen))
  55.                 return -ENODEV;
  56.  
  57.         /* See the comment at the drm_mm_init() call for more about this check.
  58.          * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
  59.         if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
  60.                 start = 4096;
  61.  
  62.         mutex_lock(&dev_priv->mm.stolen_lock);
  63.         ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
  64.                                           alignment, start, end,
  65.                                           DRM_MM_SEARCH_DEFAULT);
  66.         mutex_unlock(&dev_priv->mm.stolen_lock);
  67.  
  68.         return ret;
  69. }
  70.  
  71. int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
  72.                                 struct drm_mm_node *node, u64 size,
  73.                                 unsigned alignment)
  74. {
  75.         return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
  76.                                         alignment, 0,
  77.                                         dev_priv->gtt.stolen_usable_size);
  78. }
  79.  
  80. void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
  81.                                  struct drm_mm_node *node)
  82. {
  83.         mutex_lock(&dev_priv->mm.stolen_lock);
  84.         drm_mm_remove_node(node);
  85.         mutex_unlock(&dev_priv->mm.stolen_lock);
  86. }
  87.  
/*
 * i915_stolen_to_physical - locate the physical base of stolen memory.
 *
 * Returns the physical base address of the BIOS-reserved ("stolen")
 * region, or 0 when it cannot be determined (callers treat 0 as "no
 * usable stolen memory").  As a side effect this may shrink
 * dev_priv->gtt.stolen_size when the GTT itself is found to live inside
 * the stolen range on old (gen <= 4) parts.
 */
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct resource *r;     /* only referenced by the #if 0 reservation code below */
        u32 base;

        /* Almost universally we can find the Graphics Base of Stolen Memory
         * at offset 0x5c in the igfx configuration space. On a few (desktop)
         * machines this is also mirrored in the bridge device at different
         * locations, or in the MCHBAR.
         *
         * On 865 we just check the TOUD register.
         *
         * On 830/845/85x the stolen memory base isn't available in any
         * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
         *
         */
        base = 0;
        if (INTEL_INFO(dev)->gen >= 3) {
                /* Read Graphics Base of Stolen Memory directly */
                pci_read_config_dword(dev->pdev, 0x5c, &base);
                /* The base is 1MiB aligned; the low bits carry other flags. */
                base &= ~((1<<20) - 1);
        } else { /* GEN2 */
#if 0
                /* Stolen is immediately above Top of Memory */
                base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
        }

        if (base == 0)
                return 0;

        /* make sure we don't clobber the GTT if it's within stolen memory */
        if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
                /* Two candidate sub-ranges, both initially spanning the whole
                 * stolen region; [0] is clipped to end before the GTT, [1] is
                 * clipped to start after it, and the larger one wins. */
                struct {
                        u32 start, end;
                } stolen[2] = {
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                        { .start = base, .end = base + dev_priv->gtt.stolen_size, },
                };
                u64 gtt_start, gtt_end;

                gtt_start = I915_READ(PGTBL_CTL);
                if (IS_GEN4(dev))
                        gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
                                (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
                else
                        gtt_start &= PGTBL_ADDRESS_LO_MASK;
                /* Each GTT entry is 4 bytes. */
                gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

                if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
                        stolen[0].end = gtt_start;
                if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
                        stolen[1].start = gtt_end;

                /* pick the larger of the two chunks */
                if (stolen[0].end - stolen[0].start >
                    stolen[1].end - stolen[1].start) {
                        base = stolen[0].start;
                        dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
                } else {
                        base = stolen[1].start;
                        dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
                }

                /* If the candidates still differ, the GTT really did overlap
                 * stolen memory and the range was adjusted above. */
                if (stolen[0].start != stolen[1].start ||
                    stolen[0].end != stolen[1].end) {
                        DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
                                      (unsigned long long) gtt_start,
                                      (unsigned long long) gtt_end - 1);
                        DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
                                      base, base + (u32) dev_priv->gtt.stolen_size - 1);
                }
        }

#if 0

        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
         * is seriously wrong.
         */
        r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
                                    "Graphics Stolen Memory");
        if (r == NULL) {
                /*
                 * One more attempt but this time requesting region from
                 * base + 1, as we have seen that this resolves the region
                 * conflict with the PCI Bus.
                 * This is a BIOS w/a: Some BIOS wrap stolen in the root
                 * PCI bus, but have an off-by-one error. Hence retry the
                 * reservation starting from 1 instead of 0.
                 */
                r = devm_request_mem_region(dev->dev, base + 1,
                                            dev_priv->gtt.stolen_size - 1,
                                            "Graphics Stolen Memory");
                /*
                 * GEN3 firmware likes to smash pci bridges into the stolen
                 * range. Apparently this works.
                 */
                if (r == NULL && !IS_GEN3(dev)) {
                        DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
                                  base, base + (uint32_t)dev_priv->gtt.stolen_size);
                        base = 0;
                }
        }
#endif
        return base;
}
  197.  
  198. void i915_gem_cleanup_stolen(struct drm_device *dev)
  199. {
  200.         struct drm_i915_private *dev_priv = dev->dev_private;
  201.  
  202.         if (!drm_mm_initialized(&dev_priv->mm.stolen))
  203.                 return;
  204.  
  205.         drm_mm_takedown(&dev_priv->mm.stolen);
  206. }
  207.  
  208. static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
  209.                                     unsigned long *base, unsigned long *size)
  210. {
  211.         uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
  212.                                      CTG_STOLEN_RESERVED :
  213.                                      ELK_STOLEN_RESERVED);
  214.         unsigned long stolen_top = dev_priv->mm.stolen_base +
  215.                 dev_priv->gtt.stolen_size;
  216.  
  217.         *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
  218.  
  219.         WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
  220.  
  221.         /* On these platforms, the register doesn't have a size field, so the
  222.          * size is the distance between the base and the top of the stolen
  223.          * memory. We also have the genuine case where base is zero and there's
  224.          * nothing reserved. */
  225.         if (*base == 0)
  226.                 *size = 0;
  227.         else
  228.                 *size = stolen_top - *base;
  229. }
  230.  
  231. static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
  232.                                      unsigned long *base, unsigned long *size)
  233. {
  234.         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
  235.  
  236.         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
  237.  
  238.         switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
  239.         case GEN6_STOLEN_RESERVED_1M:
  240.                 *size = 1024 * 1024;
  241.                 break;
  242.         case GEN6_STOLEN_RESERVED_512K:
  243.                 *size = 512 * 1024;
  244.                 break;
  245.         case GEN6_STOLEN_RESERVED_256K:
  246.                 *size = 256 * 1024;
  247.                 break;
  248.         case GEN6_STOLEN_RESERVED_128K:
  249.                 *size = 128 * 1024;
  250.                 break;
  251.         default:
  252.                 *size = 1024 * 1024;
  253.                 MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
  254.         }
  255. }
  256.  
  257. static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
  258.                                      unsigned long *base, unsigned long *size)
  259. {
  260.         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
  261.  
  262.         *base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;
  263.  
  264.         switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
  265.         case GEN7_STOLEN_RESERVED_1M:
  266.                 *size = 1024 * 1024;
  267.                 break;
  268.         case GEN7_STOLEN_RESERVED_256K:
  269.                 *size = 256 * 1024;
  270.                 break;
  271.         default:
  272.                 *size = 1024 * 1024;
  273.                 MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
  274.         }
  275. }
  276.  
  277. static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
  278.                                      unsigned long *base, unsigned long *size)
  279. {
  280.         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
  281.  
  282.         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
  283.  
  284.         switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
  285.         case GEN8_STOLEN_RESERVED_1M:
  286.                 *size = 1024 * 1024;
  287.                 break;
  288.         case GEN8_STOLEN_RESERVED_2M:
  289.                 *size = 2 * 1024 * 1024;
  290.                 break;
  291.         case GEN8_STOLEN_RESERVED_4M:
  292.                 *size = 4 * 1024 * 1024;
  293.                 break;
  294.         case GEN8_STOLEN_RESERVED_8M:
  295.                 *size = 8 * 1024 * 1024;
  296.                 break;
  297.         default:
  298.                 *size = 8 * 1024 * 1024;
  299.                 MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
  300.         }
  301. }
  302.  
  303. static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
  304.                                     unsigned long *base, unsigned long *size)
  305. {
  306.         uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
  307.         unsigned long stolen_top;
  308.  
  309.         stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
  310.  
  311.         *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
  312.  
  313.         /* On these platforms, the register doesn't have a size field, so the
  314.          * size is the distance between the base and the top of the stolen
  315.          * memory. We also have the genuine case where base is zero and there's
  316.          * nothing reserved. */
  317.         if (*base == 0)
  318.                 *size = 0;
  319.         else
  320.                 *size = stolen_top - *base;
  321. }
  322.  
  323. int i915_gem_init_stolen(struct drm_device *dev)
  324. {
  325.         struct drm_i915_private *dev_priv = dev->dev_private;
  326.         unsigned long reserved_total, reserved_base = 0, reserved_size;
  327.         unsigned long stolen_top;
  328.  
  329.         mutex_init(&dev_priv->mm.stolen_lock);
  330.  
  331. #ifdef CONFIG_INTEL_IOMMU
  332.         if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
  333.                 DRM_INFO("DMAR active, disabling use of stolen memory\n");
  334.                 return 0;
  335.         }
  336. #endif
  337.  
  338.         if (dev_priv->gtt.stolen_size == 0)
  339.                 return 0;
  340.  
  341.         dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
  342.         if (dev_priv->mm.stolen_base == 0)
  343.                 return 0;
  344.  
  345.         stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
  346.  
  347.         switch (INTEL_INFO(dev_priv)->gen) {
  348.         case 2:
  349.         case 3:
  350.                 break;
  351.         case 4:
  352.                 if (IS_G4X(dev))
  353.                         g4x_get_stolen_reserved(dev_priv, &reserved_base,
  354.                                                 &reserved_size);
  355.                 break;
  356.         case 5:
  357.                 /* Assume the gen6 maximum for the older platforms. */
  358.                 reserved_size = 1024 * 1024;
  359.                 reserved_base = stolen_top - reserved_size;
  360.                 break;
  361.         case 6:
  362.                 gen6_get_stolen_reserved(dev_priv, &reserved_base,
  363.                                          &reserved_size);
  364.                 break;
  365.         case 7:
  366.                 gen7_get_stolen_reserved(dev_priv, &reserved_base,
  367.                                          &reserved_size);
  368.                 break;
  369.         default:
  370.                 if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
  371.                         bdw_get_stolen_reserved(dev_priv, &reserved_base,
  372.                                                 &reserved_size);
  373.                 else
  374.                         gen8_get_stolen_reserved(dev_priv, &reserved_base,
  375.                                                  &reserved_size);
  376.                 break;
  377.         }
  378.  
  379.         /* It is possible for the reserved base to be zero, but the register
  380.          * field for size doesn't have a zero option. */
  381.         if (reserved_base == 0) {
  382.                 reserved_size = 0;
  383.                 reserved_base = stolen_top;
  384.         }
  385.  
  386.         if (reserved_base < dev_priv->mm.stolen_base ||
  387.             reserved_base + reserved_size > stolen_top) {
  388.                 DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
  389.                               reserved_base, reserved_base + reserved_size,
  390.                               dev_priv->mm.stolen_base, stolen_top);
  391.                 return 0;
  392.         }
  393.  
  394.         /* It is possible for the reserved area to end before the end of stolen
  395.          * memory, so just consider the start. */
  396.         reserved_total = stolen_top - reserved_base;
  397.  
  398.         DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
  399.                       dev_priv->gtt.stolen_size >> 10,
  400.                       (dev_priv->gtt.stolen_size - reserved_total) >> 10);
  401.  
  402.         dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
  403.                                            reserved_total;
  404.  
  405.         /*
  406.          * Basic memrange allocator for stolen space.
  407.          *
  408.          * TODO: Notice that some platforms require us to not use the first page
  409.          * of the stolen memory but their BIOSes may still put the framebuffer
  410.          * on the first page. So we don't reserve this page for now because of
  411.          * that. Our current solution is to just prevent new nodes from being
  412.          * inserted on the first page - see the check we have at
  413.          * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
  414.          * problem later.
  415.          */
  416.         drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
  417.  
  418.     {
  419.         u32 usable_size = dev_priv->gtt.stolen_usable_size >> 20;
  420.         if(i915.fbsize > usable_size)
  421.         {
  422.             i915.fbsize = usable_size;
  423.             DRM_DEBUG_KMS("Adjust framebuffer size to match reserved memory\n"
  424.                           "new fbsize %dMB\n",i915.fbsize);
  425.         }
  426.     }
  427.  
  428.         return 0;
  429. }
  430.  
  431. static struct sg_table *
  432. i915_pages_create_for_stolen(struct drm_device *dev,
  433.                              u32 offset, u32 size)
  434. {
  435.         struct drm_i915_private *dev_priv = dev->dev_private;
  436.         struct sg_table *st;
  437.         struct scatterlist *sg;
  438.  
  439.         DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
  440.         BUG_ON(offset > dev_priv->gtt.stolen_size - size);
  441.  
  442.         /* We hide that we have no struct page backing our stolen object
  443.          * by wrapping the contiguous physical allocation with a fake
  444.          * dma mapping in a single scatterlist.
  445.          */
  446.  
  447.         st = kmalloc(sizeof(*st), GFP_KERNEL);
  448.         if (st == NULL)
  449.                 return NULL;
  450.  
  451.         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
  452.                 kfree(st);
  453.                 return NULL;
  454.         }
  455.  
  456.         sg = st->sgl;
  457.         sg->offset = 0;
  458.         sg->length = size;
  459.  
  460.         sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
  461.         sg_dma_len(sg) = size;
  462.  
  463.         return st;
  464. }
  465.  
/* Stolen objects are created with their backing sg_table already in place
 * (see i915_pages_create_for_stolen()), so this get_pages hook must never
 * be reached; hitting it indicates a driver bug. */
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
        BUG();
        return -EINVAL;
}
  471.  
  472. static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
  473. {
  474.         /* Should only be called during free */
  475.         sg_free_table(obj->pages);
  476.         kfree(obj->pages);
  477. }
  478.  
  479.  
  480. static void
  481. i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
  482. {
  483.         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
  484.  
  485.         if (obj->stolen) {
  486.                 i915_gem_stolen_remove_node(dev_priv, obj->stolen);
  487.                 kfree(obj->stolen);
  488.                 obj->stolen = NULL;
  489.         }
  490. }
/* GEM object ops for stolen-backed objects: pages are pre-created, so
 * get_pages traps (BUG), and release returns the node to the allocator. */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
        .get_pages = i915_gem_object_get_pages_stolen,
        .put_pages = i915_gem_object_put_pages_stolen,
        .release = i915_gem_object_release_stolen,
};
  496.  
  497. static struct drm_i915_gem_object *
  498. _i915_gem_object_create_stolen(struct drm_device *dev,
  499.                                struct drm_mm_node *stolen)
  500. {
  501.         struct drm_i915_gem_object *obj;
  502.  
  503.         obj = i915_gem_object_alloc(dev);
  504.         if (obj == NULL)
  505.                 return NULL;
  506.  
  507.         drm_gem_private_object_init(dev, &obj->base, stolen->size);
  508.         i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
  509.  
  510.         obj->pages = i915_pages_create_for_stolen(dev,
  511.                                                   stolen->start, stolen->size);
  512.         if (obj->pages == NULL)
  513.                 goto cleanup;
  514.  
  515.         i915_gem_object_pin_pages(obj);
  516.         obj->stolen = stolen;
  517.  
  518.         obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
  519.         obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
  520.  
  521.         return obj;
  522.  
  523. cleanup:
  524.         i915_gem_object_free(obj);
  525.         return NULL;
  526. }
  527.  
  528. struct drm_i915_gem_object *
  529. i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
  530. {
  531.         struct drm_i915_private *dev_priv = dev->dev_private;
  532.         struct drm_i915_gem_object *obj;
  533.         struct drm_mm_node *stolen;
  534.         int ret;
  535.  
  536.         if (!drm_mm_initialized(&dev_priv->mm.stolen))
  537.                 return NULL;
  538.  
  539.         DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
  540.         if (size == 0)
  541.                 return NULL;
  542.  
  543.         stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
  544.         if (!stolen)
  545.                 return NULL;
  546.  
  547.         ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
  548.         if (ret) {
  549.                 kfree(stolen);
  550.                 return NULL;
  551.         }
  552.  
  553.         obj = _i915_gem_object_create_stolen(dev, stolen);
  554.         if (obj)
  555.                 return obj;
  556.  
  557.         i915_gem_stolen_remove_node(dev_priv, stolen);
  558.         kfree(stolen);
  559.         return NULL;
  560. }
  561.  
/*
 * Wrap a region of stolen memory that something else (typically the BIOS
 * framebuffer) already placed at a fixed offset in a GEM object, and
 * optionally bind it at a fixed GTT offset as well.
 *
 * @stolen_offset: page-aligned byte offset of the region within stolen.
 * @gtt_offset: fixed GTT offset to bind at, or I915_GTT_OFFSET_NONE for a
 *              physical-only object.
 * @size: page-aligned size in bytes (must be non-zero).
 *
 * Returns the new object, or NULL on failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_address_space *ggtt = &dev_priv->gtt.base;
        struct drm_i915_gem_object *obj;
        struct drm_mm_node *stolen;
        struct i915_vma *vma;
        int ret;

        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;

        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                        stolen_offset, gtt_offset, size);

        /* KISS and expect everything to be page-aligned */
        if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
            WARN_ON(stolen_offset & 4095))
                return NULL;

        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
        if (!stolen)
                return NULL;

        /* Reserve the exact requested range rather than searching for one. */
        stolen->start = stolen_offset;
        stolen->size = size;
        mutex_lock(&dev_priv->mm.stolen_lock);
        ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
        mutex_unlock(&dev_priv->mm.stolen_lock);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen space\n");
                kfree(stolen);
                return NULL;
        }

        obj = _i915_gem_object_create_stolen(dev, stolen);
        if (obj == NULL) {
                DRM_DEBUG_KMS("failed to allocate stolen object\n");
                i915_gem_stolen_remove_node(dev_priv, stolen);
                kfree(stolen);
                return NULL;
        }

        /* Some objects just need physical mem from stolen space */
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;

        vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }

        /* To simplify the initialisation sequence between KMS and GTT,
         * we allow construction of the stolen object prior to
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
        vma->node.start = gtt_offset;
        vma->node.size = size;
        if (drm_mm_initialized(&ggtt->mm)) {
                ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
                if (ret) {
                        DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                        goto err;
                }

                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
                list_add_tail(&vma->mm_list, &ggtt->inactive_list);
        }

        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
        i915_gem_object_pin_pages(obj);

        return obj;

err:
        /* Dropping the last reference runs the release hook, which returns
         * the stolen node to the allocator and frees it. */
        drm_gem_object_unreference(&obj->base);
        return NULL;
}
  647.