/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

/*
 * KolibriOS stand-in for the Linux pci_alloc_consistent(): round the request
 * up to a 32 KiB boundary, allocate that many physical pages and map them
 * as writable, non-cached kernel memory.  The physical address is returned
 * through @dma_handle, the virtual mapping is the return value.
 */
void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                           addr_t *dma_handle)
{
        /* round up to a 32 KiB (0x8000) boundary */
        size = (size + 0x7FFF) & ~0x7FFF;

        *dma_handle = AllocPages(size >> 12);
        return (void *)MapIoMem(*dma_handle, size, PG_SW + PG_NOCACHE);
}

/*
 * GART
 * The GART (Graphics Aperture Remapping Table) is an aperture
 * in the GPU's address space.  System pages can be mapped into
 * the aperture and look like contiguous pages from the GPU's
 * perspective.  A page table maps the pages in the aperture
 * to the actual backing pages in system memory.
 *
 * Radeon GPUs support both an internal GART, as described above,
 * and AGP.  AGP works similarly, but the GART table is configured
 * and maintained by the northbridge rather than the driver.
 * Radeon hw has a separate AGP aperture that is programmed to
 * point to the AGP aperture provided by the northbridge and the
 * requests are passed through to the northbridge aperture.
 * Both AGP and internal GART can be used at the same time, however
 * that is not currently supported by the driver.
 *
 * This file handles the common internal GART management.
 */
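
/*
 * Illustrative sketch only (not used by the driver): the mapping described
 * above boils down to "aperture offset divided by GPU page size selects one
 * page-table entry".  The helper below shows that arithmetic using the real
 * rdev->mc.gtt_start and RADEON_GPU_PAGE_SIZE names; the function itself is
 * a hypothetical example, not part of the radeon code.
 */
static inline unsigned radeon_gart_example_entry_index(struct radeon_device *rdev,
                                                       uint64_t gpu_addr)
{
        /* byte offset of the address inside the GART aperture */
        uint64_t offset = gpu_addr - rdev->mc.gtt_start;

        /* each page-table entry covers one RADEON_GPU_PAGE_SIZE-sized page */
        return (unsigned)(offset / RADEON_GPU_PAGE_SIZE);
}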

/*
 * Common GART table functions.
 */
/**
 * radeon_gart_table_ram_alloc - allocate system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 * Returns 0 for success, -ENOMEM for failure.
 */
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
        void *ptr;

        ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
                                   &rdev->gart.table_addr);
        if (ptr == NULL) {
                return -ENOMEM;
        }
#ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                set_memory_uc((unsigned long)ptr,
                              rdev->gart.table_size >> PAGE_SHIFT);
        }
#endif
        rdev->gart.ptr = ptr;
        memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
        return 0;
}

/**
 * radeon_gart_table_ram_free - free system ram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Free system memory for GART page table
 * (r1xx-r3xx, non-pcie r4xx, rs400).  These asics require the
 * gart table to be in system memory.
 */
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
        if (rdev->gart.ptr == NULL) {
                return;
        }
#ifdef CONFIG_X86
        if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
            rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
                set_memory_wb((unsigned long)rdev->gart.ptr,
                              rdev->gart.table_size >> PAGE_SHIFT);
        }
#endif
        rdev->gart.ptr = NULL;
        rdev->gart.table_addr = 0;
}

/**
 * radeon_gart_table_vram_alloc - allocate vram for gart page table
 *
 * @rdev: radeon_device pointer
 *
 * Allocate video memory for GART page table
 * (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj == NULL) {
                r = radeon_bo_create(rdev, rdev->gart.table_size,
                                     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
                                     0, NULL, NULL, &rdev->gart.robj);
                if (r) {
                        return r;
                }
        }
        return 0;
}

/**
 * radeon_gart_table_vram_pin - pin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Pin the GART page table in vram so it will not be moved
 * by the memory manager (pcie r4xx, r5xx+).  These asics require the
 * gart table to be in video memory.
 * Returns 0 for success, error for failure.
 */
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
        uint64_t gpu_addr;
        int r;

        r = radeon_bo_reserve(rdev->gart.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->gart.robj,
                          RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->gart.robj);
                return r;
        }
        r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
        if (r)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;

        if (!r) {
                int i;

                /* We might have dropped some GART table updates while it wasn't
                 * mapped, restore all entries
                 */
                for (i = 0; i < rdev->gart.num_gpu_pages; i++)
                        radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
                mb();
                radeon_gart_tlb_flush(rdev);
        }

        return r;
}

/**
 * radeon_gart_table_vram_unpin - unpin gart page table in vram
 *
 * @rdev: radeon_device pointer
 *
 * Unpin the GART page table in vram (pcie r4xx, r5xx+).
 * These asics require the gart table to be in video memory.
 */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj == NULL) {
                return;
        }
        r = radeon_bo_reserve(rdev->gart.robj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->gart.robj);
                radeon_bo_unpin(rdev->gart.robj);
                radeon_bo_unreserve(rdev->gart.robj);
                rdev->gart.ptr = NULL;
        }
}

/**
 * radeon_gart_table_vram_free - free gart page table vram
 *
 * @rdev: radeon_device pointer
 *
 * Free the video memory used for the GART page table
 * (pcie r4xx, r5xx+).  These asics require the gart table to
 * be in video memory.
 */
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
        if (rdev->gart.robj == NULL) {
                return;
        }
        radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
/**
 * radeon_gart_unbind - unbind pages from the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to unbind
 *
 * Unbinds the requested pages from the gart page table and
 * replaces them with the dummy page (all asics).
 */
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
                        int pages)
{
        unsigned t;
        unsigned p;
        int i, j;

        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
                return;
        }
        t = offset / RADEON_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
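        /* Worked example, assuming 4 KiB CPU pages and 4 KiB GPU pages (so
         * PAGE_SIZE / RADEON_GPU_PAGE_SIZE is 1): offset 0x8000 gives t = 8
         * and p = 8, i.e. unbinding starts at GART entry 8 and at the 8th
         * tracked CPU page.
         */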
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                                rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
                                if (rdev->gart.ptr) {
                                        radeon_gart_set_page(rdev, t,
                                                             rdev->dummy_page.entry);
                                }
                        }
                }
        }
        if (rdev->gart.ptr) {
                mb();
                radeon_gart_tlb_flush(rdev);
        }
}

/**
 * radeon_gart_bind - bind pages into the gart page table
 *
 * @rdev: radeon_device pointer
 * @offset: offset into the GPU's gart aperture
 * @pages: number of pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of pages
 * @flags: RADEON_GART_PAGE_* flags
 *
 * Binds the requested pages to the gart page table
 * (all asics).
 * Returns 0 for success, -EINVAL for failure.
 */
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint32_t flags)
{
        unsigned t;
        unsigned p;
        uint64_t page_base, page_entry;
        int i, j;

        if (!rdev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
        }
        t = offset / RADEON_GPU_PAGE_SIZE;
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

        for (i = 0; i < pages; i++, p++) {
                rdev->gart.pages[p] = pagelist[i];
                page_base = dma_addr[i];
                for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
                        page_entry = radeon_gart_get_page_entry(page_base, flags);
                        rdev->gart.pages_entry[t] = page_entry;
                        if (rdev->gart.ptr) {
                                radeon_gart_set_page(rdev, t, page_entry);
                        }
                        page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        if (rdev->gart.ptr) {
                mb();
                radeon_gart_tlb_flush(rdev);
        }
        return 0;
}
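
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * roughly how a GTT backend would drive the bind/unbind pair above for a
 * buffer whose pages and DMA addresses it already holds.  The
 * RADEON_GART_PAGE_* flags are the real ones referenced in the kernel-doc
 * above; the function name and its call sequence are assumptions.
 */
static inline int radeon_gart_example_bind_cycle(struct radeon_device *rdev,
                                                 unsigned offset, int npages,
                                                 struct page **pages,
                                                 dma_addr_t *dma_addrs)
{
        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                         RADEON_GART_PAGE_WRITE;
        int r;

        r = radeon_gart_bind(rdev, offset, npages, pages, dma_addrs, flags);
        if (r)
                return r;

        /* ... GPU work against the aperture range would happen here ... */

        radeon_gart_unbind(rdev, offset, npages);
        return 0;
}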

/**
 * radeon_gart_init - init the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and init the gart driver info (all asics).
 * Returns 0 for success, error for failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{
        int r, i;

        if (rdev->gart.pages) {
                return 0;
        }
        /* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
        if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
                DRM_ERROR("Page size is smaller than GPU page size!\n");
                return -EINVAL;
        }
        r = radeon_dummy_page_init(rdev);
        if (r)
                return r;
        /* Compute table size */
        rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
        rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
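        /* For example, assuming a 512 MiB GTT with 4 KiB pages on both the
         * CPU and the GPU side, both counts come out to 131072 entries.
         */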
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
        /* Allocate pages table */
        rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
        if (rdev->gart.pages == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        rdev->gart.pages_entry = KernelAlloc(sizeof(uint64_t) *
                                             rdev->gart.num_gpu_pages);
        if (rdev->gart.pages_entry == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
        for (i = 0; i < rdev->gart.num_gpu_pages; i++)
                rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
        return 0;
}

/**
 * radeon_gart_fini - tear down the driver info for managing the gart
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the gart driver info and free the dummy page (all asics).
 */
void radeon_gart_fini(struct radeon_device *rdev)
{
        if (rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        vfree(rdev->gart.pages);
        vfree(rdev->gart.pages_entry);
        rdev->gart.pages = NULL;
        rdev->gart.pages_entry = NULL;

        radeon_dummy_page_fini(rdev);
}