
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = si_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(rdev, ring);
                return false;
        }
        return radeon_ring_test_lockup(rdev, ring);
}
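
/*
 * Illustrative sketch (not part of the driver): the two-step check above
 * first consults the soft-reset status; only if this ring's reset bit is
 * set does it fall through to the generic read-pointer-progress test.
 * The helper below is hypothetical and merely restates the mask selection.
 */
#if 0
static u32 si_dma_ring_reset_mask(struct radeon_ring *ring)
{
        /* DMA0 and DMA1 report hangs through separate soft-reset bits */
        return (ring->idx == R600_RING_TYPE_DMA_INDEX) ?
                RADEON_RESET_DMA : RADEON_RESET_DMA1;
}
#endif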

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
                          struct radeon_ib *ib,
                          uint64_t pe, uint64_t src,
                          unsigned count)
{
        while (count) {
                unsigned bytes = count * 8;
                if (bytes > 0xFFFF8)
                        bytes = 0xFFFF8;

                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
                                                      1, 0, 0, bytes);
                ib->ptr[ib->length_dw++] = lower_32_bits(pe);
                ib->ptr[ib->length_dw++] = lower_32_bits(src);
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

                pe += bytes;
                src += bytes;
                count -= bytes / 8;
        }
}
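
/*
 * Worked example (illustrative, hypothetical helper): the 20-bit byte
 * count of a COPY packet is clamped to 0xFFFF8, i.e. 0xFFFFF rounded
 * down to a whole number of 8-byte PTEs, so one packet moves at most
 * 131071 PTEs and costs 5 dwords of IB space.
 */
#if 0
static unsigned si_dma_copy_pages_ndw(unsigned count)
{
        unsigned ndw = 0;

        while (count) {
                unsigned chunk = min(count, 0xFFFF8u / 8u); /* PTEs per packet */

                ndw += 5;       /* header + dst lo/hi + src lo/hi */
                count -= chunk;
        }
        return ndw;
}
#endif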

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
                           struct radeon_ib *ib,
                           uint64_t pe,
                           uint64_t addr, unsigned count,
                           uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                /* for non-physically contiguous pages (system) */
                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
                ib->ptr[ib->length_dw++] = pe;
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
                                value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
                                value = 0;
                        }
                        addr += incr;
                        value |= flags;
                        ib->ptr[ib->length_dw++] = value;
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                }
        }
}
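
/*
 * Illustrative sketch (hypothetical helper): each WRITE packet carries a
 * 3-dword header followed by ndw payload dwords, two per PTE, so one
 * packet covers at most 0xFFFFE / 2 = 524287 entries. A single payload
 * pair is assembled as below; for system pages the GART-translated
 * address replaces addr first.
 */
#if 0
static void si_dma_example_pte_pair(uint32_t *dst, uint64_t addr,
                                    uint32_t flags)
{
        /* a valid PTE: page address OR'ed with the access flags */
        uint64_t value = (flags & R600_PTE_VALID) ? addr : 0;

        value |= flags;
        dst[0] = lower_32_bits(value);
        dst[1] = upper_32_bits(value);
}
#endif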

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
                         struct radeon_ib *ib,
                         uint64_t pe,
                         uint64_t addr, unsigned count,
                         uint32_t incr, uint32_t flags)
{
        uint64_t value;
        unsigned ndw;

        while (count) {
                ndw = count * 2;
                if (ndw > 0xFFFFE)
                        ndw = 0xFFFFE;

                if (flags & R600_PTE_VALID)
                        value = addr;
                else
                        value = 0;

                /* for physically contiguous pages (vram) */
                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
                ib->ptr[ib->length_dw++] = pe; /* dst addr */
                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
                ib->ptr[ib->length_dw++] = flags; /* mask */
                ib->ptr[ib->length_dw++] = 0;
                ib->ptr[ib->length_dw++] = value; /* value */
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                ib->ptr[ib->length_dw++] = incr; /* increment size */
                ib->ptr[ib->length_dw++] = 0;
                pe += ndw * 4;
                addr += (ndw / 2) * incr;
                count -= ndw / 2;
        }
}
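
/*
 * Illustrative sketch (hypothetical, assuming the PTE_PDE packet steps
 * the address by incr per entry and OR's in the flags mask): a CPU-side
 * model of the PTE sequence one 9-dword packet asks the engine to
 * generate.
 */
#if 0
static void si_dma_set_pages_cpu_model(uint64_t *pt, uint64_t addr,
                                       unsigned count, uint32_t incr,
                                       uint32_t flags)
{
        unsigned i;

        for (i = 0; i < count; i++) {
                pt[i] = ((flags & R600_PTE_VALID) ? addr : 0) | flags;
                addr += incr;
        }
}
#endif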

/**
 * si_dma_vm_flush - flush the page table cache via the DMA ring
 *
 * @rdev: radeon_device pointer
 * @ridx: index of the ring to emit the flush on
 * @vm: radeon_vm pointer
 *
 * Update the requested VM's page table base address, flush the HDP
 * cache and invalidate the VM's TLB entries using the DMA (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];

        if (vm == NULL)
                return;

        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        if (vm->id < 8) {
                radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
        } else {
                radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
        }
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* flush hdp cache */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);

        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
        radeon_ring_write(ring, 1 << vm->id);
}
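
/*
 * Illustrative sketch (hypothetical helper): all three register updates
 * above follow the same SRBM_WRITE pattern: a packet header, then the
 * byte-enable (0xf) combined with the dword-aligned register offset,
 * then the data word.
 */
#if 0
static void si_dma_srbm_write(struct radeon_ring *ring, u32 reg, u32 val)
{
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (reg >> 2));
        radeon_ring_write(ring, val);
}
#endif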

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int si_copy_dma(struct radeon_device *rdev,
                uint64_t src_offset, uint64_t dst_offset,
                unsigned num_gpu_pages,
                struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        radeon_semaphore_sync_to(sem, *fence);
        radeon_semaphore_sync_rings(rdev, sem, ring->idx);

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0xFFFFF)
                        cur_size_in_bytes = 0xFFFFF;
                size_in_bytes -= cur_size_in_bytes;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
                radeon_ring_write(ring, lower_32_bits(dst_offset));
                radeon_ring_write(ring, lower_32_bits(src_offset));
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
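
/*
 * Usage sketch (hypothetical): each loop iteration emits 5 dwords and
 * moves at most 0xFFFFF bytes, so copying 256 4 KiB GPU pages (1 MiB)
 * takes DIV_ROUND_UP(1048576, 0xfffff) = 2 packets and reserves
 * 2 * 5 + 11 = 21 ring dwords, including the sync and fence overhead.
 */
#if 0
static int si_copy_dma_example(struct radeon_device *rdev,
                               uint64_t src_gpu, uint64_t dst_gpu,
                               struct radeon_fence **fence)
{
        return si_copy_dma(rdev, src_gpu, dst_gpu, 256, fence);
}
#endif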