Subversion Repositories Kolibri OS


/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"

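/* Point the CS checker at the bitmap of registers userspace may program on R420. */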
static void r420_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

int r420_mc_init(struct radeon_device *rdev)
{
        int r;

        /* Setup GPU memory space */
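        /* 0xFFFFFFFF means "not placed yet"; radeon_mc_setup() picks the final location */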
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }
        return 0;
}

void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround TCL deadlock issue */
        WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes */
        gb_pipe_select = RREG32(0x402C);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
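        /* encode the pipe count into the GB_TILE_CONFIG pipe-count field;
         * the values below correspond to the R300_PIPE_COUNT_* encodings */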
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
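        /* 0x42C8: one bit per active pipe (SU_REG_DEST on later ASICs) */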
        WREG32(0x42C8, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= (1 << 4) | (1 << 0);
        WREG32(0x4018, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

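        /* enable automatic pipe configuration: DST_PIPE_CONFIG (0x170C), bit 31 */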
        tmp = RREG32(0x170C);
        WREG32(0x170C, tmp | (1 << 31));

        WREG32(R300_RB2D_DSTCACHE_MODE,
               RREG32(R300_RB2D_DSTCACHE_MODE) |
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

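/* Read a memory-controller register through the MC_IND_INDEX/MC_IND_DATA indirect pair. */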
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
        u32 r;

        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
        r = RREG32(R_0001FC_MC_IND_DATA);
        return r;
}

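/* Write a memory-controller register through the indirect pair; WR_EN selects a write cycle. */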
void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
        WREG32(R_0001FC_MC_IND_DATA, v);
}

static void r420_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
        if (r420_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
}

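/* Re-enable dynamic clock gating if requested and force the CP/VIP clocks
 * (plus PX/TX on R420) back on. */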
static void r420_clock_resume(struct radeon_device *rdev)
{
        u32 sclk_cntl;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
                sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}

static void r420_cp_errata_init(struct radeon_device *rdev)
{
        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
        radeon_ring_write(rdev, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev);
}

static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        radeon_ring_lock(rdev, 8);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);
        /* Enable IRQ */
//      r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r420_cp_errata_init(rdev);
//      r = r100_wb_init(rdev);
//      if (r) {
//              dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
//      }
//      r = r100_ib_init(rdev);
//      if (r) {
//              dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
//              return r;
//      }
        return 0;
}

int r420_resume(struct radeon_device *rdev)
{
        /* Make sure the GARTs are disabled */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r420_clock_resume(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (rdev->is_atom_bios) {
                atom_asic_init(rdev->mode_info.atom_context);
        } else {
                radeon_combios_asic_init(rdev->ddev);
        }
        /* Resume clock after posting */
        r420_clock_resume(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r420_startup(rdev);
}

int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* Get VRAM information */
        r300_vram_info(rdev);
        /* Initialize memory controller (also test AGP) */
        r = r420_mc_init(rdev);
        if (r) {
                return r;
        }
        r420_debugfs(rdev);
        /* Fence driver */
//      r = radeon_fence_driver_init(rdev);
//      if (r) {
//              return r;
//      }
//      r = radeon_irq_kms_init(rdev);
//      if (r) {
//              return r;
//      }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
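        /* enable PCI bus mastering on plain R420 boards */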
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
//              r420_suspend(rdev);
//              r100_cp_fini(rdev);
//              r100_wb_fini(rdev);
//              r100_ib_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
//              radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(R400_GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(R300_GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
        {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
        return 0;
#endif
}