/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
#include "r420_reg_safe.h"

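/* Populate the power-management profile table: each profile entry picks a
 * power state index and clock mode index for both the DPMS-off and DPMS-on
 * cases.
 */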
void r420_pm_init_profile(struct radeon_device *rdev)
{
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
}

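/* Install the R420 register whitelist bitmap used when checking command streams. */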
static void r420_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}

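/* Detect the number of GB (quad) pipes and Z pipes and program the tiling and
 * pipe configuration accordingly.
 */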
void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround TCL deadlock issue */
        WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
               (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes */
        gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

        /* SE chips have 1 pipe */
        if ((rdev->pdev->device == 0x5e4c) ||
            (rdev->pdev->device == 0x5e4f))
                num_pipes = 1;

        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
                /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
        WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
        WREG32(R300_GB_TILE_CONFIG, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               RREG32(R300_RB2D_DSTCACHE_MODE) |
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

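/* Memory controller registers are reached indirectly through the
 * MC_IND_INDEX/MC_IND_DATA register pair.
 */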
u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
{
        u32 r;

        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
        r = RREG32(R_0001FC_MC_IND_DATA);
        return r;
}

void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
        WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
                S_0001F8_MC_IND_WR_EN(1));
        WREG32(R_0001FC_MC_IND_DATA, v);
}

static void r420_debugfs(struct radeon_device *rdev)
{
        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }
        if (r420_debugfs_pipes_info_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for pipes !\n");
        }
}

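/* Optionally re-enable clock gating, then force the CP and VIP sclk domains on
 * (plus PX and TX on R420).
 */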
static void r420_clock_resume(struct radeon_device *rdev)
{
        u32 sclk_cntl;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_atom_set_clock_gating(rdev, 1);
        sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
        sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if (rdev->family == CHIP_R420)
                sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}

static void r420_cp_errata_init(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(ring, rdev->config.r300.resync_scratch);
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);
}

static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev, ring);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}

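/* Program the hardware and bring acceleration up: MC, clocks, GART, pipes,
 * writeback, fences, IRQ, CP ring and the IB pool.
 */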
static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
                if (r)
                        return r;
        }

        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r420_cp_errata_init(rdev);

        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                return r;
        }

        return 0;
}
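/* One-time initialization for R420/RV410 class chips; if the acceleration
 * startup fails, the GART is torn down and acceleration is left disabled.
 */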
int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA; need to use VGA request */
        /* restore some registers to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset GPU before posting, otherwise ATOM will enter an infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        r420_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);

        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32(R400_GB_PIPE_SELECT);
        seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
        tmp = RREG32(R300_GB_TILE_CONFIG);
        seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list r420_pipes_info_list[] = {
        {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif

int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
        return 0;
#endif
}