Subversion Repositories Kolibri OS

Rev

Rev 3764 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  */
  28. #include <linux/seq_file.h>
  29. #include <linux/slab.h>
  30. #include <drm/drmP.h>
  31. #include "radeon_reg.h"
  32. #include "radeon.h"
  33. #include "radeon_asic.h"
  34. #include "atom.h"
  35. #include "r100d.h"
  36. #include "r420d.h"
  37. #include "r420_reg_safe.h"
  38.  
  39. void r420_pm_init_profile(struct radeon_device *rdev)
  40. {
  41.         /* default */
  42.         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
  43.         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
  44.         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
  45.         rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
  46.         /* low sh */
  47.         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
  48.         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
  49.         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
  50.         rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
  51.         /* mid sh */
  52.         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
  53.         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
  54.         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
  55.         rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
  56.         /* high sh */
  57.         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
  58.         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
  59.         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
  60.         rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
  61.         /* low mh */
  62.         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
  63.         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
  64.         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
  65.         rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
  66.         /* mid mh */
  67.         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
  68.         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
  69.         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
  70.         rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
  71.         /* high mh */
  72.         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
  73.         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
  74.         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
  75.         rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
  76. }
  77.  
  78. static void r420_set_reg_safe(struct radeon_device *rdev)
  79. {
  80.         rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
  81.         rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
  82. }
  83.  
/**
 * r420_pipes_init - detect and program the GB quad/Z pipe configuration.
 * @rdev: radeon device
 *
 * Reads the pipe count the hardware reports, overrides it for known
 * single-pipe ("SE") variants, programs the tiling/pipe registers, and
 * records the result in rdev->num_gb_pipes / rdev->num_z_pipes.
 */
void r420_pipes_init(struct radeon_device *rdev)
{
        unsigned tmp;
        unsigned gb_pipe_select;
        unsigned num_pipes;

        /* GA_ENHANCE workaround TCL deadlock issue */
        WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
               (1 << 2) | (1 << 3));
        /* add idle wait as per freedesktop.org bug 24041 */
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        /* get max number of pipes: bits 13:12 of GB_PIPE_SELECT hold count-1 */
        gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
        num_pipes = ((gb_pipe_select >> 12) & 3) + 1;

        /* SE chips have 1 pipe (PCI IDs 0x5e4c / 0x5e4f) */
        if ((rdev->pdev->device == 0x5e4c) ||
            (rdev->pdev->device == 0x5e4f))
                num_pipes = 1;

        rdev->num_gb_pipes = num_pipes;
        tmp = 0;
        /* translate pipe count into the GB_TILE_CONFIG pipe-select field */
        switch (num_pipes) {
        default:
                /* force to 1 pipe */
                num_pipes = 1;
                /* fall through */
        case 1:
                tmp = (0 << 1);
                break;
        case 2:
                tmp = (3 << 1);
                break;
        case 3:
                tmp = (6 << 1);
                break;
        case 4:
                tmp = (7 << 1);
                break;
        }
        /* enable one SU destination bit per pipe */
        WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
        WREG32(R300_GB_TILE_CONFIG, tmp);
        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        /* let the hardware derive the destination pipe config itself */
        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               RREG32(R300_RB2D_DSTCACHE_MODE) |
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        /* RV530 may have 2 Z pipes; everything else in this path has 1 */
        if (rdev->family == CHIP_RV530) {
                tmp = RREG32(RV530_GB_PIPE_SELECT2);
                if ((tmp & 3) == 3)
                        rdev->num_z_pipes = 2;
                else
                        rdev->num_z_pipes = 1;
        } else
                rdev->num_z_pipes = 1;

        DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}
  160.  
  161. u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg)
  162. {
  163.         unsigned long flags;
  164.         u32 r;
  165.  
  166.         spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  167.         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg));
  168.         r = RREG32(R_0001FC_MC_IND_DATA);
  169.         spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  170.         return r;
  171. }
  172.  
  173. void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
  174. {
  175.         unsigned long flags;
  176.  
  177.         spin_lock_irqsave(&rdev->mc_idx_lock, flags);
  178.         WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) |
  179.                 S_0001F8_MC_IND_WR_EN(1));
  180.         WREG32(R_0001FC_MC_IND_DATA, v);
  181.         spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
  182. }
  183.  
  184. static void r420_debugfs(struct radeon_device *rdev)
  185. {
  186.         if (r100_debugfs_rbbm_init(rdev)) {
  187.                 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
  188.         }
  189.         if (r420_debugfs_pipes_info_init(rdev)) {
  190.                 DRM_ERROR("Failed to register debugfs file for pipes !\n");
  191.         }
  192. }
  193.  
  194. static void r420_clock_resume(struct radeon_device *rdev)
  195. {
  196.         u32 sclk_cntl;
  197.  
  198.         if (radeon_dynclks != -1 && radeon_dynclks)
  199.                 radeon_atom_set_clock_gating(rdev, 1);
  200.         sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL);
  201.         sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
  202.         if (rdev->family == CHIP_R420)
  203.                 sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1);
  204.         WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
  205. }
  206.  
/*
 * Hardware-errata workaround: queue a CP RESYNC packet at ring init.
 * Must be paired with r420_cp_errata_fini() at teardown.
 */
static void r420_cp_errata_init(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* RV410 and R420 can lock up if CP DMA to host memory happens
         * while the 2D engine is busy.
         *
         * The proper workaround is to queue a RESYNC at the beginning
         * of the CP init, apparently.
         */
        /* scratch register receives the resync token; freed in _fini */
        radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
        radeon_ring_write(ring, rdev->config.r300.resync_scratch);
        /* arbitrary marker value written through the resync mechanism */
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring, false);
}
  224.  
/*
 * Counterpart of r420_cp_errata_init(): flush the outstanding RESYNC
 * and release the scratch register it used.
 */
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

        /* Catch the RESYNC we dispatched all the way back,
         * at the very beginning of the CP init.
         */
        /* emit a 3D destination-cache flush so the resync completes */
        radeon_ring_lock(rdev, ring, 8);
        radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(ring, R300_RB3D_DC_FINISH);
        radeon_ring_unlock_commit(rdev, ring, false);
        radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
  238.  
/**
 * r420_startup - bring the GPU engines up (used from init and resume paths).
 * @rdev: radeon device
 *
 * Order matters throughout: MC before GART, GART before pipes, fences and
 * IRQs before the CP, CP before the IB pool.  Returns 0 on success or a
 * negative error code from the first stage that fails.
 */
static int r420_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r420_clock_resume(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        r420_pipes_init(rdev);

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
                return r;
        }

        /* Enable IRQ (install only once; startup may run again on resume) */
        if (!rdev->irq.installed) {
                r = radeon_irq_kms_init(rdev);
                if (r)
                        return r;
        }

        r100_irq_set(rdev);
        /* cache HOST_PATH_CNTL so later code can restore/flush HDP */
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        /* queue the RV410/R420 CP DMA deadlock workaround */
        r420_cp_errata_init(rdev);

        r = radeon_ib_pool_init(rdev);
        if (r) {
                dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
                return r;
        }

        return 0;
}
  299.  
  300.  
  301.  
  302.  
  303.  
  304.  
/**
 * r420_init - one-time ASIC initialization entry point for R420-class chips.
 *
 * @rdev: radeon device
 *
 * Performs BIOS discovery, reset, clock/MC/GART setup and finally calls
 * r420_startup().  A startup failure is not fatal: acceleration is simply
 * disabled and 0 is still returned so the device remains usable unaccelerated.
 */
int r420_init(struct radeon_device *rdev)
{
        int r;

        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* restore some register to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS: AVIVO parts cannot run without one; older parts can */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        /* parse the BIOS with whichever table format it uses */
        if (rdev->is_atom_bios) {
                r = radeon_atombios_init(rdev);
                if (r) {
                        return r;
                }
        } else {
                r = radeon_combios_init(rdev);
                if (r) {
                        return r;
                }
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                        "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                        RREG32(R_000E40_RBBM_STATUS),
                        RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;

        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP; on failure fall back to non-AGP operation */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        r420_debugfs(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r) {
                return r;
        }
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r) {
                return r;
        }
        /* only plain R420 needs bus mastering enabled explicitly here */
        if (rdev->family == CHIP_R420)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r420_set_reg_safe(rdev);

        /* Initialize power management */
        radeon_pm_init(rdev);

        rdev->accel_working = true;
        r = r420_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init; stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}
  396.  
  397. /*
  398.  * Debugfs info
  399.  */
  400. #if defined(CONFIG_DEBUG_FS)
  401. static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
  402. {
  403.         struct drm_info_node *node = (struct drm_info_node *) m->private;
  404.         struct drm_device *dev = node->minor->dev;
  405.         struct radeon_device *rdev = dev->dev_private;
  406.         uint32_t tmp;
  407.  
  408.         tmp = RREG32(R400_GB_PIPE_SELECT);
  409.         seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
  410.         tmp = RREG32(R300_GB_TILE_CONFIG);
  411.         seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
  412.         tmp = RREG32(R300_DST_PIPE_CONFIG);
  413.         seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
  414.         return 0;
  415. }
  416.  
  417. static struct drm_info_list r420_pipes_info_list[] = {
  418.         {"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
  419. };
  420. #endif
  421.  
  422. int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
  423. {
  424. #if defined(CONFIG_DEBUG_FS)
  425.         return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
  426. #else
  427.         return 0;
  428. #endif
  429. }
  430.